diff options
author | Yogesh Ashok Powar <yogeshp@marvell.com> | 2012-03-12 22:35:13 -0400 |
---|---|---|
committer | John W. Linville <linville@tuxdriver.com> | 2012-03-13 14:54:19 -0400 |
commit | 8c00228e98bd2c8ab92b6f364f196cb60a7ff76d (patch) | |
tree | 7783850bca4b64b8e2a68907d3c70b2055ee5808 | |
parent | bb7de2bad5cb527e5fdc6b64d7f5d6e5009f2962 (diff) |
mwifiex: rename functions and variables for better readability
Renaming the long function and variable names from the 11n_rxreorder.c
file to shorter ones for better readability.
Signed-off-by: Yogesh Ashok Powar <yogeshp@marvell.com>
Signed-off-by: Bing Zhao <bzhao@marvell.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
-rw-r--r-- | drivers/net/wireless/mwifiex/11n_rxreorder.c | 160 |
1 files changed, 73 insertions, 87 deletions
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c index 491cd85cb6f2..06f5d3684027 100644 --- a/drivers/net/wireless/mwifiex/11n_rxreorder.c +++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c | |||
@@ -27,31 +27,31 @@ | |||
27 | #include "11n_rxreorder.h" | 27 | #include "11n_rxreorder.h" |
28 | 28 | ||
29 | /* | 29 | /* |
30 | * This function dispatches all packets in the Rx reorder table. | 30 | * This function dispatches all packets in the Rx reorder table until the |
31 | * start window. | ||
31 | * | 32 | * |
32 | * There could be holes in the buffer, which are skipped by the function. | 33 | * There could be holes in the buffer, which are skipped by the function. |
33 | * Since the buffer is linear, the function uses rotation to simulate | 34 | * Since the buffer is linear, the function uses rotation to simulate |
34 | * circular buffer. | 35 | * circular buffer. |
35 | */ | 36 | */ |
36 | static void | 37 | static void |
37 | mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv, | 38 | mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv, |
38 | struct mwifiex_rx_reorder_tbl | 39 | struct mwifiex_rx_reorder_tbl *tbl, int start_win) |
39 | *rx_reor_tbl_ptr, int start_win) | ||
40 | { | 40 | { |
41 | int no_pkt_to_send, i; | 41 | int pkt_to_send, i; |
42 | void *rx_tmp_ptr; | 42 | void *rx_tmp_ptr; |
43 | unsigned long flags; | 43 | unsigned long flags; |
44 | 44 | ||
45 | no_pkt_to_send = (start_win > rx_reor_tbl_ptr->start_win) ? | 45 | pkt_to_send = (start_win > tbl->start_win) ? |
46 | min((start_win - rx_reor_tbl_ptr->start_win), | 46 | min((start_win - tbl->start_win), tbl->win_size) : |
47 | rx_reor_tbl_ptr->win_size) : rx_reor_tbl_ptr->win_size; | 47 | tbl->win_size; |
48 | 48 | ||
49 | for (i = 0; i < no_pkt_to_send; ++i) { | 49 | for (i = 0; i < pkt_to_send; ++i) { |
50 | spin_lock_irqsave(&priv->rx_pkt_lock, flags); | 50 | spin_lock_irqsave(&priv->rx_pkt_lock, flags); |
51 | rx_tmp_ptr = NULL; | 51 | rx_tmp_ptr = NULL; |
52 | if (rx_reor_tbl_ptr->rx_reorder_ptr[i]) { | 52 | if (tbl->rx_reorder_ptr[i]) { |
53 | rx_tmp_ptr = rx_reor_tbl_ptr->rx_reorder_ptr[i]; | 53 | rx_tmp_ptr = tbl->rx_reorder_ptr[i]; |
54 | rx_reor_tbl_ptr->rx_reorder_ptr[i] = NULL; | 54 | tbl->rx_reorder_ptr[i] = NULL; |
55 | } | 55 | } |
56 | spin_unlock_irqrestore(&priv->rx_pkt_lock, flags); | 56 | spin_unlock_irqrestore(&priv->rx_pkt_lock, flags); |
57 | if (rx_tmp_ptr) | 57 | if (rx_tmp_ptr) |
@@ -63,13 +63,12 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv, | |||
63 | * We don't have a circular buffer, hence use rotation to simulate | 63 | * We don't have a circular buffer, hence use rotation to simulate |
64 | * circular buffer | 64 | * circular buffer |
65 | */ | 65 | */ |
66 | for (i = 0; i < rx_reor_tbl_ptr->win_size - no_pkt_to_send; ++i) { | 66 | for (i = 0; i < tbl->win_size - pkt_to_send; ++i) { |
67 | rx_reor_tbl_ptr->rx_reorder_ptr[i] = | 67 | tbl->rx_reorder_ptr[i] = tbl->rx_reorder_ptr[pkt_to_send + i]; |
68 | rx_reor_tbl_ptr->rx_reorder_ptr[no_pkt_to_send + i]; | 68 | tbl->rx_reorder_ptr[pkt_to_send + i] = NULL; |
69 | rx_reor_tbl_ptr->rx_reorder_ptr[no_pkt_to_send + i] = NULL; | ||
70 | } | 69 | } |
71 | 70 | ||
72 | rx_reor_tbl_ptr->start_win = start_win; | 71 | tbl->start_win = start_win; |
73 | spin_unlock_irqrestore(&priv->rx_pkt_lock, flags); | 72 | spin_unlock_irqrestore(&priv->rx_pkt_lock, flags); |
74 | } | 73 | } |
75 | 74 | ||
@@ -83,20 +82,20 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv, | |||
83 | */ | 82 | */ |
84 | static void | 83 | static void |
85 | mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv, | 84 | mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv, |
86 | struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr) | 85 | struct mwifiex_rx_reorder_tbl *tbl) |
87 | { | 86 | { |
88 | int i, j, xchg; | 87 | int i, j, xchg; |
89 | void *rx_tmp_ptr; | 88 | void *rx_tmp_ptr; |
90 | unsigned long flags; | 89 | unsigned long flags; |
91 | 90 | ||
92 | for (i = 0; i < rx_reor_tbl_ptr->win_size; ++i) { | 91 | for (i = 0; i < tbl->win_size; ++i) { |
93 | spin_lock_irqsave(&priv->rx_pkt_lock, flags); | 92 | spin_lock_irqsave(&priv->rx_pkt_lock, flags); |
94 | if (!rx_reor_tbl_ptr->rx_reorder_ptr[i]) { | 93 | if (!tbl->rx_reorder_ptr[i]) { |
95 | spin_unlock_irqrestore(&priv->rx_pkt_lock, flags); | 94 | spin_unlock_irqrestore(&priv->rx_pkt_lock, flags); |
96 | break; | 95 | break; |
97 | } | 96 | } |
98 | rx_tmp_ptr = rx_reor_tbl_ptr->rx_reorder_ptr[i]; | 97 | rx_tmp_ptr = tbl->rx_reorder_ptr[i]; |
99 | rx_reor_tbl_ptr->rx_reorder_ptr[i] = NULL; | 98 | tbl->rx_reorder_ptr[i] = NULL; |
100 | spin_unlock_irqrestore(&priv->rx_pkt_lock, flags); | 99 | spin_unlock_irqrestore(&priv->rx_pkt_lock, flags); |
101 | mwifiex_process_rx_packet(priv->adapter, rx_tmp_ptr); | 100 | mwifiex_process_rx_packet(priv->adapter, rx_tmp_ptr); |
102 | } | 101 | } |
@@ -107,15 +106,13 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv, | |||
107 | * circular buffer | 106 | * circular buffer |
108 | */ | 107 | */ |
109 | if (i > 0) { | 108 | if (i > 0) { |
110 | xchg = rx_reor_tbl_ptr->win_size - i; | 109 | xchg = tbl->win_size - i; |
111 | for (j = 0; j < xchg; ++j) { | 110 | for (j = 0; j < xchg; ++j) { |
112 | rx_reor_tbl_ptr->rx_reorder_ptr[j] = | 111 | tbl->rx_reorder_ptr[j] = tbl->rx_reorder_ptr[i + j]; |
113 | rx_reor_tbl_ptr->rx_reorder_ptr[i + j]; | 112 | tbl->rx_reorder_ptr[i + j] = NULL; |
114 | rx_reor_tbl_ptr->rx_reorder_ptr[i + j] = NULL; | ||
115 | } | 113 | } |
116 | } | 114 | } |
117 | rx_reor_tbl_ptr->start_win = (rx_reor_tbl_ptr->start_win + i) | 115 | tbl->start_win = (tbl->start_win + i) & (MAX_TID_VALUE - 1); |
118 | &(MAX_TID_VALUE - 1); | ||
119 | spin_unlock_irqrestore(&priv->rx_pkt_lock, flags); | 116 | spin_unlock_irqrestore(&priv->rx_pkt_lock, flags); |
120 | } | 117 | } |
121 | 118 | ||
@@ -126,28 +123,25 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv, | |||
126 | * pending packets in the Rx reorder table before deletion. | 123 | * pending packets in the Rx reorder table before deletion. |
127 | */ | 124 | */ |
128 | static void | 125 | static void |
129 | mwifiex_11n_delete_rx_reorder_tbl_entry(struct mwifiex_private *priv, | 126 | mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv, |
130 | struct mwifiex_rx_reorder_tbl | 127 | struct mwifiex_rx_reorder_tbl *tbl) |
131 | *rx_reor_tbl_ptr) | ||
132 | { | 128 | { |
133 | unsigned long flags; | 129 | unsigned long flags; |
134 | 130 | ||
135 | if (!rx_reor_tbl_ptr) | 131 | if (!tbl) |
136 | return; | 132 | return; |
137 | 133 | ||
138 | mwifiex_11n_dispatch_pkt_until_start_win(priv, rx_reor_tbl_ptr, | 134 | mwifiex_11n_dispatch_pkt(priv, tbl, (tbl->start_win + tbl->win_size) & |
139 | (rx_reor_tbl_ptr->start_win + | 135 | (MAX_TID_VALUE - 1)); |
140 | rx_reor_tbl_ptr->win_size) | ||
141 | &(MAX_TID_VALUE - 1)); | ||
142 | 136 | ||
143 | del_timer(&rx_reor_tbl_ptr->timer_context.timer); | 137 | del_timer(&tbl->timer_context.timer); |
144 | 138 | ||
145 | spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); | 139 | spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); |
146 | list_del(&rx_reor_tbl_ptr->list); | 140 | list_del(&tbl->list); |
147 | spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); | 141 | spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); |
148 | 142 | ||
149 | kfree(rx_reor_tbl_ptr->rx_reorder_ptr); | 143 | kfree(tbl->rx_reorder_ptr); |
150 | kfree(rx_reor_tbl_ptr); | 144 | kfree(tbl); |
151 | } | 145 | } |
152 | 146 | ||
153 | /* | 147 | /* |
@@ -157,16 +151,15 @@ mwifiex_11n_delete_rx_reorder_tbl_entry(struct mwifiex_private *priv, | |||
157 | static struct mwifiex_rx_reorder_tbl * | 151 | static struct mwifiex_rx_reorder_tbl * |
158 | mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta) | 152 | mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta) |
159 | { | 153 | { |
160 | struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr; | 154 | struct mwifiex_rx_reorder_tbl *tbl; |
161 | unsigned long flags; | 155 | unsigned long flags; |
162 | 156 | ||
163 | spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); | 157 | spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); |
164 | list_for_each_entry(rx_reor_tbl_ptr, &priv->rx_reorder_tbl_ptr, list) { | 158 | list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list) { |
165 | if ((!memcmp(rx_reor_tbl_ptr->ta, ta, ETH_ALEN)) | 159 | if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid) { |
166 | && (rx_reor_tbl_ptr->tid == tid)) { | ||
167 | spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, | 160 | spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, |
168 | flags); | 161 | flags); |
169 | return rx_reor_tbl_ptr; | 162 | return tbl; |
170 | } | 163 | } |
171 | } | 164 | } |
172 | spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); | 165 | spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); |
@@ -200,22 +193,19 @@ mwifiex_11n_find_last_seq_num(struct mwifiex_rx_reorder_tbl *rx_reorder_tbl_ptr) | |||
200 | static void | 193 | static void |
201 | mwifiex_flush_data(unsigned long context) | 194 | mwifiex_flush_data(unsigned long context) |
202 | { | 195 | { |
203 | struct reorder_tmr_cnxt *reorder_cnxt = | 196 | struct reorder_tmr_cnxt *ctx = |
204 | (struct reorder_tmr_cnxt *) context; | 197 | (struct reorder_tmr_cnxt *) context; |
205 | int start_win; | 198 | int start_win; |
206 | 199 | ||
207 | start_win = mwifiex_11n_find_last_seq_num(reorder_cnxt->ptr); | 200 | start_win = mwifiex_11n_find_last_seq_num(ctx->ptr); |
208 | 201 | ||
209 | if (start_win < 0) | 202 | if (start_win < 0) |
210 | return; | 203 | return; |
211 | 204 | ||
212 | dev_dbg(reorder_cnxt->priv->adapter->dev, "info: flush data %d\n", | 205 | dev_dbg(ctx->priv->adapter->dev, "info: flush data %d\n", start_win); |
213 | start_win); | 206 | mwifiex_11n_dispatch_pkt(ctx->priv, ctx->ptr, |
214 | mwifiex_11n_dispatch_pkt_until_start_win(reorder_cnxt->priv, | 207 | (ctx->ptr->start_win + start_win + 1) & |
215 | reorder_cnxt->ptr, | 208 | (MAX_TID_VALUE - 1)); |
216 | ((reorder_cnxt->ptr->start_win | ||
217 | + start_win + 1) & | ||
218 | (MAX_TID_VALUE - 1))); | ||
219 | } | 209 | } |
220 | 210 | ||
221 | /* | 211 | /* |
@@ -233,7 +223,7 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta, | |||
233 | int tid, int win_size, int seq_num) | 223 | int tid, int win_size, int seq_num) |
234 | { | 224 | { |
235 | int i; | 225 | int i; |
236 | struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr, *new_node; | 226 | struct mwifiex_rx_reorder_tbl *tbl, *new_node; |
237 | u16 last_seq = 0; | 227 | u16 last_seq = 0; |
238 | unsigned long flags; | 228 | unsigned long flags; |
239 | 229 | ||
@@ -241,13 +231,12 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta, | |||
241 | * If we get a TID, ta pair which is already present dispatch all the | 231 | * If we get a TID, ta pair which is already present dispatch all the |
242 | * the packets and move the window size until the ssn | 232 | * the packets and move the window size until the ssn |
243 | */ | 233 | */ |
244 | rx_reor_tbl_ptr = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta); | 234 | tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta); |
245 | if (rx_reor_tbl_ptr) { | 235 | if (tbl) { |
246 | mwifiex_11n_dispatch_pkt_until_start_win(priv, rx_reor_tbl_ptr, | 236 | mwifiex_11n_dispatch_pkt(priv, tbl, seq_num); |
247 | seq_num); | ||
248 | return; | 237 | return; |
249 | } | 238 | } |
250 | /* if !rx_reor_tbl_ptr then create one */ | 239 | /* if !tbl then create one */ |
251 | new_node = kzalloc(sizeof(struct mwifiex_rx_reorder_tbl), GFP_KERNEL); | 240 | new_node = kzalloc(sizeof(struct mwifiex_rx_reorder_tbl), GFP_KERNEL); |
252 | if (!new_node) { | 241 | if (!new_node) { |
253 | dev_err(priv->adapter->dev, "%s: failed to alloc new_node\n", | 242 | dev_err(priv->adapter->dev, "%s: failed to alloc new_node\n", |
@@ -404,24 +393,23 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv, | |||
404 | u16 seq_num, u16 tid, | 393 | u16 seq_num, u16 tid, |
405 | u8 *ta, u8 pkt_type, void *payload) | 394 | u8 *ta, u8 pkt_type, void *payload) |
406 | { | 395 | { |
407 | struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr; | 396 | struct mwifiex_rx_reorder_tbl *tbl; |
408 | int start_win, end_win, win_size; | 397 | int start_win, end_win, win_size; |
409 | u16 pkt_index; | 398 | u16 pkt_index; |
410 | 399 | ||
411 | rx_reor_tbl_ptr = | 400 | tbl = mwifiex_11n_get_rx_reorder_tbl((struct mwifiex_private *) priv, |
412 | mwifiex_11n_get_rx_reorder_tbl((struct mwifiex_private *) priv, | 401 | tid, ta); |
413 | tid, ta); | 402 | if (!tbl) { |
414 | if (!rx_reor_tbl_ptr) { | ||
415 | if (pkt_type != PKT_TYPE_BAR) | 403 | if (pkt_type != PKT_TYPE_BAR) |
416 | mwifiex_process_rx_packet(priv->adapter, payload); | 404 | mwifiex_process_rx_packet(priv->adapter, payload); |
417 | return 0; | 405 | return 0; |
418 | } | 406 | } |
419 | start_win = rx_reor_tbl_ptr->start_win; | 407 | start_win = tbl->start_win; |
420 | win_size = rx_reor_tbl_ptr->win_size; | 408 | win_size = tbl->win_size; |
421 | end_win = ((start_win + win_size) - 1) & (MAX_TID_VALUE - 1); | 409 | end_win = ((start_win + win_size) - 1) & (MAX_TID_VALUE - 1); |
422 | del_timer(&rx_reor_tbl_ptr->timer_context.timer); | 410 | del_timer(&tbl->timer_context.timer); |
423 | mod_timer(&rx_reor_tbl_ptr->timer_context.timer, jiffies | 411 | mod_timer(&tbl->timer_context.timer, |
424 | + (MIN_FLUSH_TIMER_MS * win_size * HZ) / 1000); | 412 | jiffies + (MIN_FLUSH_TIMER_MS * win_size * HZ) / 1000); |
425 | 413 | ||
426 | /* | 414 | /* |
427 | * If seq_num is less then starting win then ignore and drop the | 415 | * If seq_num is less then starting win then ignore and drop the |
@@ -452,8 +440,7 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv, | |||
452 | start_win = (end_win - win_size) + 1; | 440 | start_win = (end_win - win_size) + 1; |
453 | else | 441 | else |
454 | start_win = (MAX_TID_VALUE - (win_size - seq_num)) + 1; | 442 | start_win = (MAX_TID_VALUE - (win_size - seq_num)) + 1; |
455 | mwifiex_11n_dispatch_pkt_until_start_win(priv, | 443 | mwifiex_11n_dispatch_pkt(priv, tbl, start_win); |
456 | rx_reor_tbl_ptr, start_win); | ||
457 | } | 444 | } |
458 | 445 | ||
459 | if (pkt_type != PKT_TYPE_BAR) { | 446 | if (pkt_type != PKT_TYPE_BAR) { |
@@ -462,17 +449,17 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv, | |||
462 | else | 449 | else |
463 | pkt_index = (seq_num+MAX_TID_VALUE) - start_win; | 450 | pkt_index = (seq_num+MAX_TID_VALUE) - start_win; |
464 | 451 | ||
465 | if (rx_reor_tbl_ptr->rx_reorder_ptr[pkt_index]) | 452 | if (tbl->rx_reorder_ptr[pkt_index]) |
466 | return -1; | 453 | return -1; |
467 | 454 | ||
468 | rx_reor_tbl_ptr->rx_reorder_ptr[pkt_index] = payload; | 455 | tbl->rx_reorder_ptr[pkt_index] = payload; |
469 | } | 456 | } |
470 | 457 | ||
471 | /* | 458 | /* |
472 | * Dispatch all packets sequentially from start_win until a | 459 | * Dispatch all packets sequentially from start_win until a |
473 | * hole is found and adjust the start_win appropriately | 460 | * hole is found and adjust the start_win appropriately |
474 | */ | 461 | */ |
475 | mwifiex_11n_scan_and_dispatch(priv, rx_reor_tbl_ptr); | 462 | mwifiex_11n_scan_and_dispatch(priv, tbl); |
476 | 463 | ||
477 | return 0; | 464 | return 0; |
478 | } | 465 | } |
@@ -486,7 +473,7 @@ void | |||
486 | mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac, | 473 | mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac, |
487 | u8 type, int initiator) | 474 | u8 type, int initiator) |
488 | { | 475 | { |
489 | struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr; | 476 | struct mwifiex_rx_reorder_tbl *tbl; |
490 | struct mwifiex_tx_ba_stream_tbl *ptx_tbl; | 477 | struct mwifiex_tx_ba_stream_tbl *ptx_tbl; |
491 | u8 cleanup_rx_reorder_tbl; | 478 | u8 cleanup_rx_reorder_tbl; |
492 | unsigned long flags; | 479 | unsigned long flags; |
@@ -500,14 +487,14 @@ mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac, | |||
500 | "initiator=%d\n", peer_mac, tid, initiator); | 487 | "initiator=%d\n", peer_mac, tid, initiator); |
501 | 488 | ||
502 | if (cleanup_rx_reorder_tbl) { | 489 | if (cleanup_rx_reorder_tbl) { |
503 | rx_reor_tbl_ptr = mwifiex_11n_get_rx_reorder_tbl(priv, tid, | 490 | tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, |
504 | peer_mac); | 491 | peer_mac); |
505 | if (!rx_reor_tbl_ptr) { | 492 | if (!tbl) { |
506 | dev_dbg(priv->adapter->dev, | 493 | dev_dbg(priv->adapter->dev, |
507 | "event: TID, TA not found in table\n"); | 494 | "event: TID, TA not found in table\n"); |
508 | return; | 495 | return; |
509 | } | 496 | } |
510 | mwifiex_11n_delete_rx_reorder_tbl_entry(priv, rx_reor_tbl_ptr); | 497 | mwifiex_del_rx_reorder_entry(priv, tbl); |
511 | } else { | 498 | } else { |
512 | ptx_tbl = mwifiex_get_ba_tbl(priv, tid, peer_mac); | 499 | ptx_tbl = mwifiex_get_ba_tbl(priv, tid, peer_mac); |
513 | if (!ptx_tbl) { | 500 | if (!ptx_tbl) { |
@@ -535,7 +522,7 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv, | |||
535 | (struct host_cmd_ds_11n_addba_rsp *) | 522 | (struct host_cmd_ds_11n_addba_rsp *) |
536 | &resp->params.add_ba_rsp; | 523 | &resp->params.add_ba_rsp; |
537 | int tid, win_size; | 524 | int tid, win_size; |
538 | struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr; | 525 | struct mwifiex_rx_reorder_tbl *tbl; |
539 | uint16_t block_ack_param_set; | 526 | uint16_t block_ack_param_set; |
540 | 527 | ||
541 | block_ack_param_set = le16_to_cpu(add_ba_rsp->block_ack_param_set); | 528 | block_ack_param_set = le16_to_cpu(add_ba_rsp->block_ack_param_set); |
@@ -557,13 +544,12 @@ int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv, | |||
557 | tid, add_ba_rsp->ssn, win_size); | 544 | tid, add_ba_rsp->ssn, win_size); |
558 | } else { | 545 | } else { |
559 | dev_err(priv->adapter->dev, "ADDBA RSP: failed %pM tid=%d)\n", | 546 | dev_err(priv->adapter->dev, "ADDBA RSP: failed %pM tid=%d)\n", |
560 | add_ba_rsp->peer_mac_addr, tid); | 547 | add_ba_rsp->peer_mac_addr, tid); |
561 | 548 | ||
562 | rx_reor_tbl_ptr = mwifiex_11n_get_rx_reorder_tbl(priv, | 549 | tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, |
563 | tid, add_ba_rsp->peer_mac_addr); | 550 | add_ba_rsp->peer_mac_addr); |
564 | if (rx_reor_tbl_ptr) | 551 | if (tbl) |
565 | mwifiex_11n_delete_rx_reorder_tbl_entry(priv, | 552 | mwifiex_del_rx_reorder_entry(priv, tbl); |
566 | rx_reor_tbl_ptr); | ||
567 | } | 553 | } |
568 | 554 | ||
569 | return 0; | 555 | return 0; |
@@ -602,7 +588,7 @@ void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv) | |||
602 | list_for_each_entry_safe(del_tbl_ptr, tmp_node, | 588 | list_for_each_entry_safe(del_tbl_ptr, tmp_node, |
603 | &priv->rx_reorder_tbl_ptr, list) { | 589 | &priv->rx_reorder_tbl_ptr, list) { |
604 | spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); | 590 | spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); |
605 | mwifiex_11n_delete_rx_reorder_tbl_entry(priv, del_tbl_ptr); | 591 | mwifiex_del_rx_reorder_entry(priv, del_tbl_ptr); |
606 | spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); | 592 | spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); |
607 | } | 593 | } |
608 | spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); | 594 | spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); |