author		Andreas Fenkart <andreas.fenkart@streamunlimited.com>	2013-04-04 23:03:53 -0400
committer	John W. Linville <linville@tuxdriver.com>	2013-04-08 15:28:40 -0400
commit		2716fd7d455e277ad8676df794fe65bd1e1ba442
tree		f5d963c27c3f82653e47ebf5d5603db666a8fcd3 /drivers/net/wireless/mwifiex
parent		333f6b22c5b88a9d856703b440257f46efa714c8
mwifiex: hold proper locks when accessing ra_list / bss_prio lists
Not locking ra_list when dequeuing packets creates race conditions.

When adding a packet, 'tx_pkts_queued' is modified before highest_queued_prio is updated. If the main loop runs in between, it sees a packet queued (tx_pkts_queued > 0) but will not find it, since the maximum priority has not been raised yet. Depending on scheduling, the thread adding the packet could complete and restore the situation, but that is nothing to rely on.

Another race condition exists when a new packet exceeding the current maximum priority is added. If a packet is dequeued concurrently, the newly set maximum priority can be overwritten with the value of the dequeued packet, because selecting a packet and updating the maximum priority is not atomic. The result is an infinite loop, unless a new packet is added that has at least the priority of the hidden one.

The same applies to bss_prio_tbl: forward iteration is not a proper lock-free technique and provides no protection against concurrent calls to list_del. Although BSS entries are currently not added or removed dynamically, that may no longer be the case in the future. Hence, always hold the proper locks when accessing these lists.

Signed-off-by: Andreas Fenkart <andreas.fenkart@streamunlimited.com>
Signed-off-by: Bing Zhao <bzhao@marvell.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
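As an illustration of the reordering in the first hunk, here is a minimal sketch of the enqueue path as it looks after this patch (abridged; the skb_queue_tail() call and the local variables are assumed from the surrounding driver code rather than shown in this diff):

	/* Everything below runs under ra_list_spinlock; tx_pkts_queued is
	 * only incremented after highest_queued_prio has been raised, so a
	 * dequeuer that sees tx_pkts_queued > 0 also sees a priority high
	 * enough to locate the packet.
	 */
	spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);

	skb_queue_tail(&ra_list->skb_head, skb);	/* assumed enqueue step */
	ra_list->total_pkts_size += skb->len;
	ra_list->pkt_count++;

	if (atomic_read(&priv->wmm.highest_queued_prio) <
						tos_to_tid_inv[tid_down])
		atomic_set(&priv->wmm.highest_queued_prio,
						tos_to_tid_inv[tid_down]);

	/* advertise the new packet to the dequeue side only now */
	atomic_inc(&priv->wmm.tx_pkts_queued);

	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);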
Diffstat (limited to 'drivers/net/wireless/mwifiex')
-rw-r--r--	drivers/net/wireless/mwifiex/wmm.c	55
1 file changed, 29 insertions(+), 26 deletions(-)
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 75c8e80bb62f..2cc81ba590e3 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -685,13 +685,13 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
 	ra_list->total_pkts_size += skb->len;
 	ra_list->pkt_count++;
 
-	atomic_inc(&priv->wmm.tx_pkts_queued);
-
 	if (atomic_read(&priv->wmm.highest_queued_prio) <
 						tos_to_tid_inv[tid_down])
 		atomic_set(&priv->wmm.highest_queued_prio,
 						tos_to_tid_inv[tid_down]);
 
+	atomic_inc(&priv->wmm.tx_pkts_queued);
+
 	spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
 }
 
@@ -887,19 +887,15 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
 	struct mwifiex_bss_prio_node *bssprio_node, *bssprio_head;
 	struct mwifiex_tid_tbl *tid_ptr;
 	atomic_t *hqp;
-	int is_list_empty;
-	unsigned long flags;
+	unsigned long flags_bss, flags_ra;
 	int i, j;
 
 	for (j = adapter->priv_num - 1; j >= 0; --j) {
 		spin_lock_irqsave(&adapter->bss_prio_tbl[j].bss_prio_lock,
-				  flags);
-		is_list_empty = list_empty(&adapter->bss_prio_tbl[j]
-					   .bss_prio_head);
-		spin_unlock_irqrestore(&adapter->bss_prio_tbl[j].bss_prio_lock,
-				       flags);
-		if (is_list_empty)
-			continue;
+				  flags_bss);
+
+		if (list_empty(&adapter->bss_prio_tbl[j].bss_prio_head))
+			goto skip_prio_tbl;
 
 		if (adapter->bss_prio_tbl[j].bss_prio_cur ==
 		    (struct mwifiex_bss_prio_node *)
@@ -924,21 +920,18 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
 			hqp = &priv_tmp->wmm.highest_queued_prio;
 			for (i = atomic_read(hqp); i >= LOW_PRIO_TID; --i) {
 
+				spin_lock_irqsave(&priv_tmp->wmm.
+						  ra_list_spinlock, flags_ra);
+
 				tid_ptr = &(priv_tmp)->wmm.
 					tid_tbl_ptr[tos_to_tid[i]];
 
 				/* For non-STA ra_list_curr may be NULL */
 				if (!tid_ptr->ra_list_curr)
-					continue;
+					goto skip_wmm_queue;
 
-				spin_lock_irqsave(&priv_tmp->wmm.
-						  ra_list_spinlock, flags);
-				is_list_empty =
-					list_empty(&tid_ptr->ra_list);
-				spin_unlock_irqrestore(&priv_tmp->wmm.
-						       ra_list_spinlock, flags);
-				if (is_list_empty)
-					continue;
+				if (list_empty(&tid_ptr->ra_list))
+					goto skip_wmm_queue;
 
 				/*
 				 * Always choose the next ra we transmitted
@@ -960,10 +953,8 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
 				}
 
 				do {
-					is_list_empty =
-						skb_queue_empty(&ptr->skb_head);
-
-					if (!is_list_empty)
+					if (!skb_queue_empty(&ptr->skb_head))
+						/* holds both locks */
 						goto found;
 
 					/* Get next ra */
@@ -978,6 +969,11 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
 						struct mwifiex_ra_list_tbl,
 						list);
 				} while (ptr != head);
+
+skip_wmm_queue:
+				spin_unlock_irqrestore(&priv_tmp->wmm.
+						       ra_list_spinlock,
+						       flags_ra);
 			}
 
 skip_bss:
@@ -995,14 +991,21 @@ skip_bss:
 					struct mwifiex_bss_prio_node,
 					list);
 		} while (bssprio_node != bssprio_head);
+
+skip_prio_tbl:
+		spin_unlock_irqrestore(&adapter->bss_prio_tbl[j].bss_prio_lock,
+				       flags_bss);
 	}
+
 	return NULL;
 
 found:
-	spin_lock_irqsave(&priv_tmp->wmm.ra_list_spinlock, flags);
+	/* holds bss_prio_lock / ra_list_spinlock */
 	if (atomic_read(hqp) > i)
 		atomic_set(hqp, i);
-	spin_unlock_irqrestore(&priv_tmp->wmm.ra_list_spinlock, flags);
+	spin_unlock_irqrestore(&priv_tmp->wmm.ra_list_spinlock, flags_ra);
+	spin_unlock_irqrestore(&adapter->bss_prio_tbl[j].bss_prio_lock,
+			       flags_bss);
 
 	*priv = priv_tmp;
 	*tid = tos_to_tid[i];