author    Prasanna S. Panchamukhi <prasannax.s.panchamukhi@intel.com>  2010-04-13 19:35:58 -0400
committer Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>  2010-05-11 17:08:23 -0400
commit    d11a6e4495ee1fbb38b59bc88d49d050d3736929 (patch)
tree      08afc7d7909dc451878f2ec04747071e2999e6e4 /drivers/net/wimax
parent    ded0fd62a8a7cb3b12bb007079bff2b858a12d2b (diff)
wimax i2400m: fix race condition while accessing rx_roq by using kref count
This patch fixes a race condition in which one thread destroys the
memory allocated for rx_roq while another thread still happens to be
accessing rx_roq. Such a race occurs when the i2400m-sdio kernel
module gets unloaded, destroying the memory allocated for rx_roq
while rx_roq is being accessed by i2400m_rx_edata(), as explained
below:

    thread1                                 thread2

    void i2400m_rx_edata()
      /* access rx_roq[] */
      roq = &i2400m->rx_roq[ro_cin]
      i2400m_roq_[reset/queue/update_ws]
                                            void i2400m_rx_release()
                                              kfree(rx->roq);
                                              rx->roq = NULL;
      /* Oops! rx_roq is NULL */

This patch fixes the race condition using a refcount approach.

Signed-off-by: Prasanna S. Panchamukhi <prasannax.s.panchamukhi@intel.com>
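The fix follows the kernel's standard kref pattern: every reader pins
rx_roq by taking a reference under rx_lock before touching it, and the
memory is freed only by the release callback that kref_put() invokes
when the count reaches zero. A minimal standalone sketch of that
pattern (the demo_* names are illustrative, not the driver's):

    #include <linux/kref.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct demo {
    	spinlock_t lock;	/* protects refcount and roq */
    	struct kref refcount;	/* counts users of roq */
    	void *roq;		/* shared resource */
    };

    /* Release callback: runs only when the last reference is dropped. */
    static void demo_destroy(struct kref *ref)
    {
    	struct demo *d = container_of(ref, struct demo, refcount);

    	kfree(d->roq);		/* safe: no one holds a reference */
    	d->roq = NULL;
    }

    /* Reader path, analogous to i2400m_rx_edata(). */
    static void demo_use(struct demo *d)
    {
    	unsigned long flags;

    	spin_lock_irqsave(&d->lock, flags);
    	if (d->roq == NULL) {	/* already torn down */
    		spin_unlock_irqrestore(&d->lock, flags);
    		return;
    	}
    	kref_get(&d->refcount);	/* pin roq while we use it */
    	spin_unlock_irqrestore(&d->lock, flags);

    	/* ... use d->roq without holding the lock ... */

    	spin_lock_irqsave(&d->lock, flags);
    	kref_put(&d->refcount, demo_destroy);
    	spin_unlock_irqrestore(&d->lock, flags);
    }

    /* Teardown path, analogous to i2400m_rx_release(); drops the
     * initial reference that kref_init() established. */
    static void demo_teardown(struct demo *d)
    {
    	unsigned long flags;

    	spin_lock_irqsave(&d->lock, flags);
    	kref_put(&d->refcount, demo_destroy);
    	spin_unlock_irqrestore(&d->lock, flags);
    }

Because the count is only ever changed under the spinlock, a reader can
never observe rx_roq between "count reached zero" and "memory freed".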
Diffstat (limited to 'drivers/net/wimax')
 -rw-r--r--  drivers/net/wimax/i2400m/i2400m.h | 12
 -rw-r--r--  drivers/net/wimax/i2400m/rx.c     | 44
 2 files changed, 48 insertions(+), 8 deletions(-)
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
index 7a9c2c5b25cb..b8c7dbfead49 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -412,7 +412,7 @@ struct i2400m_barker_db;
  *
  * @tx_size_max: biggest TX message sent.
  *
- * @rx_lock: spinlock to protect RX members
+ * @rx_lock: spinlock to protect RX members and rx_roq_refcount.
  *
  * @rx_pl_num: total number of payloads received
  *
@@ -436,6 +436,10 @@ struct i2400m_barker_db;
  *   delivered. Then the driver can release them to the host. See
  *   drivers/net/i2400m/rx.c for details.
  *
+ * @rx_roq_refcount: refcount rx_roq. This refcounts any access to
+ *   rx_roq thus preventing rx_roq being destroyed when rx_roq
+ *   is being accessed. rx_roq_refcount is protected by rx_lock.
+ *
  * @rx_reports: reports received from the device that couldn't be
  *   processed because the driver wasn't still ready; when ready,
  *   they are pulled from here and chewed.
@@ -597,10 +601,12 @@ struct i2400m {
 		tx_num, tx_size_acc, tx_size_min, tx_size_max;
 
 	/* RX stuff */
-	spinlock_t rx_lock;	/* protect RX state */
+	/* protect RX state and rx_roq_refcount */
+	spinlock_t rx_lock;
 	unsigned rx_pl_num, rx_pl_max, rx_pl_min,
 		rx_num, rx_size_acc, rx_size_min, rx_size_max;
-	struct i2400m_roq *rx_roq;	/* not under rx_lock! */
+	struct i2400m_roq *rx_roq;	/* access is refcounted */
+	struct kref rx_roq_refcount;	/* refcount access to rx_roq */
 	u8 src_mac_addr[ETH_HLEN];
 	struct list_head rx_reports;	/* under rx_lock! */
 	struct work_struct rx_report_ws;
diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
index fa2e11e5b4b9..71b697f3a68d 100644
--- a/drivers/net/wimax/i2400m/rx.c
+++ b/drivers/net/wimax/i2400m/rx.c
@@ -917,6 +917,25 @@ void i2400m_roq_queue_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
 
 
 /*
+ * This routine destroys the memory allocated for rx_roq, when no
+ * other thread is accessing it. Access to rx_roq is refcounted by
+ * rx_roq_refcount, hence memory allocated must be destroyed when
+ * rx_roq_refcount becomes zero. This routine gets executed when
+ * rx_roq_refcount becomes zero.
+ */
+void i2400m_rx_roq_destroy(struct kref *ref)
+{
+	unsigned itr;
+	struct i2400m *i2400m
+		= container_of(ref, struct i2400m, rx_roq_refcount);
+	for (itr = 0; itr < I2400M_RO_CIN + 1; itr++)
+		__skb_queue_purge(&i2400m->rx_roq[itr].queue);
+	kfree(i2400m->rx_roq[0].log);
+	kfree(i2400m->rx_roq);
+	i2400m->rx_roq = NULL;
+}
+
+/*
  * Receive and send up an extended data packet
  *
  * @i2400m: device descriptor
@@ -969,6 +988,7 @@ void i2400m_rx_edata(struct i2400m *i2400m, struct sk_buff *skb_rx,
 	unsigned ro_needed, ro_type, ro_cin, ro_sn;
 	struct i2400m_roq *roq;
 	struct i2400m_roq_data *roq_data;
+	unsigned long flags;
 
 	BUILD_BUG_ON(ETH_HLEN > sizeof(*hdr));
 
@@ -1007,7 +1027,16 @@ void i2400m_rx_edata(struct i2400m *i2400m, struct sk_buff *skb_rx,
 		ro_cin = (reorder >> I2400M_RO_CIN_SHIFT) & I2400M_RO_CIN;
 		ro_sn = (reorder >> I2400M_RO_SN_SHIFT) & I2400M_RO_SN;
 
+		spin_lock_irqsave(&i2400m->rx_lock, flags);
 		roq = &i2400m->rx_roq[ro_cin];
+		if (roq == NULL) {
+			kfree_skb(skb);	/* rx_roq is already destroyed */
+			spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+			goto error;
+		}
+		kref_get(&i2400m->rx_roq_refcount);
+		spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+
 		roq_data = (struct i2400m_roq_data *) &skb->cb;
 		roq_data->sn = ro_sn;
 		roq_data->cs = cs;
@@ -1034,6 +1063,10 @@ void i2400m_rx_edata(struct i2400m *i2400m, struct sk_buff *skb_rx,
 		default:
 			dev_err(dev, "HW BUG? unknown reorder type %u\n", ro_type);
 		}
+
+		spin_lock_irqsave(&i2400m->rx_lock, flags);
+		kref_put(&i2400m->rx_roq_refcount, i2400m_rx_roq_destroy);
+		spin_unlock_irqrestore(&i2400m->rx_lock, flags);
 	}
 	else
 		i2400m_net_erx(i2400m, skb, cs);
@@ -1344,6 +1377,7 @@ int i2400m_rx_setup(struct i2400m *i2400m)
 			__i2400m_roq_init(&i2400m->rx_roq[itr]);
 			i2400m->rx_roq[itr].log = &rd[itr];
 		}
+		kref_init(&i2400m->rx_roq_refcount);
 	}
 	return 0;
 
@@ -1357,12 +1391,12 @@ error_roq_alloc:
 /* Tear down the RX queue and infrastructure */
 void i2400m_rx_release(struct i2400m *i2400m)
 {
+	unsigned long flags;
+
 	if (i2400m->rx_reorder) {
-		unsigned itr;
-		for(itr = 0; itr < I2400M_RO_CIN + 1; itr++)
-			__skb_queue_purge(&i2400m->rx_roq[itr].queue);
-		kfree(i2400m->rx_roq[0].log);
-		kfree(i2400m->rx_roq);
+		spin_lock_irqsave(&i2400m->rx_lock, flags);
+		kref_put(&i2400m->rx_roq_refcount, i2400m_rx_roq_destroy);
+		spin_unlock_irqrestore(&i2400m->rx_lock, flags);
 	}
 	/* at this point, nothing can be received... */
 	i2400m_report_hook_flush(i2400m);
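With both the RX path and the teardown path funneled through
kref_put() under rx_lock, the interleaving from the commit message can
no longer free rx_roq while i2400m_rx_edata() is using it; whichever
side drops the last reference runs i2400m_rx_roq_destroy(). A sketch
of the same scenario after the patch (the exact timing is
illustrative):

    thread1 (i2400m_rx_edata)           thread2 (i2400m_rx_release)

    lock; roq = &i2400m->rx_roq[ro_cin];
    kref_get: count 1 -> 2; unlock
                                        lock; kref_put: count 2 -> 1;
                                        unlock (destroy skipped,
                                        count != 0)
    i2400m_roq_[reset/queue/update_ws]
    /* rx_roq still valid */
    lock; kref_put: count 1 -> 0
    /* i2400m_rx_roq_destroy() frees rx_roq */
    unlock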