author    David S. Miller <davem@davemloft.net>  2010-05-16 02:14:16 -0400
committer David S. Miller <davem@davemloft.net>  2010-05-16 02:14:16 -0400
commit    1cdc5abf40c561982d2f7b06bcff17f9496309a5 (patch)
tree      92c0ca1688929a6660dc41e97df203aef23b8c09 /drivers/net/wimax/i2400m/rx.c
parent    e0f43752a942b7be1bc06b9fd74e20ae337c1cca (diff)
parent    0fb0a4f00aaf5de9f328273d7a46e3aa27dab496 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/inaky/wimax
Diffstat (limited to 'drivers/net/wimax/i2400m/rx.c')

 -rw-r--r--  drivers/net/wimax/i2400m/rx.c | 109
 1 file changed, 74 insertions(+), 35 deletions(-)
diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
index 5fa841d913b2..6537593fae66 100644
--- a/drivers/net/wimax/i2400m/rx.c
+++ b/drivers/net/wimax/i2400m/rx.c
@@ -155,6 +155,11 @@
 #define D_SUBMODULE rx
 #include "debug-levels.h"
 
+static int i2400m_rx_reorder_disabled;	/* 0 (rx reorder enabled) by default */
+module_param_named(rx_reorder_disabled, i2400m_rx_reorder_disabled, int, 0644);
+MODULE_PARM_DESC(rx_reorder_disabled,
+		"If true, RX reordering will be disabled.");
+
 struct i2400m_report_hook_args {
 	struct sk_buff *skb_rx;
 	const struct i2400m_l3l4_hdr *l3l4_hdr;
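
[Note: this hunk only declares the rx_reorder_disabled knob; the code that consults it lies outside this diff. As a hedged illustration of how such a parameter is typically read, the sketch below uses invented names (example_rx_setup, rx_reorder_disabled in module scope), not code from the driver.]

#include <linux/module.h>
#include <linux/moduleparam.h>

static int rx_reorder_disabled;	/* 0 == reordering enabled */
module_param(rx_reorder_disabled, int, 0644);
MODULE_PARM_DESC(rx_reorder_disabled, "If true, RX reordering is disabled.");

/* Hypothetical setup path: consult the knob once, before allocating
 * any reorder machinery.
 */
static int example_rx_setup(void)
{
	if (rx_reorder_disabled) {
		pr_info("RX reordering disabled by module parameter\n");
		return 0;	/* skip allocating reorder queues */
	}
	/* ... allocate and initialize reorder queues here ... */
	return 0;
}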
@@ -300,17 +305,16 @@ void i2400m_rx_ctl_ack(struct i2400m *i2400m,
 		d_printf(1, dev, "Huh? waiter for command reply cancelled\n");
 		goto error_waiter_cancelled;
 	}
-	if (ack_skb == NULL) {
+	if (IS_ERR(ack_skb))
 		dev_err(dev, "CMD/GET/SET ack: cannot allocate SKB\n");
-		i2400m->ack_skb = ERR_PTR(-ENOMEM);
-	} else
-		i2400m->ack_skb = ack_skb;
+	i2400m->ack_skb = ack_skb;
 	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
 	complete(&i2400m->msg_completion);
 	return;
 
 error_waiter_cancelled:
-	kfree_skb(ack_skb);
+	if (!IS_ERR(ack_skb))
+		kfree_skb(ack_skb);
 error_no_waiter:
 	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
 }
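
[Note: the rewritten ack path relies on the kernel's ERR_PTR()/IS_ERR() convention, where an errno is encoded into a pointer value so one variable can carry either a result or an error. A minimal standalone sketch of the idiom; the helper names are illustrative, not from the driver.]

#include <linux/err.h>
#include <linux/slab.h>

/* Illustrative helper: returns a valid pointer or an encoded errno. */
static void *example_alloc(size_t len)
{
	void *buf = kmalloc(len, GFP_KERNEL);

	if (!buf)
		return ERR_PTR(-ENOMEM);	/* error encoded in the pointer */
	return buf;
}

static int example_use(void)
{
	void *buf = example_alloc(128);

	if (IS_ERR(buf))		/* true only for ERR_PTR() values */
		return PTR_ERR(buf);	/* recover the errno, e.g. -ENOMEM */
	/* ... use buf ... */
	kfree(buf);
	return 0;
}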
@@ -741,12 +745,12 @@ unsigned __i2400m_roq_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
 	unsigned new_nws, nsn_itr;
 
 	new_nws = __i2400m_roq_nsn(roq, sn);
-	if (unlikely(new_nws >= 1024) && d_test(1)) {
-		dev_err(dev, "SW BUG? __update_ws new_nws %u (sn %u ws %u)\n",
-			new_nws, sn, roq->ws);
-		WARN_ON(1);
-		i2400m_roq_log_dump(i2400m, roq);
-	}
+	/*
+	 * For type 2(update_window_start) rx messages, there is no
+	 * need to check if the normalized sequence number is greater 1023.
+	 * Simply insert and deliver all packets to the host up to the
+	 * window start.
+	 */
 	skb_queue_walk_safe(&roq->queue, skb_itr, tmp_itr) {
 		roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb;
 		nsn_itr = __i2400m_roq_nsn(roq, roq_data_itr->sn);
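
[Note: for context on "normalized sequence number": the reorder queue normalizes each sequence number against the window start, so nsn >= 1024 means "outside the reorder window", which is what the removed "SW BUG?" checks used to flag. A sketch of that computation follows; the 2048-entry sequence space is an assumption about the driver's internals and is not visible in this diff.]

/* Sketch only: normalize sn against window start ws, assuming a
 * 2048-entry sequence space. An nsn below 1024 falls inside the
 * reorder window; anything >= 1024 lies outside it.
 */
static unsigned example_roq_nsn(unsigned ws, unsigned sn)
{
	int r = ((int)sn - (int)ws) % 2048;

	if (r < 0)
		r += 2048;
	return r;
}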
@@ -885,32 +889,52 @@ void i2400m_roq_queue_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
 		  i2400m, roq, skb, sn);
 	len = skb_queue_len(&roq->queue);
 	nsn = __i2400m_roq_nsn(roq, sn);
+	/*
+	 * For type 3(queue_update_window_start) rx messages, there is no
+	 * need to check if the normalized sequence number is greater 1023.
+	 * Simply insert and deliver all packets to the host up to the
+	 * window start.
+	 */
 	old_ws = roq->ws;
-	if (unlikely(nsn >= 1024)) {
-		dev_err(dev, "SW BUG? queue_update_ws nsn %u (sn %u ws %u)\n",
-			nsn, sn, roq->ws);
-		i2400m_roq_log_dump(i2400m, roq);
-		i2400m_reset(i2400m, I2400M_RT_WARM);
-	} else {
-		/* if the queue is empty, don't bother as we'd queue
-		 * it and inmediately unqueue it -- just deliver it */
-		if (len == 0) {
-			struct i2400m_roq_data *roq_data;
-			roq_data = (struct i2400m_roq_data *) &skb->cb;
-			i2400m_net_erx(i2400m, skb, roq_data->cs);
-		}
-		else
-			__i2400m_roq_queue(i2400m, roq, skb, sn, nsn);
-		__i2400m_roq_update_ws(i2400m, roq, sn + 1);
-		i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET_WS,
-				   old_ws, len, sn, nsn, roq->ws);
-	}
+	/* If the queue is empty, don't bother as we'd queue
+	 * it and immediately unqueue it -- just deliver it.
+	 */
+	if (len == 0) {
+		struct i2400m_roq_data *roq_data;
+		roq_data = (struct i2400m_roq_data *) &skb->cb;
+		i2400m_net_erx(i2400m, skb, roq_data->cs);
+	} else
+		__i2400m_roq_queue(i2400m, roq, skb, sn, nsn);
+
+	__i2400m_roq_update_ws(i2400m, roq, sn + 1);
+	i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET_WS,
+			   old_ws, len, sn, nsn, roq->ws);
+
 	d_fnend(2, dev, "(i2400m %p roq %p skb %p sn %u) = void\n",
 		i2400m, roq, skb, sn);
 }
 
 
 /*
+ * This routine destroys the memory allocated for rx_roq, when no
+ * other thread is accessing it. Access to rx_roq is refcounted by
+ * rx_roq_refcount, hence memory allocated must be destroyed when
+ * rx_roq_refcount becomes zero. This routine gets executed when
+ * rx_roq_refcount becomes zero.
+ */
+void i2400m_rx_roq_destroy(struct kref *ref)
+{
+	unsigned itr;
+	struct i2400m *i2400m
+		= container_of(ref, struct i2400m, rx_roq_refcount);
+	for (itr = 0; itr < I2400M_RO_CIN + 1; itr++)
+		__skb_queue_purge(&i2400m->rx_roq[itr].queue);
+	kfree(i2400m->rx_roq[0].log);
+	kfree(i2400m->rx_roq);
+	i2400m->rx_roq = NULL;
+}
+
+/*
  * Receive and send up an extended data packet
  *
  * @i2400m: device descriptor
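
[Note: the new i2400m_rx_roq_destroy() is a standard kref release callback: kref_put() invokes it with a pointer to the embedded struct kref, and container_of() recovers the enclosing object. A minimal sketch of the pattern; the struct and names below are illustrative.]

#include <linux/kref.h>
#include <linux/slab.h>

struct example_obj {
	int payload;
	struct kref refcount;	/* embedded, as rx_roq_refcount is in struct i2400m */
};

/* Invoked by kref_put() when the last reference drops. */
static void example_obj_destroy(struct kref *ref)
{
	struct example_obj *obj =
		container_of(ref, struct example_obj, refcount);

	kfree(obj);	/* safe: no other holder can still reach obj here */
}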
@@ -963,6 +987,7 @@ void i2400m_rx_edata(struct i2400m *i2400m, struct sk_buff *skb_rx,
 	unsigned ro_needed, ro_type, ro_cin, ro_sn;
 	struct i2400m_roq *roq;
 	struct i2400m_roq_data *roq_data;
+	unsigned long flags;
 
 	BUILD_BUG_ON(ETH_HLEN > sizeof(*hdr));
 
@@ -1001,7 +1026,16 @@ void i2400m_rx_edata(struct i2400m *i2400m, struct sk_buff *skb_rx,
 		ro_cin = (reorder >> I2400M_RO_CIN_SHIFT) & I2400M_RO_CIN;
 		ro_sn = (reorder >> I2400M_RO_SN_SHIFT) & I2400M_RO_SN;
 
+		spin_lock_irqsave(&i2400m->rx_lock, flags);
 		roq = &i2400m->rx_roq[ro_cin];
+		if (roq == NULL) {
+			kfree_skb(skb);	/* rx_roq is already destroyed */
+			spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+			goto error;
+		}
+		kref_get(&i2400m->rx_roq_refcount);
+		spin_unlock_irqrestore(&i2400m->rx_lock, flags);
+
 		roq_data = (struct i2400m_roq_data *) &skb->cb;
 		roq_data->sn = ro_sn;
 		roq_data->cs = cs;
@@ -1028,6 +1062,10 @@ void i2400m_rx_edata(struct i2400m *i2400m, struct sk_buff *skb_rx,
 		default:
 			dev_err(dev, "HW BUG? unknown reorder type %u\n", ro_type);
 		}
+
+		spin_lock_irqsave(&i2400m->rx_lock, flags);
+		kref_put(&i2400m->rx_roq_refcount, i2400m_rx_roq_destroy);
+		spin_unlock_irqrestore(&i2400m->rx_lock, flags);
 	}
 	else
 		i2400m_net_erx(i2400m, skb, cs);
@@ -1337,6 +1375,7 @@ int i2400m_rx_setup(struct i2400m *i2400m)
 			__i2400m_roq_init(&i2400m->rx_roq[itr]);
 			i2400m->rx_roq[itr].log = &rd[itr];
 		}
+		kref_init(&i2400m->rx_roq_refcount);
 	}
 	return 0;
 
@@ -1350,12 +1389,12 @@ error_roq_alloc:
 /* Tear down the RX queue and infrastructure */
 void i2400m_rx_release(struct i2400m *i2400m)
 {
+	unsigned long flags;
+
 	if (i2400m->rx_reorder) {
-		unsigned itr;
-		for(itr = 0; itr < I2400M_RO_CIN + 1; itr++)
-			__skb_queue_purge(&i2400m->rx_roq[itr].queue);
-		kfree(i2400m->rx_roq[0].log);
-		kfree(i2400m->rx_roq);
+		spin_lock_irqsave(&i2400m->rx_lock, flags);
+		kref_put(&i2400m->rx_roq_refcount, i2400m_rx_roq_destroy);
+		spin_unlock_irqrestore(&i2400m->rx_lock, flags);
 	}
 	/* at this point, nothing can be received... */
 	i2400m_report_hook_flush(i2400m);
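
[Note: taken together, the hunks above give rx_roq a conventional kref lifecycle: kref_init() in i2400m_rx_setup(), kref_get()/kref_put() bracketing each use in i2400m_rx_edata(), and a final kref_put() in i2400m_rx_release(). Below is a condensed, self-contained sketch of that shape, with the spinlock covering the get/put as in the diff; every name in it is illustrative, not taken from the driver.]

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* The refcount guards an inner allocation (as rx_roq_refcount guards
 * i2400m->rx_roq), not the device structure itself, so dropping the
 * last reference under the lock does not free the lock.
 */
struct example_dev {
	spinlock_t lock;
	struct kref roq_refcount;
	void *roq;			/* stands in for the rx_roq array */
};

static void example_roq_destroy(struct kref *ref)
{
	struct example_dev *edev =
		container_of(ref, struct example_dev, roq_refcount);

	kfree(edev->roq);
	edev->roq = NULL;		/* lets the RX path detect teardown */
}

static int example_setup(struct example_dev *edev)
{
	spin_lock_init(&edev->lock);
	edev->roq = kzalloc(1024, GFP_KERNEL);
	if (!edev->roq)
		return -ENOMEM;
	kref_init(&edev->roq_refcount);	/* count starts at 1 */
	return 0;
}

static void example_rx_path(struct example_dev *edev)
{
	unsigned long flags;

	spin_lock_irqsave(&edev->lock, flags);
	if (!edev->roq) {		/* release already ran */
		spin_unlock_irqrestore(&edev->lock, flags);
		return;
	}
	kref_get(&edev->roq_refcount);	/* pin roq across the RX work */
	spin_unlock_irqrestore(&edev->lock, flags);

	/* ... reorder and deliver packets using edev->roq ... */

	spin_lock_irqsave(&edev->lock, flags);
	kref_put(&edev->roq_refcount, example_roq_destroy);
	spin_unlock_irqrestore(&edev->lock, flags);
}

static void example_release(struct example_dev *edev)
{
	unsigned long flags;

	spin_lock_irqsave(&edev->lock, flags);
	kref_put(&edev->roq_refcount, example_roq_destroy);	/* initial ref */
	spin_unlock_irqrestore(&edev->lock, flags);
}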