aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/wimax/i2400m/rx.c
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2010-05-21 00:04:44 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2010-05-21 00:04:44 -0400
commitf8965467f366fd18f01feafb5db10512d7b4422c (patch)
tree3706a9cd779859271ca61b85c63a1bc3f82d626e /drivers/net/wimax/i2400m/rx.c
parenta26272e5200765691e67d6780e52b32498fdb659 (diff)
parent2ec8c6bb5d8f3a62a79f463525054bae1e3d4487 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6: (1674 commits) qlcnic: adding co maintainer ixgbe: add support for active DA cables ixgbe: dcb, do not tag tc_prio_control frames ixgbe: fix ixgbe_tx_is_paused logic ixgbe: always enable vlan strip/insert when DCB is enabled ixgbe: remove some redundant code in setting FCoE FIP filter ixgbe: fix wrong offset to fc_frame_header in ixgbe_fcoe_ddp ixgbe: fix header len when unsplit packet overflows to data buffer ipv6: Never schedule DAD timer on dead address ipv6: Use POSTDAD state ipv6: Use state_lock to protect ifa state ipv6: Replace inet6_ifaddr->dead with state cxgb4: notify upper drivers if the device is already up when they load cxgb4: keep interrupts available when the ports are brought down cxgb4: fix initial addition of MAC address cnic: Return SPQ credit to bnx2x after ring setup and shutdown. cnic: Convert cnic_local_flags to atomic ops. can: Fix SJA1000 command register writes on SMP systems bridge: fix build for CONFIG_SYSFS disabled ARCNET: Limit com20020 PCI ID matches for SOHARD cards ... Fix up various conflicts with pcmcia tree drivers/net/ {pcmcia/3c589_cs.c, wireless/orinoco/orinoco_cs.c and wireless/orinoco/spectrum_cs.c} and feature removal (Documentation/feature-removal-schedule.txt). Also fix a non-content conflict due to pm_qos_requirement getting renamed in the PM tree (now pm_qos_request) in net/mac80211/scan.c
Diffstat (limited to 'drivers/net/wimax/i2400m/rx.c')
-rw-r--r--drivers/net/wimax/i2400m/rx.c116
1 files changed, 74 insertions, 42 deletions
diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
index fa2e11e5b4b9..6537593fae66 100644
--- a/drivers/net/wimax/i2400m/rx.c
+++ b/drivers/net/wimax/i2400m/rx.c
@@ -155,6 +155,11 @@
155#define D_SUBMODULE rx 155#define D_SUBMODULE rx
156#include "debug-levels.h" 156#include "debug-levels.h"
157 157
158static int i2400m_rx_reorder_disabled; /* 0 (rx reorder enabled) by default */
159module_param_named(rx_reorder_disabled, i2400m_rx_reorder_disabled, int, 0644);
160MODULE_PARM_DESC(rx_reorder_disabled,
161 "If true, RX reordering will be disabled.");
162
158struct i2400m_report_hook_args { 163struct i2400m_report_hook_args {
159 struct sk_buff *skb_rx; 164 struct sk_buff *skb_rx;
160 const struct i2400m_l3l4_hdr *l3l4_hdr; 165 const struct i2400m_l3l4_hdr *l3l4_hdr;
@@ -300,20 +305,18 @@ void i2400m_rx_ctl_ack(struct i2400m *i2400m,
300 d_printf(1, dev, "Huh? waiter for command reply cancelled\n"); 305 d_printf(1, dev, "Huh? waiter for command reply cancelled\n");
301 goto error_waiter_cancelled; 306 goto error_waiter_cancelled;
302 } 307 }
303 if (ack_skb == NULL) { 308 if (IS_ERR(ack_skb))
304 dev_err(dev, "CMD/GET/SET ack: cannot allocate SKB\n"); 309 dev_err(dev, "CMD/GET/SET ack: cannot allocate SKB\n");
305 i2400m->ack_skb = ERR_PTR(-ENOMEM); 310 i2400m->ack_skb = ack_skb;
306 } else
307 i2400m->ack_skb = ack_skb;
308 spin_unlock_irqrestore(&i2400m->rx_lock, flags); 311 spin_unlock_irqrestore(&i2400m->rx_lock, flags);
309 complete(&i2400m->msg_completion); 312 complete(&i2400m->msg_completion);
310 return; 313 return;
311 314
312error_waiter_cancelled: 315error_waiter_cancelled:
313 kfree_skb(ack_skb); 316 if (!IS_ERR(ack_skb))
317 kfree_skb(ack_skb);
314error_no_waiter: 318error_no_waiter:
315 spin_unlock_irqrestore(&i2400m->rx_lock, flags); 319 spin_unlock_irqrestore(&i2400m->rx_lock, flags);
316 return;
317} 320}
318 321
319 322
@@ -718,7 +721,6 @@ void __i2400m_roq_queue(struct i2400m *i2400m, struct i2400m_roq *roq,
718out: 721out:
719 d_fnend(4, dev, "(i2400m %p roq %p skb %p sn %u nsn %d) = void\n", 722 d_fnend(4, dev, "(i2400m %p roq %p skb %p sn %u nsn %d) = void\n",
720 i2400m, roq, skb, sn, nsn); 723 i2400m, roq, skb, sn, nsn);
721 return;
722} 724}
723 725
724 726
@@ -743,12 +745,12 @@ unsigned __i2400m_roq_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
743 unsigned new_nws, nsn_itr; 745 unsigned new_nws, nsn_itr;
744 746
745 new_nws = __i2400m_roq_nsn(roq, sn); 747 new_nws = __i2400m_roq_nsn(roq, sn);
746 if (unlikely(new_nws >= 1024) && d_test(1)) { 748 /*
747 dev_err(dev, "SW BUG? __update_ws new_nws %u (sn %u ws %u)\n", 749 * For type 2(update_window_start) rx messages, there is no
748 new_nws, sn, roq->ws); 750 * need to check if the normalized sequence number is greater than 1023.
749 WARN_ON(1); 751 * Simply insert and deliver all packets to the host up to the
750 i2400m_roq_log_dump(i2400m, roq); 752 * window start.
751 } 753 */
752 skb_queue_walk_safe(&roq->queue, skb_itr, tmp_itr) { 754 skb_queue_walk_safe(&roq->queue, skb_itr, tmp_itr) {
753 roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb; 755 roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb;
754 nsn_itr = __i2400m_roq_nsn(roq, roq_data_itr->sn); 756 nsn_itr = __i2400m_roq_nsn(roq, roq_data_itr->sn);
@@ -798,7 +800,6 @@ void i2400m_roq_reset(struct i2400m *i2400m, struct i2400m_roq *roq)
798 } 800 }
799 roq->ws = 0; 801 roq->ws = 0;
800 d_fnend(2, dev, "(i2400m %p roq %p) = void\n", i2400m, roq); 802 d_fnend(2, dev, "(i2400m %p roq %p) = void\n", i2400m, roq);
801 return;
802} 803}
803 804
804 805
@@ -837,7 +838,6 @@ void i2400m_roq_queue(struct i2400m *i2400m, struct i2400m_roq *roq,
837 } 838 }
838 d_fnend(2, dev, "(i2400m %p roq %p skb %p lbn %u) = void\n", 839 d_fnend(2, dev, "(i2400m %p roq %p skb %p lbn %u) = void\n",
839 i2400m, roq, skb, lbn); 840 i2400m, roq, skb, lbn);
840 return;
841} 841}
842 842
843 843
@@ -863,7 +863,6 @@ void i2400m_roq_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
863 i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_WS, 863 i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_WS,
864 old_ws, len, sn, nsn, roq->ws); 864 old_ws, len, sn, nsn, roq->ws);
865 d_fnstart(2, dev, "(i2400m %p roq %p sn %u) = void\n", i2400m, roq, sn); 865 d_fnstart(2, dev, "(i2400m %p roq %p sn %u) = void\n", i2400m, roq, sn);
866 return;
867} 866}
868 867
869 868
@@ -890,33 +889,52 @@ void i2400m_roq_queue_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
890 i2400m, roq, skb, sn); 889 i2400m, roq, skb, sn);
891 len = skb_queue_len(&roq->queue); 890 len = skb_queue_len(&roq->queue);
892 nsn = __i2400m_roq_nsn(roq, sn); 891 nsn = __i2400m_roq_nsn(roq, sn);
892 /*
893 * For type 3(queue_update_window_start) rx messages, there is no
894 * need to check if the normalized sequence number is greater than 1023.
895 * Simply insert and deliver all packets to the host up to the
896 * window start.
897 */
893 old_ws = roq->ws; 898 old_ws = roq->ws;
894 if (unlikely(nsn >= 1024)) { 899 /* If the queue is empty, don't bother as we'd queue
895 dev_err(dev, "SW BUG? queue_update_ws nsn %u (sn %u ws %u)\n", 900 * it and immediately unqueue it -- just deliver it.
896 nsn, sn, roq->ws); 901 */
897 i2400m_roq_log_dump(i2400m, roq); 902 if (len == 0) {
898 i2400m_reset(i2400m, I2400M_RT_WARM); 903 struct i2400m_roq_data *roq_data;
899 } else { 904 roq_data = (struct i2400m_roq_data *) &skb->cb;
900 /* if the queue is empty, don't bother as we'd queue 905 i2400m_net_erx(i2400m, skb, roq_data->cs);
901 * it and inmediately unqueue it -- just deliver it */ 906 } else
902 if (len == 0) { 907 __i2400m_roq_queue(i2400m, roq, skb, sn, nsn);
903 struct i2400m_roq_data *roq_data; 908
904 roq_data = (struct i2400m_roq_data *) &skb->cb; 909 __i2400m_roq_update_ws(i2400m, roq, sn + 1);
905 i2400m_net_erx(i2400m, skb, roq_data->cs); 910 i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET_WS,
906 } 911 old_ws, len, sn, nsn, roq->ws);
907 else 912
908 __i2400m_roq_queue(i2400m, roq, skb, sn, nsn);
909 __i2400m_roq_update_ws(i2400m, roq, sn + 1);
910 i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET_WS,
911 old_ws, len, sn, nsn, roq->ws);
912 }
913 d_fnend(2, dev, "(i2400m %p roq %p skb %p sn %u) = void\n", 913 d_fnend(2, dev, "(i2400m %p roq %p skb %p sn %u) = void\n",
914 i2400m, roq, skb, sn); 914 i2400m, roq, skb, sn);
915 return;
916} 915}
917 916
918 917
919/* 918/*
919 * This routine destroys the memory allocated for rx_roq, when no
920 * other thread is accessing it. Access to rx_roq is refcounted by
921 * rx_roq_refcount, hence memory allocated must be destroyed when
922 * rx_roq_refcount becomes zero. This routine gets executed when
923 * rx_roq_refcount becomes zero.
924 */
925void i2400m_rx_roq_destroy(struct kref *ref)
926{
927	unsigned itr;
928	struct i2400m *i2400m
929		= container_of(ref, struct i2400m, rx_roq_refcount);
	/* kref release callback: invoked via kref_put() once rx_roq_refcount
	 * drops to zero (see i2400m_rx_edata() and i2400m_rx_release()). */
	/* Flush any skbs still parked in each reorder queue, CIN 0..I2400M_RO_CIN. */
930	for (itr = 0; itr < I2400M_RO_CIN + 1; itr++)
931		__skb_queue_purge(&i2400m->rx_roq[itr].queue);
	/* The per-queue logs were carved from one allocation anchored at
	 * rx_roq[0].log (set up in i2400m_rx_setup()), so a single kfree frees
	 * them all. */
932	kfree(i2400m->rx_roq[0].log);
933	kfree(i2400m->rx_roq);
	/* NULL the pointer so i2400m_rx_edata() can detect the queues are gone. */
934	i2400m->rx_roq = NULL;
935}
936
937/*
920 * Receive and send up an extended data packet 938 * Receive and send up an extended data packet
921 * 939 *
922 * @i2400m: device descriptor 940 * @i2400m: device descriptor
@@ -969,6 +987,7 @@ void i2400m_rx_edata(struct i2400m *i2400m, struct sk_buff *skb_rx,
969 unsigned ro_needed, ro_type, ro_cin, ro_sn; 987 unsigned ro_needed, ro_type, ro_cin, ro_sn;
970 struct i2400m_roq *roq; 988 struct i2400m_roq *roq;
971 struct i2400m_roq_data *roq_data; 989 struct i2400m_roq_data *roq_data;
990 unsigned long flags;
972 991
973 BUILD_BUG_ON(ETH_HLEN > sizeof(*hdr)); 992 BUILD_BUG_ON(ETH_HLEN > sizeof(*hdr));
974 993
@@ -1007,7 +1026,16 @@ void i2400m_rx_edata(struct i2400m *i2400m, struct sk_buff *skb_rx,
1007 ro_cin = (reorder >> I2400M_RO_CIN_SHIFT) & I2400M_RO_CIN; 1026 ro_cin = (reorder >> I2400M_RO_CIN_SHIFT) & I2400M_RO_CIN;
1008 ro_sn = (reorder >> I2400M_RO_SN_SHIFT) & I2400M_RO_SN; 1027 ro_sn = (reorder >> I2400M_RO_SN_SHIFT) & I2400M_RO_SN;
1009 1028
1029 spin_lock_irqsave(&i2400m->rx_lock, flags);
1010 roq = &i2400m->rx_roq[ro_cin]; 1030 roq = &i2400m->rx_roq[ro_cin];
1031 if (roq == NULL) {
1032 kfree_skb(skb); /* rx_roq is already destroyed */
1033 spin_unlock_irqrestore(&i2400m->rx_lock, flags);
1034 goto error;
1035 }
1036 kref_get(&i2400m->rx_roq_refcount);
1037 spin_unlock_irqrestore(&i2400m->rx_lock, flags);
1038
1011 roq_data = (struct i2400m_roq_data *) &skb->cb; 1039 roq_data = (struct i2400m_roq_data *) &skb->cb;
1012 roq_data->sn = ro_sn; 1040 roq_data->sn = ro_sn;
1013 roq_data->cs = cs; 1041 roq_data->cs = cs;
@@ -1034,6 +1062,10 @@ void i2400m_rx_edata(struct i2400m *i2400m, struct sk_buff *skb_rx,
1034 default: 1062 default:
1035 dev_err(dev, "HW BUG? unknown reorder type %u\n", ro_type); 1063 dev_err(dev, "HW BUG? unknown reorder type %u\n", ro_type);
1036 } 1064 }
1065
1066 spin_lock_irqsave(&i2400m->rx_lock, flags);
1067 kref_put(&i2400m->rx_roq_refcount, i2400m_rx_roq_destroy);
1068 spin_unlock_irqrestore(&i2400m->rx_lock, flags);
1037 } 1069 }
1038 else 1070 else
1039 i2400m_net_erx(i2400m, skb, cs); 1071 i2400m_net_erx(i2400m, skb, cs);
@@ -1041,7 +1073,6 @@ error_skb_clone:
1041error: 1073error:
1042 d_fnend(2, dev, "(i2400m %p skb_rx %p single %u payload %p " 1074 d_fnend(2, dev, "(i2400m %p skb_rx %p single %u payload %p "
1043 "size %zu) = void\n", i2400m, skb_rx, single_last, payload, size); 1075 "size %zu) = void\n", i2400m, skb_rx, single_last, payload, size);
1044 return;
1045} 1076}
1046 1077
1047 1078
@@ -1344,6 +1375,7 @@ int i2400m_rx_setup(struct i2400m *i2400m)
1344 __i2400m_roq_init(&i2400m->rx_roq[itr]); 1375 __i2400m_roq_init(&i2400m->rx_roq[itr]);
1345 i2400m->rx_roq[itr].log = &rd[itr]; 1376 i2400m->rx_roq[itr].log = &rd[itr];
1346 } 1377 }
1378 kref_init(&i2400m->rx_roq_refcount);
1347 } 1379 }
1348 return 0; 1380 return 0;
1349 1381
@@ -1357,12 +1389,12 @@ error_roq_alloc:
1357/* Tear down the RX queue and infrastructure */ 1389/* Tear down the RX queue and infrastructure */
1358void i2400m_rx_release(struct i2400m *i2400m) 1390void i2400m_rx_release(struct i2400m *i2400m)
1359{ 1391{
1392 unsigned long flags;
1393
1360 if (i2400m->rx_reorder) { 1394 if (i2400m->rx_reorder) {
1361 unsigned itr; 1395 spin_lock_irqsave(&i2400m->rx_lock, flags);
1362 for(itr = 0; itr < I2400M_RO_CIN + 1; itr++) 1396 kref_put(&i2400m->rx_roq_refcount, i2400m_rx_roq_destroy);
1363 __skb_queue_purge(&i2400m->rx_roq[itr].queue); 1397 spin_unlock_irqrestore(&i2400m->rx_lock, flags);
1364 kfree(i2400m->rx_roq[0].log);
1365 kfree(i2400m->rx_roq);
1366 } 1398 }
1367 /* at this point, nothing can be received... */ 1399 /* at this point, nothing can be received... */
1368 i2400m_report_hook_flush(i2400m); 1400 i2400m_report_hook_flush(i2400m);