author    David Woodhouse <dwmw2@infradead.org>    2007-12-09 23:54:27 -0500
committer David S. Miller <davem@davemloft.net>    2008-01-28 18:06:30 -0500
commit    2eb188a1c57ae79283cee951c317bd191cf1ca56
tree      6fa459e2d171f035c97af88bfa5957dd544b4413 /drivers/net/wireless/libertas/tx.c
parent    b8d40bc9c9099943cbcf18d285bf241f1f080a44
libertas: Move actual transmission to main thread
The locking issues with TX, especially TX from multiple netdevs, get _so_
much easier if you do it like this.

Signed-off-by: David Woodhouse <dwmw2@infradead.org>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
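For context, the pattern this patch moves to is a producer/consumer hand-off: hard_start_xmit() only stages the frame in tx_pending_buf under driver_lock and wakes the driver's main thread via waitq, and the main thread later performs the actual hw_host_to_card() transfer. The sketch below illustrates that hand-off in isolation; it is not the libertas code itself. Only the field names taken from the diff (driver_lock, waitq, tx_pending_buf, tx_pending_len, hw_host_to_card) correspond to the driver; the demo_* names, the struct layout, and the simplified hw_host_to_card signature are invented for the sketch.

/* Minimal sketch of the deferred-TX hand-off this patch introduces.
 * NOT the libertas code; demo_* names and types are stand-ins. */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct demo_priv {
        spinlock_t driver_lock;
        wait_queue_head_t waitq;
        struct net_device *dev;
        u8 tx_pending_buf[2048];
        int tx_pending_len;     /* 0: idle, -1: being filled, >0: ready */
        /* simplified stand-in for the driver's hw_host_to_card() hook */
        int (*hw_host_to_card)(struct demo_priv *priv, u8 *buf, int len);
};

/* netdev side: runs in hard_start_xmit() context.  No bus I/O happens
 * here; the frame is only staged and the main thread is woken up. */
static int demo_hard_start_xmit(struct demo_priv *priv, struct sk_buff *skb)
{
        unsigned long flags;

        if (skb->len > sizeof(priv->tx_pending_buf)) {
                /* drop oversized frames but still return OK, as the driver does */
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }

        spin_lock_irqsave(&priv->driver_lock, flags);
        if (priv->tx_pending_len) {
                /* a concurrent caller got in first (in libertas: the mesh netdev) */
                spin_unlock_irqrestore(&priv->driver_lock, flags);
                return NETDEV_TX_BUSY;
        }
        priv->tx_pending_len = -1;              /* claim the buffer */
        netif_stop_queue(priv->dev);
        spin_unlock_irqrestore(&priv->driver_lock, flags);

        skb_copy_from_linear_data(skb, priv->tx_pending_buf, skb->len);

        spin_lock_irqsave(&priv->driver_lock, flags);
        priv->tx_pending_len = skb->len;        /* publish the staged frame */
        spin_unlock_irqrestore(&priv->driver_lock, flags);

        dev_kfree_skb_any(skb);
        wake_up(&priv->waitq);                  /* kick the main thread */
        return NETDEV_TX_OK;
}

/* main-thread side: the one place that actually talks to the hardware,
 * so TX is naturally serialized against command traffic. */
static void demo_thread_send_pending(struct demo_priv *priv)
{
        unsigned long flags;
        int len;

        spin_lock_irqsave(&priv->driver_lock, flags);
        len = priv->tx_pending_len;
        spin_unlock_irqrestore(&priv->driver_lock, flags);

        if (len <= 0)
                return;

        priv->hw_host_to_card(priv, priv->tx_pending_buf, len);

        spin_lock_irqsave(&priv->driver_lock, flags);
        priv->tx_pending_len = 0;
        spin_unlock_irqrestore(&priv->driver_lock, flags);
        netif_wake_queue(priv->dev);
}

Because only the main thread touches the hardware, the netdev hook no longer needs to reason about download state, power-save state, or command/data ordering; it just stages one frame at a time and stops its queues until the thread has drained the buffer.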
Diffstat (limited to 'drivers/net/wireless/libertas/tx.c')
-rw-r--r--  drivers/net/wireless/libertas/tx.c  103
1 file changed, 43 insertions, 60 deletions
diff --git a/drivers/net/wireless/libertas/tx.c b/drivers/net/wireless/libertas/tx.c
index 300aa05edeb9..e2141f0a67d4 100644
--- a/drivers/net/wireless/libertas/tx.c
+++ b/drivers/net/wireless/libertas/tx.c
@@ -67,39 +67,45 @@ int lbs_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	lbs_deb_enter(LBS_DEB_TX);
 
-	ret = NETDEV_TX_BUSY;
-
-	if (priv->dnld_sent) {
-		lbs_pr_alert( "TX error: dnld_sent = %d, not sending\n",
-		       priv->dnld_sent);
-		goto done;
-	}
-
-	if (priv->currenttxskb) {
-		lbs_pr_err("%s while TX skb pending\n", __func__);
-		goto done;
-	}
+	ret = NETDEV_TX_OK;
 
-	if ((priv->psstate == PS_STATE_SLEEP) ||
-	    (priv->psstate == PS_STATE_PRE_SLEEP)) {
-		lbs_pr_alert("TX error: packet xmit in %ssleep mode\n",
-			     priv->psstate == PS_STATE_SLEEP?"":"pre-");
-		goto done;
-	}
+	/* We need to protect against the queues being restarted before
+	   we get round to stopping them */
+	spin_lock_irqsave(&priv->driver_lock, flags);
 
 	if (priv->surpriseremoved)
-		goto drop;
+		goto free;
 
 	if (!skb->len || (skb->len > MRVDRV_ETH_TX_PACKET_BUFFER_SIZE)) {
 		lbs_deb_tx("tx err: skb length %d 0 or > %zd\n",
 		       skb->len, MRVDRV_ETH_TX_PACKET_BUFFER_SIZE);
 		/* We'll never manage to send this one; drop it and return 'OK' */
-		goto drop;
+
+		priv->stats.tx_dropped++;
+		priv->stats.tx_errors++;
+		goto free;
+	}
+
+
+	netif_stop_queue(priv->dev);
+	if (priv->mesh_dev)
+		netif_stop_queue(priv->mesh_dev);
+
+	if (priv->tx_pending_len) {
+		/* This can happen if packets come in on the mesh and eth
+		   device simultaneously -- there's no mutual exclusion on
+		   hard_start_xmit() calls between devices. */
+		lbs_deb_tx("Packet on %s while busy\n", dev->name);
+		ret = NETDEV_TX_BUSY;
+		goto unlock;
 	}
 
+	priv->tx_pending_len = -1;
+	spin_unlock_irqrestore(&priv->driver_lock, flags);
+
 	lbs_deb_hex(LBS_DEB_TX, "TX Data", skb->data, min_t(unsigned int, skb->len, 100));
 
-	txpd = (void *)priv->tmptxbuf;
+	txpd = (void *)priv->tx_pending_buf;
 	memset(txpd, 0, sizeof(struct txpd));
 
 	p802x_hdr = skb->data;
@@ -134,54 +140,31 @@ int lbs_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	memcpy(&txpd[1], p802x_hdr, le16_to_cpu(txpd->tx_packet_length));
 
-	/* We need to protect against the queues being restarted before
-	   we get round to stopping them */
 	spin_lock_irqsave(&priv->driver_lock, flags);
+	priv->tx_pending_len = pkt_len + sizeof(struct txpd);
 
-	ret = priv->hw_host_to_card(priv, MVMS_DAT, priv->tmptxbuf,
-				    pkt_len + sizeof(struct txpd));
-
-	if (!ret) {
-		lbs_deb_tx("%s succeeds\n", __func__);
-
-		/* Stop processing outgoing pkts before submitting */
-		netif_stop_queue(priv->dev);
-		if (priv->mesh_dev)
-			netif_stop_queue(priv->mesh_dev);
-
-		priv->stats.tx_packets++;
-		priv->stats.tx_bytes += skb->len;
-
-		dev->trans_start = jiffies;
+	lbs_deb_tx("%s lined up packet\n", __func__);
 
-		if (priv->monitormode != LBS_MONITOR_OFF) {
-			/* Keep the skb to echo it back once Tx feedback is
-			   received from FW */
-			skb_orphan(skb);
+	priv->stats.tx_packets++;
+	priv->stats.tx_bytes += skb->len;
 
-			/* Keep the skb around for when we get feedback */
-			priv->currenttxskb = skb;
-		} else
-			dev_kfree_skb_any(skb);
-
-	}
-
-	spin_unlock_irqrestore(&priv->driver_lock, flags);
+	dev->trans_start = jiffies;
 
-	if (ret) {
-		lbs_deb_tx("tx err: hw_host_to_card returned 0x%X\n", ret);
-drop:
-		priv->stats.tx_dropped++;
-		priv->stats.tx_errors++;
+	if (priv->monitormode != LBS_MONITOR_OFF) {
+		/* Keep the skb to echo it back once Tx feedback is
+		   received from FW */
+		skb_orphan(skb);
 
+		/* Keep the skb around for when we get feedback */
+		priv->currenttxskb = skb;
+	} else {
+ free:
 		dev_kfree_skb_any(skb);
 	}
+ unlock:
+	spin_unlock_irqrestore(&priv->driver_lock, flags);
+	wake_up(&priv->waitq);
 
-	/* Even if we dropped the packet, return OK. Otherwise the
-	   packet gets requeued. */
-	ret = NETDEV_TX_OK;
-
-done:
 	lbs_deb_leave_args(LBS_DEB_TX, "ret %d", ret);
 	return ret;
 }