author	Dhananjay Phadke <dhananjay@netxen.com>	2008-03-17 22:59:50 -0400
committer	Jeff Garzik <jeff@garzik.org>	2008-03-25 23:16:18 -0400
commit	ba53e6b4878e07411826312c59bfe49561594b6e (patch)
tree	f9275465c58ebdea37a81ef4796ed29e792bea81 /drivers/net/netxen/netxen_nic_init.c
parent	05aaa02d799e8e9548d57ac92fcb05e783027341 (diff)
netxen: remove low level tx lock
o Eliminate the tx lock in the netxen adapter struct; instead take the netdev tx lock where appropriate.
o Remove the old "concurrent transmit" code that unnecessarily drops and reacquires the tx lock in hard_xmit_frame(); that path is already serialized by the netdev xmit lock.
o Reduce the scope of the tx lock in tx cleanup: tx cleanup operates on a different section of the ring than the transmitting CPUs and is guarded by the producer and consumer indices.

This fixes a race caused by rx softirq preemption on realtime kernels.

Signed-off-by: Dhananjay Phadke <dhananjay@netxen.com>
Tested-by: Vernon Mauery <mauery@us.ibm.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
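For context, the pattern this commit moves to is: the completion walk over the tx ring is lock-free, because only the cleanup path advances its own consumer index over slots the hardware has already finished, and the netdev tx lock is taken only briefly to wake a stopped queue. Below is a minimal userspace C sketch of that pattern, not the driver code itself; names such as tx_ring, hw_consumer and queue_stopped are illustrative stand-ins, with a pthread mutex and C11 atomics standing in for netif_tx_lock() and smp_mb().

/* Sketch only: models the locking pattern described above, not netxen code. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

#define RING_SIZE         256
#define MAX_STATUS_HANDLE 64

struct tx_ring {
	void *skb[RING_SIZE];               /* completed buffers to free */
	unsigned int last_consumer;         /* advanced only by cleanup */
	_Atomic unsigned int hw_consumer;   /* advanced by the "hardware" */
	atomic_bool queue_stopped;          /* set by the xmit path */
	pthread_mutex_t tx_lock;            /* stands in for netif_tx_lock() */
};

/* Reclaim completed descriptors; returns true once fully caught up. */
static bool tx_cleanup(struct tx_ring *r)
{
	unsigned int consumer = atomic_load(&r->hw_consumer);
	unsigned int last = r->last_consumer;
	int count = 0;

	/* Lock-free walk: only the cleanup path touches [last, consumer). */
	while (last != consumer && count < MAX_STATUS_HANDLE) {
		free(r->skb[last]);                 /* dev_kfree_skb_any() analogue */
		r->skb[last] = NULL;
		last = (last + 1) % RING_SIZE;
		count++;
	}

	if (count) {
		r->last_consumer = last;
		atomic_thread_fence(memory_order_seq_cst);      /* smp_mb() analogue */
		if (atomic_load(&r->queue_stopped)) {
			pthread_mutex_lock(&r->tx_lock);        /* netif_tx_lock() */
			atomic_store(&r->queue_stopped, false); /* netif_wake_queue() */
			pthread_mutex_unlock(&r->tx_lock);
		}
	}

	/* Re-read the hardware consumer to decide whether more work remains. */
	return r->last_consumer == atomic_load(&r->hw_consumer);
}

Because the xmit path and the cleanup path never operate on the same ring slots at the same time, the spinlock that used to cover the whole walk only served to serialize the queue wakeup; the short tx-lock critical section plus the memory barriers now cover that.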
Diffstat (limited to 'drivers/net/netxen/netxen_nic_init.c')
-rw-r--r--	drivers/net/netxen/netxen_nic_init.c	89
1 file changed, 18 insertions(+), 71 deletions(-)
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 64fc18d4afb6..fe646187aa86 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -1197,96 +1197,50 @@ u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctxid, int max)
 /* Process Command status ring */
 int netxen_process_cmd_ring(struct netxen_adapter *adapter)
 {
-	u32 last_consumer;
-	u32 consumer;
-	int count1 = 0;
-	int count2 = 0;
+	u32 last_consumer, consumer;
+	int count = 0, i;
 	struct netxen_cmd_buffer *buffer;
-	struct pci_dev *pdev;
+	struct pci_dev *pdev = adapter->pdev;
+	struct net_device *netdev = adapter->netdev;
 	struct netxen_skb_frag *frag;
-	u32 i;
-	int done;
+	int done = 0;
 
-	spin_lock(&adapter->tx_lock);
 	last_consumer = adapter->last_cmd_consumer;
-	DPRINTK(INFO, "procesing xmit complete\n");
-	/* we assume in this case that there is only one port and that is
-	 * port #1...changes need to be done in firmware to indicate port
-	 * number as part of the descriptor. This way we will be able to get
-	 * the netdev which is associated with that device.
-	 */
-
 	consumer = le32_to_cpu(*(adapter->cmd_consumer));
-	if (last_consumer == consumer) {	/* Ring is empty */
-		DPRINTK(INFO, "last_consumer %d == consumer %d\n",
-			last_consumer, consumer);
-		spin_unlock(&adapter->tx_lock);
-		return 1;
-	}
-
-	adapter->proc_cmd_buf_counter++;
-	/*
-	 * Not needed - does not seem to be used anywhere.
-	 * adapter->cmd_consumer = consumer;
-	 */
-	spin_unlock(&adapter->tx_lock);
 
-	while ((last_consumer != consumer) && (count1 < MAX_STATUS_HANDLE)) {
+	while (last_consumer != consumer) {
 		buffer = &adapter->cmd_buf_arr[last_consumer];
-		pdev = adapter->pdev;
 		if (buffer->skb) {
 			frag = &buffer->frag_array[0];
 			pci_unmap_single(pdev, frag->dma, frag->length,
					 PCI_DMA_TODEVICE);
 			frag->dma = 0ULL;
 			for (i = 1; i < buffer->frag_count; i++) {
-				DPRINTK(INFO, "getting fragment no %d\n", i);
 				frag++;	/* Get the next frag */
 				pci_unmap_page(pdev, frag->dma, frag->length,
					       PCI_DMA_TODEVICE);
 				frag->dma = 0ULL;
 			}
 
-			adapter->stats.skbfreed++;
+			adapter->stats.xmitfinished++;
 			dev_kfree_skb_any(buffer->skb);
 			buffer->skb = NULL;
-		} else if (adapter->proc_cmd_buf_counter == 1) {
-			adapter->stats.txnullskb++;
-		}
-		if (unlikely(netif_queue_stopped(adapter->netdev)
-			     && netif_carrier_ok(adapter->netdev))
-		    && ((jiffies - adapter->netdev->trans_start) >
-			adapter->netdev->watchdog_timeo)) {
-			SCHEDULE_WORK(&adapter->tx_timeout_task);
 		}
 
 		last_consumer = get_next_index(last_consumer,
					       adapter->max_tx_desc_count);
-		count1++;
+		if (++count >= MAX_STATUS_HANDLE)
+			break;
 	}
 
-	count2 = 0;
-	spin_lock(&adapter->tx_lock);
-	if ((--adapter->proc_cmd_buf_counter) == 0) {
+	if (count) {
 		adapter->last_cmd_consumer = last_consumer;
-		while ((adapter->last_cmd_consumer != consumer)
-		       && (count2 < MAX_STATUS_HANDLE)) {
-			buffer =
-			    &adapter->cmd_buf_arr[adapter->last_cmd_consumer];
-			count2++;
-			if (buffer->skb)
-				break;
-			else
-				adapter->last_cmd_consumer =
-				    get_next_index(adapter->last_cmd_consumer,
						   adapter->max_tx_desc_count);
-		}
-	}
-	if (count1 || count2) {
-		if (netif_queue_stopped(adapter->netdev)
-		    && (adapter->flags & NETXEN_NETDEV_STATUS)) {
-			netif_wake_queue(adapter->netdev);
-			adapter->flags &= ~NETXEN_NETDEV_STATUS;
+		smp_mb();
+		if (netif_queue_stopped(netdev) && netif_running(netdev)) {
+			netif_tx_lock(netdev);
+			netif_wake_queue(netdev);
+			smp_mb();
+			netif_tx_unlock(netdev);
 		}
 	}
 	/*
@@ -1302,16 +1256,9 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
 	 * There is still a possible race condition and the host could miss an
 	 * interrupt. The card has to take care of this.
 	 */
-	if (adapter->last_cmd_consumer == consumer &&
-	    (((adapter->cmd_producer + 1) %
-	      adapter->max_tx_desc_count) == adapter->last_cmd_consumer)) {
-		consumer = le32_to_cpu(*(adapter->cmd_consumer));
-	}
-	done = (adapter->last_cmd_consumer == consumer);
+	consumer = le32_to_cpu(*(adapter->cmd_consumer));
+	done = (last_consumer == consumer);
 
-	spin_unlock(&adapter->tx_lock);
-	DPRINTK(INFO, "last consumer is %d in %s\n", last_consumer,
-		__FUNCTION__);
 	return (done);
 }
 