author    | Dhananjay Phadke <dhananjay@netxen.com> | 2008-03-17 22:59:50 -0400
committer | Jeff Garzik <jeff@garzik.org>           | 2008-03-25 23:16:18 -0400
commit    | ba53e6b4878e07411826312c59bfe49561594b6e
tree      | f9275465c58ebdea37a81ef4796ed29e792bea81 /drivers/net/netxen
parent    | 05aaa02d799e8e9548d57ac92fcb05e783027341
netxen: remove low level tx lock
o eliminate the tx lock in the netxen adapter struct and instead take the
  netdev tx lock where needed.
o remove the old "concurrent transmit" code that unnecessarily dropped and
  reacquired the tx lock in hard_start_xmit(); transmission is already
  serialized by the netdev xmit lock.
o reduce the scope of the tx lock in tx cleanup. tx cleanup operates on a
  different section of the ring than the transmitting CPUs and is guarded by
  the producer and consumer indices. This fixes a race caused by rx softirq
  preemption on realtime kernels. (A condensed sketch of the reworked
  transmit path follows the sign-offs.)
Signed-off-by: Dhananjay Phadke <dhananjay@netxen.com>
Tested-by: Vernon Mauery <mauery@us.ibm.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
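For orientation before the hunks below: with the adapter tx_lock gone, the only
admission control left in the transmit path is a free-descriptor check against
the cleanup side's consumer index. The following is a condensed sketch of the
post-patch netxen_nic_xmit_frame(), assembled from the netxen_nic_main.c hunks
in this commit; descriptor setup, TSO handling and DMA mapping are elided, and
the netdev_priv() lookup at the top is the usual idiom rather than a line shown
in the diff.

static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct netxen_adapter *adapter = netdev_priv(netdev);
	u32 num_txd = adapter->max_tx_desc_count;
	u32 producer, consumer;
	int frag_count, no_of_desc;

	frag_count = skb_shinfo(skb)->nr_frags + 1;
	/* four fragments fit into one command descriptor */
	no_of_desc = (frag_count + 3) >> 2;

	/* No private tx_lock: the stack already serializes callers via the
	 * netdev xmit lock.  Only tx cleanup advances last_cmd_consumer. */
	producer = adapter->cmd_producer;
	smp_mb();
	consumer = adapter->last_cmd_consumer;
	if ((no_of_desc + 2) > find_diff_among(producer, consumer, num_txd)) {
		netif_stop_queue(netdev);
		smp_mb();
		return NETDEV_TX_BUSY;
	}

	/* ... map fragments and fill command descriptors, advancing
	 * 'producer' via get_next_index(producer, num_txd) ... */

	adapter->cmd_producer = producer;
	netxen_nic_update_cmd_producer(adapter, adapter->cmd_producer);

	adapter->stats.txbytes += skb->len;
	adapter->stats.xmitcalled++;
	netdev->trans_start = jiffies;

	return NETDEV_TX_OK;
}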
Diffstat (limited to 'drivers/net/netxen')
-rw-r--r-- | drivers/net/netxen/netxen_nic.h         | 14
-rw-r--r-- | drivers/net/netxen/netxen_nic_ethtool.c |  2
-rw-r--r-- | drivers/net/netxen/netxen_nic_init.c    | 89
-rw-r--r-- | drivers/net/netxen/netxen_nic_main.c    | 95
4 files changed, 43 insertions, 157 deletions
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 8b6546ccb47b..070421b9e4f9 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -85,7 +85,7 @@
85 | (sizeof(struct netxen_cmd_buffer) * adapter->max_tx_desc_count) | 85 | (sizeof(struct netxen_cmd_buffer) * adapter->max_tx_desc_count) |
86 | #define RCV_BUFFSIZE \ | 86 | #define RCV_BUFFSIZE \ |
87 | (sizeof(struct netxen_rx_buffer) * rcv_desc->max_rx_desc_count) | 87 | (sizeof(struct netxen_rx_buffer) * rcv_desc->max_rx_desc_count) |
88 | #define find_diff_among(a,b,range) ((a)<=(b)?((b)-(a)):((b)+(range)-(a))) | 88 | #define find_diff_among(a,b,range) ((a)<(b)?((b)-(a)):((b)+(range)-(a))) |
89 | 89 | ||
90 | #define NETXEN_NETDEV_STATUS 0x1 | 90 | #define NETXEN_NETDEV_STATUS 0x1 |
91 | #define NETXEN_RCV_PRODUCER_OFFSET 0 | 91 | #define NETXEN_RCV_PRODUCER_OFFSET 0 |
@@ -204,7 +204,7 @@ enum {
204 | ? RCV_DESC_LRO : \ | 204 | ? RCV_DESC_LRO : \ |
205 | (RCV_DESC_NORMAL))) | 205 | (RCV_DESC_NORMAL))) |
206 | 206 | ||
207 | #define MAX_CMD_DESCRIPTORS 1024 | 207 | #define MAX_CMD_DESCRIPTORS 4096 |
208 | #define MAX_RCV_DESCRIPTORS 16384 | 208 | #define MAX_RCV_DESCRIPTORS 16384 |
209 | #define MAX_CMD_DESCRIPTORS_HOST (MAX_CMD_DESCRIPTORS / 4) | 209 | #define MAX_CMD_DESCRIPTORS_HOST (MAX_CMD_DESCRIPTORS / 4) |
210 | #define MAX_RCV_DESCRIPTORS_1G (MAX_RCV_DESCRIPTORS / 4) | 210 | #define MAX_RCV_DESCRIPTORS_1G (MAX_RCV_DESCRIPTORS / 4) |
@@ -824,9 +824,7 @@ struct netxen_adapter_stats {
824 | u64 uphcong; | 824 | u64 uphcong; |
825 | u64 upmcong; | 825 | u64 upmcong; |
826 | u64 updunno; | 826 | u64 updunno; |
827 | u64 skbfreed; | ||
828 | u64 txdropped; | 827 | u64 txdropped; |
829 | u64 txnullskb; | ||
830 | u64 csummed; | 828 | u64 csummed; |
831 | u64 no_rcv; | 829 | u64 no_rcv; |
832 | u64 rxbytes; | 830 | u64 rxbytes; |
@@ -888,8 +886,6 @@ struct netxen_adapter {
888 | int mtu; | 886 | int mtu; |
889 | int portnum; | 887 | int portnum; |
890 | 888 | ||
891 | spinlock_t tx_lock; | ||
892 | spinlock_t lock; | ||
893 | struct work_struct watchdog_task; | 889 | struct work_struct watchdog_task; |
894 | struct timer_list watchdog_timer; | 890 | struct timer_list watchdog_timer; |
895 | struct work_struct tx_timeout_task; | 891 | struct work_struct tx_timeout_task; |
@@ -898,16 +894,12 @@ struct netxen_adapter {
898 | 894 | ||
899 | u32 cmd_producer; | 895 | u32 cmd_producer; |
900 | __le32 *cmd_consumer; | 896 | __le32 *cmd_consumer; |
901 | |||
902 | u32 last_cmd_consumer; | 897 | u32 last_cmd_consumer; |
898 | |||
903 | u32 max_tx_desc_count; | 899 | u32 max_tx_desc_count; |
904 | u32 max_rx_desc_count; | 900 | u32 max_rx_desc_count; |
905 | u32 max_jumbo_rx_desc_count; | 901 | u32 max_jumbo_rx_desc_count; |
906 | u32 max_lro_rx_desc_count; | 902 | u32 max_lro_rx_desc_count; |
907 | /* Num of instances active on cmd buffer ring */ | ||
908 | u32 proc_cmd_buf_counter; | ||
909 | |||
910 | u32 num_threads, total_threads; /*Use to keep track of xmit threads */ | ||
911 | 903 | ||
912 | u32 flags; | 904 | u32 flags; |
913 | u32 irq; | 905 | u32 irq; |
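A note on the two functional header changes above (the find_diff_among
comparison and the larger MAX_CMD_DESCRIPTORS): the transmit path now uses
find_diff_among(producer, consumer, ring_size) as its only gate, and, assuming
it is read as "number of free command descriptors" the way
netxen_nic_xmit_frame() uses it, the '<=' versus '<' difference is exactly the
empty-ring case. A tiny standalone C snippet (plain user space, not driver
code) shows the boundary value:

#include <stdio.h>

/* old: producer == consumer reports 0; new: it reports the whole ring */
#define find_diff_among_old(a, b, range) ((a) <= (b) ? ((b) - (a)) : ((b) + (range) - (a)))
#define find_diff_among_new(a, b, range) ((a) <  (b) ? ((b) - (a)) : ((b) + (range) - (a)))

int main(void)
{
	unsigned int ring = 1024;			/* MAX_CMD_DESCRIPTORS_HOST = 4096 / 4 */
	unsigned int producer = 100, consumer = 100;	/* idle, empty ring */

	/* With '<=' an idle ring would look completely full (0 free slots)
	 * and the queue could be stopped forever; with '<' the whole ring
	 * is correctly reported as available. */
	printf("old: %u free\n", find_diff_among_old(producer, consumer, ring));	/* 0 */
	printf("new: %u free\n", find_diff_among_new(producer, consumer, ring));	/* 1024 */
	return 0;
}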
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index 7a876f4b8db2..d324ea3bc7ba 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -70,9 +70,7 @@ static const struct netxen_nic_stats netxen_nic_gstrings_stats[] = {
70 | {"uphcong", NETXEN_NIC_STAT(stats.uphcong)}, | 70 | {"uphcong", NETXEN_NIC_STAT(stats.uphcong)}, |
71 | {"upmcong", NETXEN_NIC_STAT(stats.upmcong)}, | 71 | {"upmcong", NETXEN_NIC_STAT(stats.upmcong)}, |
72 | {"updunno", NETXEN_NIC_STAT(stats.updunno)}, | 72 | {"updunno", NETXEN_NIC_STAT(stats.updunno)}, |
73 | {"skb_freed", NETXEN_NIC_STAT(stats.skbfreed)}, | ||
74 | {"tx_dropped", NETXEN_NIC_STAT(stats.txdropped)}, | 73 | {"tx_dropped", NETXEN_NIC_STAT(stats.txdropped)}, |
75 | {"tx_null_skb", NETXEN_NIC_STAT(stats.txnullskb)}, | ||
76 | {"csummed", NETXEN_NIC_STAT(stats.csummed)}, | 74 | {"csummed", NETXEN_NIC_STAT(stats.csummed)}, |
77 | {"no_rcv", NETXEN_NIC_STAT(stats.no_rcv)}, | 75 | {"no_rcv", NETXEN_NIC_STAT(stats.no_rcv)}, |
78 | {"rx_bytes", NETXEN_NIC_STAT(stats.rxbytes)}, | 76 | {"rx_bytes", NETXEN_NIC_STAT(stats.rxbytes)}, |
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 64fc18d4afb6..fe646187aa86 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -1197,96 +1197,50 @@ u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctxid, int max)
1197 | /* Process Command status ring */ | 1197 | /* Process Command status ring */ |
1198 | int netxen_process_cmd_ring(struct netxen_adapter *adapter) | 1198 | int netxen_process_cmd_ring(struct netxen_adapter *adapter) |
1199 | { | 1199 | { |
1200 | u32 last_consumer; | 1200 | u32 last_consumer, consumer; |
1201 | u32 consumer; | 1201 | int count = 0, i; |
1202 | int count1 = 0; | ||
1203 | int count2 = 0; | ||
1204 | struct netxen_cmd_buffer *buffer; | 1202 | struct netxen_cmd_buffer *buffer; |
1205 | struct pci_dev *pdev; | 1203 | struct pci_dev *pdev = adapter->pdev; |
1204 | struct net_device *netdev = adapter->netdev; | ||
1206 | struct netxen_skb_frag *frag; | 1205 | struct netxen_skb_frag *frag; |
1207 | u32 i; | 1206 | int done = 0; |
1208 | int done; | ||
1209 | 1207 | ||
1210 | spin_lock(&adapter->tx_lock); | ||
1211 | last_consumer = adapter->last_cmd_consumer; | 1208 | last_consumer = adapter->last_cmd_consumer; |
1212 | DPRINTK(INFO, "procesing xmit complete\n"); | ||
1213 | /* we assume in this case that there is only one port and that is | ||
1214 | * port #1...changes need to be done in firmware to indicate port | ||
1215 | * number as part of the descriptor. This way we will be able to get | ||
1216 | * the netdev which is associated with that device. | ||
1217 | */ | ||
1218 | |||
1219 | consumer = le32_to_cpu(*(adapter->cmd_consumer)); | 1209 | consumer = le32_to_cpu(*(adapter->cmd_consumer)); |
1220 | if (last_consumer == consumer) { /* Ring is empty */ | ||
1221 | DPRINTK(INFO, "last_consumer %d == consumer %d\n", | ||
1222 | last_consumer, consumer); | ||
1223 | spin_unlock(&adapter->tx_lock); | ||
1224 | return 1; | ||
1225 | } | ||
1226 | |||
1227 | adapter->proc_cmd_buf_counter++; | ||
1228 | /* | ||
1229 | * Not needed - does not seem to be used anywhere. | ||
1230 | * adapter->cmd_consumer = consumer; | ||
1231 | */ | ||
1232 | spin_unlock(&adapter->tx_lock); | ||
1233 | 1210 | ||
1234 | while ((last_consumer != consumer) && (count1 < MAX_STATUS_HANDLE)) { | 1211 | while (last_consumer != consumer) { |
1235 | buffer = &adapter->cmd_buf_arr[last_consumer]; | 1212 | buffer = &adapter->cmd_buf_arr[last_consumer]; |
1236 | pdev = adapter->pdev; | ||
1237 | if (buffer->skb) { | 1213 | if (buffer->skb) { |
1238 | frag = &buffer->frag_array[0]; | 1214 | frag = &buffer->frag_array[0]; |
1239 | pci_unmap_single(pdev, frag->dma, frag->length, | 1215 | pci_unmap_single(pdev, frag->dma, frag->length, |
1240 | PCI_DMA_TODEVICE); | 1216 | PCI_DMA_TODEVICE); |
1241 | frag->dma = 0ULL; | 1217 | frag->dma = 0ULL; |
1242 | for (i = 1; i < buffer->frag_count; i++) { | 1218 | for (i = 1; i < buffer->frag_count; i++) { |
1243 | DPRINTK(INFO, "getting fragment no %d\n", i); | ||
1244 | frag++; /* Get the next frag */ | 1219 | frag++; /* Get the next frag */ |
1245 | pci_unmap_page(pdev, frag->dma, frag->length, | 1220 | pci_unmap_page(pdev, frag->dma, frag->length, |
1246 | PCI_DMA_TODEVICE); | 1221 | PCI_DMA_TODEVICE); |
1247 | frag->dma = 0ULL; | 1222 | frag->dma = 0ULL; |
1248 | } | 1223 | } |
1249 | 1224 | ||
1250 | adapter->stats.skbfreed++; | 1225 | adapter->stats.xmitfinished++; |
1251 | dev_kfree_skb_any(buffer->skb); | 1226 | dev_kfree_skb_any(buffer->skb); |
1252 | buffer->skb = NULL; | 1227 | buffer->skb = NULL; |
1253 | } else if (adapter->proc_cmd_buf_counter == 1) { | ||
1254 | adapter->stats.txnullskb++; | ||
1255 | } | ||
1256 | if (unlikely(netif_queue_stopped(adapter->netdev) | ||
1257 | && netif_carrier_ok(adapter->netdev)) | ||
1258 | && ((jiffies - adapter->netdev->trans_start) > | ||
1259 | adapter->netdev->watchdog_timeo)) { | ||
1260 | SCHEDULE_WORK(&adapter->tx_timeout_task); | ||
1261 | } | 1228 | } |
1262 | 1229 | ||
1263 | last_consumer = get_next_index(last_consumer, | 1230 | last_consumer = get_next_index(last_consumer, |
1264 | adapter->max_tx_desc_count); | 1231 | adapter->max_tx_desc_count); |
1265 | count1++; | 1232 | if (++count >= MAX_STATUS_HANDLE) |
1233 | break; | ||
1266 | } | 1234 | } |
1267 | 1235 | ||
1268 | count2 = 0; | 1236 | if (count) { |
1269 | spin_lock(&adapter->tx_lock); | ||
1270 | if ((--adapter->proc_cmd_buf_counter) == 0) { | ||
1271 | adapter->last_cmd_consumer = last_consumer; | 1237 | adapter->last_cmd_consumer = last_consumer; |
1272 | while ((adapter->last_cmd_consumer != consumer) | 1238 | smp_mb(); |
1273 | && (count2 < MAX_STATUS_HANDLE)) { | 1239 | if (netif_queue_stopped(netdev) && netif_running(netdev)) { |
1274 | buffer = | 1240 | netif_tx_lock(netdev); |
1275 | &adapter->cmd_buf_arr[adapter->last_cmd_consumer]; | 1241 | netif_wake_queue(netdev); |
1276 | count2++; | 1242 | smp_mb(); |
1277 | if (buffer->skb) | 1243 | netif_tx_unlock(netdev); |
1278 | break; | ||
1279 | else | ||
1280 | adapter->last_cmd_consumer = | ||
1281 | get_next_index(adapter->last_cmd_consumer, | ||
1282 | adapter->max_tx_desc_count); | ||
1283 | } | ||
1284 | } | ||
1285 | if (count1 || count2) { | ||
1286 | if (netif_queue_stopped(adapter->netdev) | ||
1287 | && (adapter->flags & NETXEN_NETDEV_STATUS)) { | ||
1288 | netif_wake_queue(adapter->netdev); | ||
1289 | adapter->flags &= ~NETXEN_NETDEV_STATUS; | ||
1290 | } | 1244 | } |
1291 | } | 1245 | } |
1292 | /* | 1246 | /* |
@@ -1302,16 +1256,9 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
1302 | * There is still a possible race condition and the host could miss an | 1256 | * There is still a possible race condition and the host could miss an |
1303 | * interrupt. The card has to take care of this. | 1257 | * interrupt. The card has to take care of this. |
1304 | */ | 1258 | */ |
1305 | if (adapter->last_cmd_consumer == consumer && | 1259 | consumer = le32_to_cpu(*(adapter->cmd_consumer)); |
1306 | (((adapter->cmd_producer + 1) % | 1260 | done = (last_consumer == consumer); |
1307 | adapter->max_tx_desc_count) == adapter->last_cmd_consumer)) { | ||
1308 | consumer = le32_to_cpu(*(adapter->cmd_consumer)); | ||
1309 | } | ||
1310 | done = (adapter->last_cmd_consumer == consumer); | ||
1311 | 1261 | ||
1312 | spin_unlock(&adapter->tx_lock); | ||
1313 | DPRINTK(INFO, "last consumer is %d in %s\n", last_consumer, | ||
1314 | __FUNCTION__); | ||
1315 | return (done); | 1262 | return (done); |
1316 | } | 1263 | } |
1317 | 1264 | ||
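Pulling the tail of the reworked netxen_process_cmd_ring() above into one
readable piece (a sketch, with the unmap/free loop elided): the adapter
tx_lock is gone, and the netdev tx lock is taken only on the rare path that
has to wake a stopped queue, presumably to order the wake-up against a
concurrent netif_stop_queue() from the transmit side so a just-stopped queue
cannot miss it.

	if (count) {
		adapter->last_cmd_consumer = last_consumer;
		smp_mb();	/* publish the new consumer before testing queue state */
		if (netif_queue_stopped(netdev) && netif_running(netdev)) {
			netif_tx_lock(netdev);
			netif_wake_queue(netdev);
			smp_mb();
			netif_tx_unlock(netdev);
		}
	}

	/* re-read the card's consumer index; 'done' tells the caller whether
	 * another cleanup pass is still needed */
	consumer = le32_to_cpu(*(adapter->cmd_consumer));
	done = (last_consumer == consumer);

	return (done);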
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 95955204ef59..dc4d593217c9 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -317,7 +317,6 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
317 | 317 | ||
318 | adapter->ahw.pdev = pdev; | 318 | adapter->ahw.pdev = pdev; |
319 | adapter->ahw.pci_func = pci_func_id; | 319 | adapter->ahw.pci_func = pci_func_id; |
320 | spin_lock_init(&adapter->tx_lock); | ||
321 | 320 | ||
322 | /* remap phys address */ | 321 | /* remap phys address */ |
323 | mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */ | 322 | mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */ |
@@ -533,7 +532,6 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
533 | adapter->watchdog_timer.data = (unsigned long)adapter; | 532 | adapter->watchdog_timer.data = (unsigned long)adapter; |
534 | INIT_WORK(&adapter->watchdog_task, netxen_watchdog_task); | 533 | INIT_WORK(&adapter->watchdog_task, netxen_watchdog_task); |
535 | adapter->ahw.pdev = pdev; | 534 | adapter->ahw.pdev = pdev; |
536 | adapter->proc_cmd_buf_counter = 0; | ||
537 | adapter->ahw.revision_id = pdev->revision; | 535 | adapter->ahw.revision_id = pdev->revision; |
538 | 536 | ||
539 | /* make sure Window == 1 */ | 537 | /* make sure Window == 1 */ |
@@ -952,41 +950,17 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
952 | struct netxen_skb_frag *buffrag; | 950 | struct netxen_skb_frag *buffrag; |
953 | unsigned int i; | 951 | unsigned int i; |
954 | 952 | ||
955 | u32 producer = 0; | 953 | u32 producer, consumer; |
956 | u32 saved_producer = 0; | 954 | u32 saved_producer = 0; |
957 | struct cmd_desc_type0 *hwdesc; | 955 | struct cmd_desc_type0 *hwdesc; |
958 | int k; | 956 | int k; |
959 | struct netxen_cmd_buffer *pbuf = NULL; | 957 | struct netxen_cmd_buffer *pbuf = NULL; |
960 | static int dropped_packet = 0; | ||
961 | int frag_count; | 958 | int frag_count; |
962 | u32 local_producer = 0; | ||
963 | u32 max_tx_desc_count = 0; | ||
964 | u32 last_cmd_consumer = 0; | ||
965 | int no_of_desc; | 959 | int no_of_desc; |
960 | u32 num_txd = adapter->max_tx_desc_count; | ||
966 | 961 | ||
967 | adapter->stats.xmitcalled++; | ||
968 | frag_count = skb_shinfo(skb)->nr_frags + 1; | 962 | frag_count = skb_shinfo(skb)->nr_frags + 1; |
969 | 963 | ||
970 | if (unlikely(skb->len <= 0)) { | ||
971 | dev_kfree_skb_any(skb); | ||
972 | adapter->stats.badskblen++; | ||
973 | return NETDEV_TX_OK; | ||
974 | } | ||
975 | |||
976 | if (frag_count > MAX_BUFFERS_PER_CMD) { | ||
977 | printk("%s: %s netxen_nic_xmit_frame: frag_count (%d) " | ||
978 | "too large, can handle only %d frags\n", | ||
979 | netxen_nic_driver_name, netdev->name, | ||
980 | frag_count, MAX_BUFFERS_PER_CMD); | ||
981 | adapter->stats.txdropped++; | ||
982 | if ((++dropped_packet & 0xff) == 0xff) | ||
983 | printk("%s: %s droppped packets = %d\n", | ||
984 | netxen_nic_driver_name, netdev->name, | ||
985 | dropped_packet); | ||
986 | |||
987 | return NETDEV_TX_OK; | ||
988 | } | ||
989 | |||
990 | /* There 4 fragments per descriptor */ | 964 | /* There 4 fragments per descriptor */ |
991 | no_of_desc = (frag_count + 3) >> 2; | 965 | no_of_desc = (frag_count + 3) >> 2; |
992 | if (netdev->features & NETIF_F_TSO) { | 966 | if (netdev->features & NETIF_F_TSO) { |
@@ -1001,27 +975,16 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1001 | } | 975 | } |
1002 | } | 976 | } |
1003 | 977 | ||
1004 | spin_lock_bh(&adapter->tx_lock); | 978 | producer = adapter->cmd_producer; |
1005 | if (adapter->total_threads >= MAX_XMIT_PRODUCERS) { | 979 | smp_mb(); |
1006 | goto out_requeue; | 980 | consumer = adapter->last_cmd_consumer; |
981 | if ((no_of_desc+2) > find_diff_among(producer, consumer, num_txd)) { | ||
982 | netif_stop_queue(netdev); | ||
983 | smp_mb(); | ||
984 | return NETDEV_TX_BUSY; | ||
1007 | } | 985 | } |
1008 | local_producer = adapter->cmd_producer; | ||
1009 | k = adapter->cmd_producer; | ||
1010 | max_tx_desc_count = adapter->max_tx_desc_count; | ||
1011 | last_cmd_consumer = adapter->last_cmd_consumer; | ||
1012 | if ((k + no_of_desc) >= | ||
1013 | ((last_cmd_consumer <= k) ? last_cmd_consumer + max_tx_desc_count : | ||
1014 | last_cmd_consumer)) { | ||
1015 | goto out_requeue; | ||
1016 | } | ||
1017 | k = get_index_range(k, max_tx_desc_count, no_of_desc); | ||
1018 | adapter->cmd_producer = k; | ||
1019 | adapter->total_threads++; | ||
1020 | adapter->num_threads++; | ||
1021 | 986 | ||
1022 | spin_unlock_bh(&adapter->tx_lock); | ||
1023 | /* Copy the descriptors into the hardware */ | 987 | /* Copy the descriptors into the hardware */ |
1024 | producer = local_producer; | ||
1025 | saved_producer = producer; | 988 | saved_producer = producer; |
1026 | hwdesc = &hw->cmd_desc_head[producer]; | 989 | hwdesc = &hw->cmd_desc_head[producer]; |
1027 | memset(hwdesc, 0, sizeof(struct cmd_desc_type0)); | 990 | memset(hwdesc, 0, sizeof(struct cmd_desc_type0)); |
@@ -1061,8 +1024,7 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1061 | /* move to next desc. if there is a need */ | 1024 | /* move to next desc. if there is a need */ |
1062 | if ((i & 0x3) == 0) { | 1025 | if ((i & 0x3) == 0) { |
1063 | k = 0; | 1026 | k = 0; |
1064 | producer = get_next_index(producer, | 1027 | producer = get_next_index(producer, num_txd); |
1065 | adapter->max_tx_desc_count); | ||
1066 | hwdesc = &hw->cmd_desc_head[producer]; | 1028 | hwdesc = &hw->cmd_desc_head[producer]; |
1067 | memset(hwdesc, 0, sizeof(struct cmd_desc_type0)); | 1029 | memset(hwdesc, 0, sizeof(struct cmd_desc_type0)); |
1068 | pbuf = &adapter->cmd_buf_arr[producer]; | 1030 | pbuf = &adapter->cmd_buf_arr[producer]; |
@@ -1080,7 +1042,6 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1080 | buffrag->dma = temp_dma; | 1042 | buffrag->dma = temp_dma; |
1081 | buffrag->length = temp_len; | 1043 | buffrag->length = temp_len; |
1082 | 1044 | ||
1083 | DPRINTK(INFO, "for loop. i=%d k=%d\n", i, k); | ||
1084 | switch (k) { | 1045 | switch (k) { |
1085 | case 0: | 1046 | case 0: |
1086 | hwdesc->buffer1_length = cpu_to_le16(temp_len); | 1047 | hwdesc->buffer1_length = cpu_to_le16(temp_len); |
@@ -1101,7 +1062,7 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1101 | } | 1062 | } |
1102 | frag++; | 1063 | frag++; |
1103 | } | 1064 | } |
1104 | producer = get_next_index(producer, adapter->max_tx_desc_count); | 1065 | producer = get_next_index(producer, num_txd); |
1105 | 1066 | ||
1106 | /* might change opcode to TX_TCP_LSO */ | 1067 | /* might change opcode to TX_TCP_LSO */ |
1107 | netxen_tso_check(adapter, &hw->cmd_desc_head[saved_producer], skb); | 1068 | netxen_tso_check(adapter, &hw->cmd_desc_head[saved_producer], skb); |
@@ -1128,7 +1089,7 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1128 | /* copy the first 64 bytes */ | 1089 | /* copy the first 64 bytes */ |
1129 | memcpy(((void *)hwdesc) + 2, | 1090 | memcpy(((void *)hwdesc) + 2, |
1130 | (void *)(skb->data), first_hdr_len); | 1091 | (void *)(skb->data), first_hdr_len); |
1131 | producer = get_next_index(producer, max_tx_desc_count); | 1092 | producer = get_next_index(producer, num_txd); |
1132 | 1093 | ||
1133 | if (more_hdr) { | 1094 | if (more_hdr) { |
1134 | hwdesc = &hw->cmd_desc_head[producer]; | 1095 | hwdesc = &hw->cmd_desc_head[producer]; |
@@ -1141,35 +1102,19 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1141 | hwdesc, | 1102 | hwdesc, |
1142 | (hdr_len - | 1103 | (hdr_len - |
1143 | first_hdr_len)); | 1104 | first_hdr_len)); |
1144 | producer = get_next_index(producer, max_tx_desc_count); | 1105 | producer = get_next_index(producer, num_txd); |
1145 | } | 1106 | } |
1146 | } | 1107 | } |
1147 | 1108 | ||
1148 | spin_lock_bh(&adapter->tx_lock); | 1109 | adapter->cmd_producer = producer; |
1149 | adapter->stats.txbytes += skb->len; | 1110 | adapter->stats.txbytes += skb->len; |
1150 | 1111 | ||
1151 | /* Code to update the adapter considering how many producer threads | 1112 | netxen_nic_update_cmd_producer(adapter, adapter->cmd_producer); |
1152 | are currently working */ | ||
1153 | if ((--adapter->num_threads) == 0) { | ||
1154 | /* This is the last thread */ | ||
1155 | u32 crb_producer = adapter->cmd_producer; | ||
1156 | netxen_nic_update_cmd_producer(adapter, crb_producer); | ||
1157 | wmb(); | ||
1158 | adapter->total_threads = 0; | ||
1159 | } | ||
1160 | 1113 | ||
1161 | adapter->stats.xmitfinished++; | 1114 | adapter->stats.xmitcalled++; |
1162 | netdev->trans_start = jiffies; | 1115 | netdev->trans_start = jiffies; |
1163 | 1116 | ||
1164 | spin_unlock_bh(&adapter->tx_lock); | ||
1165 | return NETDEV_TX_OK; | 1117 | return NETDEV_TX_OK; |
1166 | |||
1167 | out_requeue: | ||
1168 | netif_stop_queue(netdev); | ||
1169 | adapter->flags |= NETXEN_NETDEV_STATUS; | ||
1170 | |||
1171 | spin_unlock_bh(&adapter->tx_lock); | ||
1172 | return NETDEV_TX_BUSY; | ||
1173 | } | 1118 | } |
1174 | 1119 | ||
1175 | static void netxen_watchdog(unsigned long v) | 1120 | static void netxen_watchdog(unsigned long v) |
@@ -1194,9 +1139,13 @@ static void netxen_tx_timeout_task(struct work_struct *work)
1194 | printk(KERN_ERR "%s %s: transmit timeout, resetting.\n", | 1139 | printk(KERN_ERR "%s %s: transmit timeout, resetting.\n", |
1195 | netxen_nic_driver_name, adapter->netdev->name); | 1140 | netxen_nic_driver_name, adapter->netdev->name); |
1196 | 1141 | ||
1197 | netxen_nic_close(adapter->netdev); | 1142 | netxen_nic_disable_int(adapter); |
1198 | netxen_nic_open(adapter->netdev); | 1143 | napi_disable(&adapter->napi); |
1144 | |||
1199 | adapter->netdev->trans_start = jiffies; | 1145 | adapter->netdev->trans_start = jiffies; |
1146 | |||
1147 | napi_enable(&adapter->napi); | ||
1148 | netxen_nic_enable_int(adapter); | ||
1200 | netif_wake_queue(adapter->netdev); | 1149 | netif_wake_queue(adapter->netdev); |
1201 | } | 1150 | } |
1202 | 1151 | ||
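And the tx-timeout change in the final hunk, read as a whole: instead of a
full netxen_nic_close()/netxen_nic_open() cycle, the handler now only quiesces
interrupts and NAPI around resetting trans_start. A sketch assembled from the
hunk (the container_of() lookup at the top is the usual work-struct idiom and
lies outside the lines shown; netxen_nic_disable_int()/netxen_nic_enable_int()
are the driver's existing helpers):

static void netxen_tx_timeout_task(struct work_struct *work)
{
	struct netxen_adapter *adapter =
		container_of(work, struct netxen_adapter, tx_timeout_task);

	printk(KERN_ERR "%s %s: transmit timeout, resetting.\n",
	       netxen_nic_driver_name, adapter->netdev->name);

	/* quiesce rx processing instead of bouncing the whole interface */
	netxen_nic_disable_int(adapter);
	napi_disable(&adapter->napi);

	adapter->netdev->trans_start = jiffies;

	napi_enable(&adapter->napi);
	netxen_nic_enable_int(adapter);
	netif_wake_queue(adapter->netdev);
}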