author		Dhananjay Phadke <dhananjay@netxen.com>	2009-04-07 18:50:40 -0400
committer	David S. Miller <davem@davemloft.net>	2009-04-08 18:58:25 -0400
commit		d877f1e344f5515988d9dcd6db5d4285911778a3
tree		27b711302f80335942d870655ee80b7ba6e0eac0	/drivers/net/netxen/netxen_nic_init.c
parent		1b1f789818c6be9437cfe199932ee13faafca60f
netxen: refactor transmit code
o move tx state into the nx_host_tx_ring structure; this will
  help manage multiple tx rings in the future.
o sanitize some variable names
Signed-off-by: Dhananjay Phadke <dhananjay@netxen.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/netxen/netxen_nic_init.c')
-rw-r--r--	drivers/net/netxen/netxen_nic_init.c	48
1 file changed, 26 insertions, 22 deletions
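For orientation, the accesses introduced by this patch (tx_ring->cmd_buf_arr, tx_ring->num_desc, tx_ring->sw_consumer, tx_ring->hw_consumer) imply a tx-ring layout roughly like the sketch below. This is only an inference from the diff for readability; the actual nx_host_tx_ring definition and the updated TX_BUFF_RINGSIZE() macro live in netxen_nic.h, which is outside this diffstat.

	#include <linux/types.h>

	/* Illustrative sketch only -- inferred from the fields this patch
	 * touches, not the actual definition in netxen_nic.h. */
	struct nx_host_tx_ring {
		u32 num_desc;		/* number of tx descriptors */
		u32 sw_consumer;	/* last index cleaned by the host */
		__le32 *hw_consumer;	/* consumer index written back by the NIC */
		struct netxen_cmd_buffer *cmd_buf_arr;	/* per-descriptor skb bookkeeping */
	};

	/* Assumed shape of the ring-size macro once it takes the tx ring: */
	#define TX_BUFF_RINGSIZE(tx_ring) \
		(sizeof(struct netxen_cmd_buffer) * (tx_ring)->num_desc)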
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 0759c35f16ac..8e45dcc27c7f 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -173,9 +173,10 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter)
 	struct netxen_cmd_buffer *cmd_buf;
 	struct netxen_skb_frag *buffrag;
 	int i, j;
+	struct nx_host_tx_ring *tx_ring = &adapter->tx_ring;
 
-	cmd_buf = adapter->cmd_buf_arr;
-	for (i = 0; i < adapter->num_txd; i++) {
+	cmd_buf = tx_ring->cmd_buf_arr;
+	for (i = 0; i < tx_ring->num_desc; i++) {
 		buffrag = cmd_buf->frag_array;
 		if (buffrag->dma) {
 			pci_unmap_single(adapter->pdev, buffrag->dma,
@@ -203,6 +204,7 @@ void netxen_free_sw_resources(struct netxen_adapter *adapter)
 {
 	struct netxen_recv_context *recv_ctx;
 	struct nx_host_rds_ring *rds_ring;
+	struct nx_host_tx_ring *tx_ring;
 	int ring;
 
 	recv_ctx = &adapter->recv_ctx;
@@ -214,8 +216,9 @@ void netxen_free_sw_resources(struct netxen_adapter *adapter)
 		}
 	}
 
-	if (adapter->cmd_buf_arr)
-		vfree(adapter->cmd_buf_arr);
+	tx_ring = &adapter->tx_ring;
+	if (tx_ring->cmd_buf_arr)
+		vfree(tx_ring->cmd_buf_arr);
 	return;
 }
 
@@ -224,21 +227,24 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
 	struct netxen_recv_context *recv_ctx;
 	struct nx_host_rds_ring *rds_ring;
 	struct nx_host_sds_ring *sds_ring;
+	struct nx_host_tx_ring *tx_ring = &adapter->tx_ring;
 	struct netxen_rx_buffer *rx_buf;
 	int ring, i, num_rx_bufs;
 
 	struct netxen_cmd_buffer *cmd_buf_arr;
 	struct net_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
 
+	tx_ring->num_desc = adapter->num_txd;
 	cmd_buf_arr =
-		(struct netxen_cmd_buffer *)vmalloc(TX_BUFF_RINGSIZE(adapter));
+		(struct netxen_cmd_buffer *)vmalloc(TX_BUFF_RINGSIZE(tx_ring));
 	if (cmd_buf_arr == NULL) {
-		printk(KERN_ERR "%s: Failed to allocate cmd buffer ring\n",
+		dev_err(&pdev->dev, "%s: failed to allocate cmd buffer ring\n",
 				netdev->name);
 		return -ENOMEM;
 	}
-	memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(adapter));
-	adapter->cmd_buf_arr = cmd_buf_arr;
+	memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
+	tx_ring->cmd_buf_arr = cmd_buf_arr;
 
 	recv_ctx = &adapter->recv_ctx;
 	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
@@ -307,8 +313,6 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
 	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
 		sds_ring = &recv_ctx->sds_rings[ring];
 		sds_ring->irq = adapter->msix_entries[ring].vector;
-		sds_ring->clean_tx = (ring == 0);
-		sds_ring->post_rxd = (ring == 0);
 		sds_ring->adapter = adapter;
 		sds_ring->num_desc = adapter->num_rxd;
 
@@ -990,23 +994,24 @@ netxen_process_rcv_ring(struct nx_host_sds_ring *sds_ring, int max)
 /* Process Command status ring */
 int netxen_process_cmd_ring(struct netxen_adapter *adapter)
 {
-	u32 last_consumer, consumer;
+	u32 sw_consumer, hw_consumer;
 	int count = 0, i;
 	struct netxen_cmd_buffer *buffer;
 	struct pci_dev *pdev = adapter->pdev;
 	struct net_device *netdev = adapter->netdev;
 	struct netxen_skb_frag *frag;
 	int done = 0;
+	struct nx_host_tx_ring *tx_ring = &adapter->tx_ring;
 
 	if (!spin_trylock(&adapter->tx_clean_lock))
 		return 1;
 
-	last_consumer = adapter->last_cmd_consumer;
-	barrier(); /* cmd_consumer can change underneath */
-	consumer = le32_to_cpu(*(adapter->cmd_consumer));
+	sw_consumer = tx_ring->sw_consumer;
+	barrier(); /* hw_consumer can change underneath */
+	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
 
-	while (last_consumer != consumer) {
-		buffer = &adapter->cmd_buf_arr[last_consumer];
+	while (sw_consumer != hw_consumer) {
+		buffer = &tx_ring->cmd_buf_arr[sw_consumer];
 		if (buffer->skb) {
 			frag = &buffer->frag_array[0];
 			pci_unmap_single(pdev, frag->dma, frag->length,
@@ -1024,14 +1029,13 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
 			buffer->skb = NULL;
 		}
 
-		last_consumer = get_next_index(last_consumer,
-					       adapter->num_txd);
+		sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
 		if (++count >= MAX_STATUS_HANDLE)
 			break;
 	}
 
 	if (count) {
-		adapter->last_cmd_consumer = last_consumer;
+		tx_ring->sw_consumer = sw_consumer;
 		smp_mb();
 		if (netif_queue_stopped(netdev) && netif_running(netdev)) {
 			netif_tx_lock(netdev);
@@ -1053,9 +1057,9 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
 	 * There is still a possible race condition and the host could miss an
 	 * interrupt. The card has to take care of this.
 	 */
-	barrier(); /* cmd_consumer can change underneath */
-	consumer = le32_to_cpu(*(adapter->cmd_consumer));
-	done = (last_consumer == consumer);
+	barrier(); /* hw_consumer can change underneath */
+	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
+	done = (sw_consumer == hw_consumer);
 	spin_unlock(&adapter->tx_clean_lock);
 
 	return (done);
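A note on the cleanup loop in netxen_process_cmd_ring(): sw_consumer is the driver's record of how far it has freed tx buffers, while hw_consumer is the completion index the NIC writes back to host memory (hence the le32_to_cpu() and the barrier()). The wrap-around helper the loop relies on is not part of this file; below is a minimal sketch, assuming get_next_index() simply advances the index by one and wraps at the ring size. The real helper is defined in netxen_nic.h and may use a power-of-two mask instead, which is equivalent for a power-of-two ring.

	#include <linux/types.h>

	/* Sketch of the assumed wrap behaviour, not the driver's actual helper. */
	static inline u32 get_next_index(u32 index, u32 ring_size)
	{
		return (index + 1) % ring_size;
	}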