-rw-r--r--  drivers/net/netxen/netxen_nic.h       |  42
-rw-r--r--  drivers/net/netxen/netxen_nic_ctx.c   |  49
-rw-r--r--  drivers/net/netxen/netxen_nic_hw.c    |  37
-rw-r--r--  drivers/net/netxen/netxen_nic_init.c  |  48
-rw-r--r--  drivers/net/netxen/netxen_nic_main.c  |  52
5 files changed, 108 insertions, 120 deletions
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 1165f53ea2ca..2aa658db103f 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -74,10 +74,10 @@
 	(sizeof(struct netxen_rx_buffer) * rds_ring->num_desc)
 #define STATUS_DESC_RINGSIZE(sds_ring)	\
 	(sizeof(struct status_desc) * (sds_ring)->num_desc)
-#define TX_BUFF_RINGSIZE(adapter)	\
-	(sizeof(struct netxen_cmd_buffer) * adapter->num_txd)
-#define TX_DESC_RINGSIZE(adapter)	\
-	(sizeof(struct cmd_desc_type0) * adapter->num_txd)
+#define TX_BUFF_RINGSIZE(tx_ring)	\
+	(sizeof(struct netxen_cmd_buffer) * tx_ring->num_desc)
+#define TX_DESC_RINGSIZE(tx_ring)	\
+	(sizeof(struct cmd_desc_type0) * tx_ring->num_desc)
 
 #define find_diff_among(a,b,range) ((a)<(b)?((b)-(a)):((b)+(range)-(a)))
 
@@ -639,7 +639,7 @@ extern char netxen_nic_driver_name[];
  */
 struct netxen_skb_frag {
 	u64 dma;
-	ulong length;
+	u64 length;
 };
 
 #define _netxen_set_bits(config_word, start, bits, val) {\
@@ -704,9 +704,6 @@ struct netxen_hardware_context {
 	u8 linkup;
 	u16 port_type;
 	u16 board_type;
-	/* Address of cmd ring in Phantom */
-	struct cmd_desc_type0 *cmd_desc_head;
-	dma_addr_t cmd_desc_phys_addr;
 };
 
 #define MINIMUM_ETHERNET_FRAME_SIZE	64	/* With FCS */
@@ -752,14 +749,25 @@ struct nx_host_sds_ring {
 	struct napi_struct napi;
 	struct list_head free_list[NUM_RCV_DESC_RINGS];
 
-	u16 clean_tx;
-	u16 post_rxd;
 	int irq;
 
 	dma_addr_t phys_addr;
 	char name[IFNAMSIZ+4];
 };
 
+struct nx_host_tx_ring {
+	u32 producer;
+	__le32 *hw_consumer;
+	u32 sw_consumer;
+	u32 crb_cmd_producer;
+	u32 crb_cmd_consumer;
+	u32 num_desc;
+
+	struct netxen_cmd_buffer *cmd_buf_arr;
+	struct cmd_desc_type0 *desc_head;
+	dma_addr_t phys_addr;
+};
+
 /*
  * Receive context. There is one such structure per instance of the
  * receive processing. Any state information that is relevant to
@@ -1152,11 +1160,6 @@ struct netxen_adapter {
 	rwlock_t adapter_lock;
 
 	spinlock_t tx_clean_lock;
-	u32 cmd_producer;
-	u32 last_cmd_consumer;
-	u32 crb_addr_cmd_producer;
-	u32 crb_addr_cmd_consumer;
-	__le32 *cmd_consumer;
 
 	u32 num_txd;
 	u32 num_rxd;
@@ -1191,13 +1194,8 @@ struct netxen_adapter {
 
 	struct netxen_adapter_stats stats;
 
-	struct netxen_cmd_buffer *cmd_buf_arr;	/* Command buffers for xmit */
-
-	/*
-	 * Receive instances. These can be either one per port,
-	 * or one per peg, etc.
-	 */
 	struct netxen_recv_context recv_ctx;
+	struct nx_host_tx_ring tx_ring;
 
 	/* Context interface shared between card and host */
 	struct netxen_ring_ctx *ctx_desc;
@@ -1409,7 +1407,7 @@ int netxen_nic_set_mac(struct net_device *netdev, void *p);
 struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev);
 
 void netxen_nic_update_cmd_producer(struct netxen_adapter *adapter,
-		uint32_t crb_producer);
+		struct nx_host_tx_ring *tx_ring, uint32_t crb_producer);
 
 /*
  * NetXen Board information
diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c
index 73f6debacf9c..794335188a26 100644
--- a/drivers/net/netxen/netxen_nic_ctx.c
+++ b/drivers/net/netxen/netxen_nic_ctx.c
@@ -328,6 +328,7 @@ nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter)
 	int err = 0;
 	u64 offset, phys_addr;
 	dma_addr_t rq_phys_addr, rsp_phys_addr;
+	struct nx_host_tx_ring *tx_ring = &adapter->tx_ring;
 
 	rq_size = SIZEOF_HOSTRQ_TX(nx_hostrq_tx_ctx_t);
 	rq_addr = pci_alloc_consistent(adapter->pdev,
@@ -367,10 +368,8 @@ nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter)
 
 	prq_cds = &prq->cds_ring;
 
-	prq_cds->host_phys_addr =
-		cpu_to_le64(adapter->ahw.cmd_desc_phys_addr);
-
-	prq_cds->ring_size = cpu_to_le32(adapter->num_txd);
+	prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
+	prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);
 
 	phys_addr = rq_phys_addr;
 	err = netxen_issue_cmd(adapter,
@@ -383,8 +382,7 @@ nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter)
 
 	if (err == NX_RCODE_SUCCESS) {
 		temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
-		adapter->crb_addr_cmd_producer =
-			NETXEN_NIC_REG(temp - 0x200);
+		tx_ring->crb_cmd_producer = NETXEN_NIC_REG(temp - 0x200);
 #if 0
 		adapter->tx_state =
 			le32_to_cpu(prsp->host_ctx_state);
@@ -497,13 +495,13 @@ netxen_init_old_ctx(struct netxen_adapter *adapter)
 	struct netxen_recv_context *recv_ctx;
 	struct nx_host_rds_ring *rds_ring;
 	struct nx_host_sds_ring *sds_ring;
+	struct nx_host_tx_ring *tx_ring;
 	int ring;
 	int func_id = adapter->portnum;
 
-	adapter->ctx_desc->cmd_ring_addr =
-		cpu_to_le64(adapter->ahw.cmd_desc_phys_addr);
-	adapter->ctx_desc->cmd_ring_size =
-		cpu_to_le32(adapter->num_txd);
+	tx_ring = &adapter->tx_ring;
+	adapter->ctx_desc->cmd_ring_addr = cpu_to_le64(tx_ring->phys_addr);
+	adapter->ctx_desc->cmd_ring_size = cpu_to_le32(tx_ring->num_desc);
 
 	recv_ctx = &adapter->recv_ctx;
 
@@ -535,25 +533,17 @@ static uint32_t sw_int_mask[4] = {
 
 int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
 {
-	struct netxen_hardware_context *hw = &adapter->ahw;
-	u32 state = 0;
 	void *addr;
 	int err = 0;
 	int ring;
 	struct netxen_recv_context *recv_ctx;
 	struct nx_host_rds_ring *rds_ring;
 	struct nx_host_sds_ring *sds_ring;
+	struct nx_host_tx_ring *tx_ring = &adapter->tx_ring;
 
 	struct pci_dev *pdev = adapter->pdev;
 	struct net_device *netdev = adapter->netdev;
 
-	err = netxen_receive_peg_ready(adapter);
-	if (err) {
-		printk(KERN_ERR "Rcv Peg initialization not complete:%x.\n",
-				state);
-		return err;
-	}
-
 	addr = pci_alloc_consistent(pdev,
 			sizeof(struct netxen_ring_ctx) + sizeof(uint32_t),
 			&adapter->ctx_desc_phys_addr);
@@ -568,13 +558,12 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
 	adapter->ctx_desc->cmd_consumer_offset =
 		cpu_to_le64(adapter->ctx_desc_phys_addr +
 		sizeof(struct netxen_ring_ctx));
-	adapter->cmd_consumer =
+	tx_ring->hw_consumer =
 		(__le32 *)(((char *)addr) + sizeof(struct netxen_ring_ctx));
 
 	/* cmd desc ring */
-	addr = pci_alloc_consistent(pdev,
-			TX_DESC_RINGSIZE(adapter),
-			&hw->cmd_desc_phys_addr);
+	addr = pci_alloc_consistent(pdev, TX_DESC_RINGSIZE(tx_ring),
+			&tx_ring->phys_addr);
 
 	if (addr == NULL) {
 		dev_err(&pdev->dev, "%s: failed to allocate tx desc ring\n",
@@ -582,7 +571,7 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
 		return -ENOMEM;
 	}
 
-	hw->cmd_desc_head = (struct cmd_desc_type0 *)addr;
+	tx_ring->desc_head = (struct cmd_desc_type0 *)addr;
 
 	recv_ctx = &adapter->recv_ctx;
 
@@ -658,6 +647,7 @@ void netxen_free_hw_resources(struct netxen_adapter *adapter)
 	struct netxen_recv_context *recv_ctx;
 	struct nx_host_rds_ring *rds_ring;
 	struct nx_host_sds_ring *sds_ring;
+	struct nx_host_tx_ring *tx_ring;
 	int ring;
 
 	if (adapter->fw_major >= 4) {
@@ -674,13 +664,12 @@ void netxen_free_hw_resources(struct netxen_adapter *adapter)
 		adapter->ctx_desc = NULL;
 	}
 
-	if (adapter->ahw.cmd_desc_head != NULL) {
+	tx_ring = &adapter->tx_ring;
+	if (tx_ring->desc_head != NULL) {
 		pci_free_consistent(adapter->pdev,
-				sizeof(struct cmd_desc_type0) *
-				adapter->num_txd,
-				adapter->ahw.cmd_desc_head,
-				adapter->ahw.cmd_desc_phys_addr);
-		adapter->ahw.cmd_desc_head = NULL;
+				TX_DESC_RINGSIZE(tx_ring),
+				tx_ring->desc_head, tx_ring->phys_addr);
+		tx_ring->desc_head = NULL;
 	}
 
 	recv_ctx = &adapter->recv_ctx;
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index 67d63eecc9cb..8416962cc9ac 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -501,45 +501,44 @@ static int nx_p3_nic_add_mac(struct netxen_adapter *adapter,
 
 static int
 netxen_send_cmd_descs(struct netxen_adapter *adapter,
-		struct cmd_desc_type0 *cmd_desc_arr, int nr_elements)
+		struct cmd_desc_type0 *cmd_desc_arr, int nr_desc)
 {
-	uint32_t i, producer;
+	u32 i, producer, consumer;
 	struct netxen_cmd_buffer *pbuf;
 	struct cmd_desc_type0 *cmd_desc;
-
-	if (nr_elements > MAX_PENDING_DESC_BLOCK_SIZE || nr_elements == 0) {
-		printk(KERN_WARNING "%s: Too many command descriptors in a "
-				"request\n", __func__);
-		return -EINVAL;
-	}
+	struct nx_host_tx_ring *tx_ring;
 
 	i = 0;
 
+	tx_ring = &adapter->tx_ring;
 	netif_tx_lock_bh(adapter->netdev);
 
-	producer = adapter->cmd_producer;
+	producer = tx_ring->producer;
+	consumer = tx_ring->sw_consumer;
+
+	if (nr_desc > find_diff_among(producer, consumer, tx_ring->num_desc)) {
+		netif_tx_unlock_bh(adapter->netdev);
+		return -EBUSY;
+	}
+
 	do {
 		cmd_desc = &cmd_desc_arr[i];
 
-		pbuf = &adapter->cmd_buf_arr[producer];
+		pbuf = &tx_ring->cmd_buf_arr[producer];
 		pbuf->skb = NULL;
 		pbuf->frag_count = 0;
 
-		/* adapter->ahw.cmd_desc_head[producer] = *cmd_desc; */
-		memcpy(&adapter->ahw.cmd_desc_head[producer],
+		memcpy(&tx_ring->desc_head[producer],
 			&cmd_desc_arr[i], sizeof(struct cmd_desc_type0));
 
-		producer = get_next_index(producer,
-			adapter->num_txd);
+		producer = get_next_index(producer, tx_ring->num_desc);
 		i++;
 
-	} while (i != nr_elements);
-
-	adapter->cmd_producer = producer;
+	} while (i != nr_desc);
 
-	/* write producer index to start the xmit */
+	tx_ring->producer = producer;
 
-	netxen_nic_update_cmd_producer(adapter, adapter->cmd_producer);
+	netxen_nic_update_cmd_producer(adapter, tx_ring, producer);
 
 	netif_tx_unlock_bh(adapter->netdev);
 
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 0759c35f16ac..8e45dcc27c7f 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -173,9 +173,10 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter)
 	struct netxen_cmd_buffer *cmd_buf;
 	struct netxen_skb_frag *buffrag;
 	int i, j;
+	struct nx_host_tx_ring *tx_ring = &adapter->tx_ring;
 
-	cmd_buf = adapter->cmd_buf_arr;
-	for (i = 0; i < adapter->num_txd; i++) {
+	cmd_buf = tx_ring->cmd_buf_arr;
+	for (i = 0; i < tx_ring->num_desc; i++) {
 		buffrag = cmd_buf->frag_array;
 		if (buffrag->dma) {
 			pci_unmap_single(adapter->pdev, buffrag->dma,
@@ -203,6 +204,7 @@ void netxen_free_sw_resources(struct netxen_adapter *adapter)
 {
 	struct netxen_recv_context *recv_ctx;
 	struct nx_host_rds_ring *rds_ring;
+	struct nx_host_tx_ring *tx_ring;
 	int ring;
 
 	recv_ctx = &adapter->recv_ctx;
@@ -214,8 +216,9 @@ void netxen_free_sw_resources(struct netxen_adapter *adapter)
 		}
 	}
 
-	if (adapter->cmd_buf_arr)
-		vfree(adapter->cmd_buf_arr);
+	tx_ring = &adapter->tx_ring;
+	if (tx_ring->cmd_buf_arr)
+		vfree(tx_ring->cmd_buf_arr);
 	return;
 }
 
@@ -224,21 +227,24 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
 	struct netxen_recv_context *recv_ctx;
 	struct nx_host_rds_ring *rds_ring;
 	struct nx_host_sds_ring *sds_ring;
+	struct nx_host_tx_ring *tx_ring = &adapter->tx_ring;
 	struct netxen_rx_buffer *rx_buf;
 	int ring, i, num_rx_bufs;
 
 	struct netxen_cmd_buffer *cmd_buf_arr;
 	struct net_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
 
+	tx_ring->num_desc = adapter->num_txd;
 	cmd_buf_arr =
-		(struct netxen_cmd_buffer *)vmalloc(TX_BUFF_RINGSIZE(adapter));
+		(struct netxen_cmd_buffer *)vmalloc(TX_BUFF_RINGSIZE(tx_ring));
 	if (cmd_buf_arr == NULL) {
-		printk(KERN_ERR "%s: Failed to allocate cmd buffer ring\n",
+		dev_err(&pdev->dev, "%s: failed to allocate cmd buffer ring\n",
 			netdev->name);
 		return -ENOMEM;
 	}
-	memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(adapter));
-	adapter->cmd_buf_arr = cmd_buf_arr;
+	memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
+	tx_ring->cmd_buf_arr = cmd_buf_arr;
 
 	recv_ctx = &adapter->recv_ctx;
 	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
@@ -307,8 +313,6 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
 	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
 		sds_ring = &recv_ctx->sds_rings[ring];
 		sds_ring->irq = adapter->msix_entries[ring].vector;
-		sds_ring->clean_tx = (ring == 0);
-		sds_ring->post_rxd = (ring == 0);
 		sds_ring->adapter = adapter;
 		sds_ring->num_desc = adapter->num_rxd;
 
@@ -990,23 +994,24 @@ netxen_process_rcv_ring(struct nx_host_sds_ring *sds_ring, int max)
 /* Process Command status ring */
 int netxen_process_cmd_ring(struct netxen_adapter *adapter)
 {
-	u32 last_consumer, consumer;
+	u32 sw_consumer, hw_consumer;
 	int count = 0, i;
 	struct netxen_cmd_buffer *buffer;
 	struct pci_dev *pdev = adapter->pdev;
 	struct net_device *netdev = adapter->netdev;
 	struct netxen_skb_frag *frag;
 	int done = 0;
+	struct nx_host_tx_ring *tx_ring = &adapter->tx_ring;
 
 	if (!spin_trylock(&adapter->tx_clean_lock))
 		return 1;
 
-	last_consumer = adapter->last_cmd_consumer;
-	barrier(); /* cmd_consumer can change underneath */
-	consumer = le32_to_cpu(*(adapter->cmd_consumer));
+	sw_consumer = tx_ring->sw_consumer;
+	barrier(); /* hw_consumer can change underneath */
+	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
 
-	while (last_consumer != consumer) {
-		buffer = &adapter->cmd_buf_arr[last_consumer];
+	while (sw_consumer != hw_consumer) {
+		buffer = &tx_ring->cmd_buf_arr[sw_consumer];
 		if (buffer->skb) {
 			frag = &buffer->frag_array[0];
 			pci_unmap_single(pdev, frag->dma, frag->length,
@@ -1024,14 +1029,13 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
 			buffer->skb = NULL;
 		}
 
-		last_consumer = get_next_index(last_consumer,
-				adapter->num_txd);
+		sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
 		if (++count >= MAX_STATUS_HANDLE)
 			break;
 	}
 
 	if (count) {
-		adapter->last_cmd_consumer = last_consumer;
+		tx_ring->sw_consumer = sw_consumer;
 		smp_mb();
 		if (netif_queue_stopped(netdev) && netif_running(netdev)) {
 			netif_tx_lock(netdev);
@@ -1053,9 +1057,9 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
 	 * There is still a possible race condition and the host could miss an
 	 * interrupt. The card has to take care of this.
 	 */
-	barrier(); /* cmd_consumer can change underneath */
-	consumer = le32_to_cpu(*(adapter->cmd_consumer));
-	done = (last_consumer == consumer);
+	barrier(); /* hw_consumer can change underneath */
+	hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
+	done = (sw_consumer == hw_consumer);
 	spin_unlock(&adapter->tx_clean_lock);
 
 	return (done);
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 853dee8057d9..22b2d491c782 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -107,10 +107,10 @@ static uint32_t crb_cmd_producer[4] = {
 
 void
 netxen_nic_update_cmd_producer(struct netxen_adapter *adapter,
-		uint32_t crb_producer)
+		struct nx_host_tx_ring *tx_ring, u32 producer)
 {
 	adapter->pci_write_normalize(adapter,
-			adapter->crb_addr_cmd_producer, crb_producer);
+			tx_ring->crb_cmd_producer, producer);
 }
 
@@ -120,10 +120,10 @@ static uint32_t crb_cmd_consumer[4] = {
 
 static inline void
 netxen_nic_update_cmd_consumer(struct netxen_adapter *adapter,
-		u32 crb_consumer)
+		struct nx_host_tx_ring *tx_ring, u32 consumer)
 {
 	adapter->pci_write_normalize(adapter,
-			adapter->crb_addr_cmd_consumer, crb_consumer);
+			tx_ring->crb_cmd_consumer, consumer);
 }
 
@@ -814,6 +814,7 @@ netxen_nic_attach(struct netxen_adapter *adapter)
 	struct pci_dev *pdev = adapter->pdev;
 	int err, ring;
 	struct nx_host_rds_ring *rds_ring;
+	struct nx_host_tx_ring *tx_ring;
 
 	err = netxen_init_firmware(adapter);
 	if (err != 0) {
@@ -843,13 +844,12 @@ netxen_nic_attach(struct netxen_adapter *adapter)
 	}
 
 	if (adapter->fw_major < 4) {
-		adapter->crb_addr_cmd_producer =
-			crb_cmd_producer[adapter->portnum];
-		adapter->crb_addr_cmd_consumer =
-			crb_cmd_consumer[adapter->portnum];
+		tx_ring = &adapter->tx_ring;
+		tx_ring->crb_cmd_producer = crb_cmd_producer[adapter->portnum];
+		tx_ring->crb_cmd_consumer = crb_cmd_consumer[adapter->portnum];
 
-		netxen_nic_update_cmd_producer(adapter, 0);
-		netxen_nic_update_cmd_consumer(adapter, 0);
+		netxen_nic_update_cmd_producer(adapter, tx_ring, 0);
+		netxen_nic_update_cmd_consumer(adapter, tx_ring, 0);
 	}
 
 	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
@@ -1304,7 +1304,7 @@ static int
 netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 {
 	struct netxen_adapter *adapter = netdev_priv(netdev);
-	struct netxen_hardware_context *hw = &adapter->ahw;
+	struct nx_host_tx_ring *tx_ring = &adapter->tx_ring;
 	unsigned int first_seg_len = skb->len - skb->data_len;
 	struct netxen_cmd_buffer *pbuf;
 	struct netxen_skb_frag *buffrag;
@@ -1315,28 +1315,26 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
 	u32 producer, consumer;
 	int frag_count, no_of_desc;
-	u32 num_txd = adapter->num_txd;
+	u32 num_txd = tx_ring->num_desc;
 	bool is_tso = false;
 
 	frag_count = skb_shinfo(skb)->nr_frags + 1;
 
-	/* There 4 fragments per descriptor */
+	/* 4 fragments per cmd des */
 	no_of_desc = (frag_count + 3) >> 2;
 
-	producer = adapter->cmd_producer;
+	producer = tx_ring->producer;
 	smp_mb();
-	consumer = adapter->last_cmd_consumer;
+	consumer = tx_ring->sw_consumer;
 	if ((no_of_desc+2) > find_diff_among(producer, consumer, num_txd)) {
 		netif_stop_queue(netdev);
 		smp_mb();
 		return NETDEV_TX_BUSY;
 	}
 
-	/* Copy the descriptors into the hardware */
-	hwdesc = &hw->cmd_desc_head[producer];
+	hwdesc = &tx_ring->desc_head[producer];
 	netxen_clear_cmddesc((u64 *)hwdesc);
-	/* Take skb->data itself */
-	pbuf = &adapter->cmd_buf_arr[producer];
+	pbuf = &tx_ring->cmd_buf_arr[producer];
 
 	is_tso = netxen_tso_check(netdev, hwdesc, skb);
 
@@ -1365,9 +1363,9 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 		if ((i & 0x3) == 0) {
 			k = 0;
 			producer = get_next_index(producer, num_txd);
-			hwdesc = &hw->cmd_desc_head[producer];
+			hwdesc = &tx_ring->desc_head[producer];
 			netxen_clear_cmddesc((u64 *)hwdesc);
-			pbuf = &adapter->cmd_buf_arr[producer];
+			pbuf = &tx_ring->cmd_buf_arr[producer];
 			pbuf->skb = NULL;
 		}
 		frag = &skb_shinfo(skb)->frags[i - 1];
@@ -1419,8 +1417,8 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 			more_hdr = 0;
 		}
 		/* copy the MAC/IP/TCP headers to the cmd descriptor list */
-		hwdesc = &hw->cmd_desc_head[producer];
-		pbuf = &adapter->cmd_buf_arr[producer];
+		hwdesc = &tx_ring->desc_head[producer];
+		pbuf = &tx_ring->cmd_buf_arr[producer];
 		pbuf->skb = NULL;
 
 		/* copy the first 64 bytes */
@@ -1429,8 +1427,8 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 		producer = get_next_index(producer, num_txd);
 
 		if (more_hdr) {
-			hwdesc = &hw->cmd_desc_head[producer];
-			pbuf = &adapter->cmd_buf_arr[producer];
+			hwdesc = &tx_ring->desc_head[producer];
+			pbuf = &tx_ring->cmd_buf_arr[producer];
 			pbuf->skb = NULL;
 			/* copy the next 64 bytes - should be enough except
 			 * for pathological case
@@ -1443,10 +1441,10 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 		}
 	}
 
-	adapter->cmd_producer = producer;
+	tx_ring->producer = producer;
 	adapter->stats.txbytes += skb->len;
 
-	netxen_nic_update_cmd_producer(adapter, adapter->cmd_producer);
+	netxen_nic_update_cmd_producer(adapter, tx_ring, producer);
 
 	adapter->stats.xmitcalled++;
 	netdev->trans_start = jiffies;
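
Note (not part of the patch): the -EBUSY check added in netxen_send_cmd_descs() and the existing stop-queue check in netxen_nic_xmit_frame() both size free ring space with the find_diff_among() macro from netxen_nic.h. The standalone C sketch below only illustrates that producer/consumer arithmetic on the new tx ring; get_next_index() is re-implemented here as a plain modulo wrap for the example and may differ from the driver's own helper, and the sample ring values are arbitrary.

#include <stdio.h>
#include <stdint.h>

/* macro as defined in netxen_nic.h */
#define find_diff_among(a,b,range) ((a)<(b)?((b)-(a)):((b)+(range)-(a)))

/* hypothetical wrap helper for this sketch only */
static uint32_t get_next_index(uint32_t index, uint32_t num_desc)
{
	return (index + 1) % num_desc;
}

int main(void)
{
	uint32_t num_desc = 8, producer = 6, sw_consumer = 2;

	/* descriptors still free before the producer would overrun the consumer */
	uint32_t avail = find_diff_among(producer, sw_consumer, num_desc);
	printf("free descriptors: %u\n", (unsigned)avail);	/* prints 4 */

	/* posting one descriptor advances the producer with wrap-around */
	producer = get_next_index(producer, num_desc);
	printf("next producer: %u\n", (unsigned)producer);	/* prints 7 */

	return 0;
}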