Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/netxen/netxen_nic.h          |   7
-rw-r--r--  drivers/net/netxen/netxen_nic_ctx.c      | 130
-rw-r--r--  drivers/net/netxen/netxen_nic_ethtool.c  |  11
-rw-r--r--  drivers/net/netxen/netxen_nic_hdr.h      |   6
-rw-r--r--  drivers/net/netxen/netxen_nic_init.c     | 213
-rw-r--r--  drivers/net/netxen/netxen_nic_main.c     |  30
6 files changed, 178 insertions(+), 219 deletions(-)
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index b5c0d66daf7e..cde8e70b6b08 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -1280,7 +1280,7 @@ struct netxen_adapter {
 	 * Receive instances. These can be either one per port,
 	 * or one per peg, etc.
 	 */
-	struct netxen_recv_context recv_ctx[MAX_RCV_CTX];
+	struct netxen_recv_context recv_ctx;
 
 	int is_up;
 	struct netxen_dummy_dma dummy_dma;
@@ -1464,10 +1464,9 @@ void netxen_initialize_adapter_ops(struct netxen_adapter *adapter);
 int netxen_init_firmware(struct netxen_adapter *adapter);
 void netxen_nic_clear_stats(struct netxen_adapter *adapter);
 void netxen_watchdog_task(struct work_struct *work);
-void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx,
-		u32 ringid);
+void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid);
 int netxen_process_cmd_ring(struct netxen_adapter *adapter);
-u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctx, int max);
+int netxen_process_rcv_ring(struct netxen_adapter *adapter, int max);
 void netxen_p2_nic_set_multi(struct net_device *netdev);
 void netxen_p3_nic_set_multi(struct net_device *netdev);
 void netxen_p3_free_mac_list(struct netxen_adapter *adapter);
diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c
index 3e437065023d..d125dca0131a 100644
--- a/drivers/net/netxen/netxen_nic_ctx.c
+++ b/drivers/net/netxen/netxen_nic_ctx.c
@@ -141,7 +141,7 @@ int
 nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu)
 {
 	u32 rcode = NX_RCODE_SUCCESS;
-	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx[0];
+	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
 
 	if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE)
 		rcode = netxen_issue_cmd(adapter,
@@ -179,7 +179,7 @@ nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
 
 	int err;
 
-	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx[0];
+	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
 
 	/* only one sds ring for now */
 	nrds_rings = adapter->max_rds_rings;
@@ -292,7 +292,7 @@ out_free_rq:
 static void
 nx_fw_cmd_destroy_rx_ctx(struct netxen_adapter *adapter)
 {
-	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx[0];
+	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
 
 	if (netxen_issue_cmd(adapter,
 			adapter->ahw.pci_func,
@@ -488,7 +488,7 @@ netxen_init_old_ctx(struct netxen_adapter *adapter)
 {
 	struct netxen_recv_context *recv_ctx;
 	struct nx_host_rds_ring *rds_ring;
-	int ctx, ring;
+	int ring;
 	int func_id = adapter->portnum;
 
 	adapter->ctx_desc->cmd_ring_addr =
@@ -496,22 +496,20 @@ netxen_init_old_ctx(struct netxen_adapter *adapter)
 	adapter->ctx_desc->cmd_ring_size =
 		cpu_to_le32(adapter->max_tx_desc_count);
 
-	for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
-		recv_ctx = &adapter->recv_ctx[ctx];
+	recv_ctx = &adapter->recv_ctx;
 
-		for (ring = 0; ring < adapter->max_rds_rings; ring++) {
-			rds_ring = &recv_ctx->rds_rings[ring];
+	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+		rds_ring = &recv_ctx->rds_rings[ring];
 
-			adapter->ctx_desc->rcv_ctx[ring].rcv_ring_addr =
-				cpu_to_le64(rds_ring->phys_addr);
-			adapter->ctx_desc->rcv_ctx[ring].rcv_ring_size =
-				cpu_to_le32(rds_ring->max_rx_desc_count);
-		}
-		adapter->ctx_desc->sts_ring_addr =
-			cpu_to_le64(recv_ctx->rcv_status_desc_phys_addr);
-		adapter->ctx_desc->sts_ring_size =
-			cpu_to_le32(adapter->max_rx_desc_count);
+		adapter->ctx_desc->rcv_ctx[ring].rcv_ring_addr =
+			cpu_to_le64(rds_ring->phys_addr);
+		adapter->ctx_desc->rcv_ctx[ring].rcv_ring_size =
+			cpu_to_le32(rds_ring->max_rx_desc_count);
 	}
+	adapter->ctx_desc->sts_ring_addr =
+		cpu_to_le64(recv_ctx->rcv_status_desc_phys_addr);
+	adapter->ctx_desc->sts_ring_size =
+		cpu_to_le32(adapter->max_rx_desc_count);
 
 	adapter->pci_write_normalize(adapter, CRB_CTX_ADDR_REG_LO(func_id),
 		lower32(adapter->ctx_desc_phys_addr));
@@ -533,7 +531,7 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
 	u32 state = 0;
 	void *addr;
 	int err = 0;
-	int ctx, ring;
+	int ring;
 	struct netxen_recv_context *recv_ctx;
 	struct nx_host_rds_ring *rds_ring;
 
@@ -575,48 +573,46 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
 
 	hw->cmd_desc_head = (struct cmd_desc_type0 *)addr;
 
-	for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
-		recv_ctx = &adapter->recv_ctx[ctx];
-
-		for (ring = 0; ring < adapter->max_rds_rings; ring++) {
-			/* rx desc ring */
-			rds_ring = &recv_ctx->rds_rings[ring];
-			addr = pci_alloc_consistent(adapter->pdev,
-					RCV_DESC_RINGSIZE,
-					&rds_ring->phys_addr);
-			if (addr == NULL) {
-				printk(KERN_ERR "%s failed to allocate rx "
-					"desc ring[%d]\n",
-					netxen_nic_driver_name, ring);
-				err = -ENOMEM;
-				goto err_out_free;
-			}
-			rds_ring->desc_head = (struct rcv_desc *)addr;
-
-			if (adapter->fw_major < 4)
-				rds_ring->crb_rcv_producer =
-					recv_crb_registers[adapter->portnum].
-					crb_rcv_producer[ring];
-		}
+	recv_ctx = &adapter->recv_ctx;
 
-		/* status desc ring */
+	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+		/* rx desc ring */
+		rds_ring = &recv_ctx->rds_rings[ring];
 		addr = pci_alloc_consistent(adapter->pdev,
-				STATUS_DESC_RINGSIZE,
-				&recv_ctx->rcv_status_desc_phys_addr);
+				RCV_DESC_RINGSIZE,
+				&rds_ring->phys_addr);
 		if (addr == NULL) {
-			printk(KERN_ERR "%s failed to allocate sts desc ring\n",
-				netxen_nic_driver_name);
+			printk(KERN_ERR "%s failed to allocate rx "
+				"desc ring[%d]\n",
+				netxen_nic_driver_name, ring);
 			err = -ENOMEM;
 			goto err_out_free;
 		}
-		recv_ctx->rcv_status_desc_head = (struct status_desc *)addr;
+		rds_ring->desc_head = (struct rcv_desc *)addr;
 
 		if (adapter->fw_major < 4)
-			recv_ctx->crb_sts_consumer =
+			rds_ring->crb_rcv_producer =
 				recv_crb_registers[adapter->portnum].
-				crb_sts_consumer;
+				crb_rcv_producer[ring];
 	}
 
+	/* status desc ring */
+	addr = pci_alloc_consistent(adapter->pdev,
+			STATUS_DESC_RINGSIZE,
+			&recv_ctx->rcv_status_desc_phys_addr);
+	if (addr == NULL) {
+		printk(KERN_ERR "%s failed to allocate sts desc ring\n",
+			netxen_nic_driver_name);
+		err = -ENOMEM;
+		goto err_out_free;
+	}
+	recv_ctx->rcv_status_desc_head = (struct status_desc *)addr;
+
+	if (adapter->fw_major < 4)
+		recv_ctx->crb_sts_consumer =
+			recv_crb_registers[adapter->portnum].
+			crb_sts_consumer;
+
 	if (adapter->fw_major >= 4) {
 		adapter->intr_scheme = INTR_SCHEME_PERPORT;
 		adapter->msi_mode = MSI_MODE_MULTIFUNC;
@@ -654,7 +650,7 @@ void netxen_free_hw_resources(struct netxen_adapter *adapter)
 {
 	struct netxen_recv_context *recv_ctx;
 	struct nx_host_rds_ring *rds_ring;
-	int ctx, ring;
+	int ring;
 
 	if (adapter->fw_major >= 4) {
 		nx_fw_cmd_destroy_tx_ctx(adapter);
@@ -679,27 +675,25 @@ void netxen_free_hw_resources(struct netxen_adapter *adapter)
 		adapter->ahw.cmd_desc_head = NULL;
 	}
 
-	for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
-		recv_ctx = &adapter->recv_ctx[ctx];
-		for (ring = 0; ring < adapter->max_rds_rings; ring++) {
-			rds_ring = &recv_ctx->rds_rings[ring];
-
-			if (rds_ring->desc_head != NULL) {
-				pci_free_consistent(adapter->pdev,
-						RCV_DESC_RINGSIZE,
-						rds_ring->desc_head,
-						rds_ring->phys_addr);
-				rds_ring->desc_head = NULL;
-			}
-		}
+	recv_ctx = &adapter->recv_ctx;
+	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+		rds_ring = &recv_ctx->rds_rings[ring];
 
-		if (recv_ctx->rcv_status_desc_head != NULL) {
+		if (rds_ring->desc_head != NULL) {
 			pci_free_consistent(adapter->pdev,
-					STATUS_DESC_RINGSIZE,
-					recv_ctx->rcv_status_desc_head,
-					recv_ctx->rcv_status_desc_phys_addr);
-			recv_ctx->rcv_status_desc_head = NULL;
+					RCV_DESC_RINGSIZE,
+					rds_ring->desc_head,
+					rds_ring->phys_addr);
+			rds_ring->desc_head = NULL;
 		}
 	}
+
+	if (recv_ctx->rcv_status_desc_head != NULL) {
+		pci_free_consistent(adapter->pdev,
+				STATUS_DESC_RINGSIZE,
+				recv_ctx->rcv_status_desc_head,
+				recv_ctx->rcv_status_desc_phys_addr);
+		recv_ctx->rcv_status_desc_head = NULL;
+	}
 }
 
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index 6b25121cfc1b..f811880a57c5 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -474,16 +474,13 @@ static void
 netxen_nic_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
 {
 	struct netxen_adapter *adapter = netdev_priv(dev);
-	int i;
 
 	ring->rx_pending = 0;
 	ring->rx_jumbo_pending = 0;
-	for (i = 0; i < MAX_RCV_CTX; ++i) {
-		ring->rx_pending += adapter->recv_ctx[i].
-			rds_rings[RCV_DESC_NORMAL_CTXID].max_rx_desc_count;
-		ring->rx_jumbo_pending += adapter->recv_ctx[i].
-			rds_rings[RCV_DESC_JUMBO_CTXID].max_rx_desc_count;
-	}
+	ring->rx_pending += adapter->recv_ctx.
+		rds_rings[RCV_DESC_NORMAL_CTXID].max_rx_desc_count;
+	ring->rx_jumbo_pending += adapter->recv_ctx.
+		rds_rings[RCV_DESC_JUMBO_CTXID].max_rx_desc_count;
 	ring->tx_pending = adapter->max_tx_desc_count;
 
 	if (adapter->ahw.board_type == NETXEN_NIC_GBE)
diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h
index e589d4bbd9b3..016c62129c76 100644
--- a/drivers/net/netxen/netxen_nic_hdr.h
+++ b/drivers/net/netxen/netxen_nic_hdr.h
@@ -363,12 +363,6 @@ enum {
 #define NETXEN_HW_CRB_HUB_AGT_ADR_LPC	\
 	((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_LPC_CRB_AGT_ADR)
 
-/*
- * MAX_RCV_CTX : The number of receive contexts that are available on
- * the phantom.
- */
-#define MAX_RCV_CTX	1
-
 #define NETXEN_SRE_INT_STATUS		(NETXEN_CRB_SRE + 0x00034)
 #define NETXEN_SRE_PBI_ACTIVE_STATUS	(NETXEN_CRB_SRE + 0x01014)
 #define NETXEN_SRE_L1RE_CTL		(NETXEN_CRB_SRE + 0x03000)
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 72aba634554a..bd5e0d692230 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -49,8 +49,8 @@ static unsigned int crb_addr_xform[NETXEN_MAX_CRB_XFORM];
 
 #define NETXEN_NIC_XDMA_RESET	0x8000ff
 
-static void netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
-		uint32_t ctx, uint32_t ringid);
+static void
+netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter, uint32_t ringid);
 
 static void crb_addr_transform_setup(void)
 {
@@ -148,23 +148,21 @@ void netxen_release_rx_buffers(struct netxen_adapter *adapter)
 	struct netxen_recv_context *recv_ctx;
 	struct nx_host_rds_ring *rds_ring;
 	struct netxen_rx_buffer *rx_buf;
-	int i, ctxid, ring;
+	int i, ring;
 
-	for (ctxid = 0; ctxid < MAX_RCV_CTX; ++ctxid) {
-		recv_ctx = &adapter->recv_ctx[ctxid];
-		for (ring = 0; ring < adapter->max_rds_rings; ring++) {
-			rds_ring = &recv_ctx->rds_rings[ring];
-			for (i = 0; i < rds_ring->max_rx_desc_count; ++i) {
-				rx_buf = &(rds_ring->rx_buf_arr[i]);
-				if (rx_buf->state == NETXEN_BUFFER_FREE)
-					continue;
-				pci_unmap_single(adapter->pdev,
-						rx_buf->dma,
-						rds_ring->dma_size,
-						PCI_DMA_FROMDEVICE);
-				if (rx_buf->skb != NULL)
-					dev_kfree_skb_any(rx_buf->skb);
-			}
+	recv_ctx = &adapter->recv_ctx;
+	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+		rds_ring = &recv_ctx->rds_rings[ring];
+		for (i = 0; i < rds_ring->max_rx_desc_count; ++i) {
+			rx_buf = &(rds_ring->rx_buf_arr[i]);
+			if (rx_buf->state == NETXEN_BUFFER_FREE)
+				continue;
+			pci_unmap_single(adapter->pdev,
+					rx_buf->dma,
+					rds_ring->dma_size,
+					PCI_DMA_FROMDEVICE);
+			if (rx_buf->skb != NULL)
+				dev_kfree_skb_any(rx_buf->skb);
 		}
 	}
 }
@@ -205,18 +203,17 @@ void netxen_free_sw_resources(struct netxen_adapter *adapter)
 {
 	struct netxen_recv_context *recv_ctx;
 	struct nx_host_rds_ring *rds_ring;
-	int ctx, ring;
+	int ring;
 
-	for (ctx = 0; ctx < MAX_RCV_CTX; ctx++) {
-		recv_ctx = &adapter->recv_ctx[ctx];
-		for (ring = 0; ring < adapter->max_rds_rings; ring++) {
-			rds_ring = &recv_ctx->rds_rings[ring];
-			if (rds_ring->rx_buf_arr) {
-				vfree(rds_ring->rx_buf_arr);
-				rds_ring->rx_buf_arr = NULL;
-			}
+	recv_ctx = &adapter->recv_ctx;
+	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+		rds_ring = &recv_ctx->rds_rings[ring];
+		if (rds_ring->rx_buf_arr) {
+			vfree(rds_ring->rx_buf_arr);
+			rds_ring->rx_buf_arr = NULL;
 		}
 	}
+
 	if (adapter->cmd_buf_arr)
 		vfree(adapter->cmd_buf_arr);
 	return;
@@ -227,7 +224,7 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
 	struct netxen_recv_context *recv_ctx;
 	struct nx_host_rds_ring *rds_ring;
 	struct netxen_rx_buffer *rx_buf;
-	int ctx, ring, i, num_rx_bufs;
+	int ring, i, num_rx_bufs;
 
 	struct netxen_cmd_buffer *cmd_buf_arr;
 	struct net_device *netdev = adapter->netdev;
@@ -241,74 +238,72 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
 	memset(cmd_buf_arr, 0, TX_RINGSIZE);
 	adapter->cmd_buf_arr = cmd_buf_arr;
 
-	for (ctx = 0; ctx < MAX_RCV_CTX; ctx++) {
-		recv_ctx = &adapter->recv_ctx[ctx];
-		for (ring = 0; ring < adapter->max_rds_rings; ring++) {
-			rds_ring = &recv_ctx->rds_rings[ring];
-			switch (RCV_DESC_TYPE(ring)) {
-			case RCV_DESC_NORMAL:
-				rds_ring->max_rx_desc_count =
-					adapter->max_rx_desc_count;
-				rds_ring->flags = RCV_DESC_NORMAL;
-				if (adapter->ahw.cut_through) {
-					rds_ring->dma_size =
-						NX_CT_DEFAULT_RX_BUF_LEN;
-					rds_ring->skb_size =
-						NX_CT_DEFAULT_RX_BUF_LEN;
-				} else {
-					rds_ring->dma_size = RX_DMA_MAP_LEN;
-					rds_ring->skb_size =
-						MAX_RX_BUFFER_LENGTH;
-				}
-				break;
-
-			case RCV_DESC_JUMBO:
-				rds_ring->max_rx_desc_count =
-					adapter->max_jumbo_rx_desc_count;
-				rds_ring->flags = RCV_DESC_JUMBO;
-				if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
-					rds_ring->dma_size =
-						NX_P3_RX_JUMBO_BUF_MAX_LEN;
-				else
-					rds_ring->dma_size =
-						NX_P2_RX_JUMBO_BUF_MAX_LEN;
-				rds_ring->skb_size =
-					rds_ring->dma_size + NET_IP_ALIGN;
-				break;
+	recv_ctx = &adapter->recv_ctx;
+	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+		rds_ring = &recv_ctx->rds_rings[ring];
+		switch (RCV_DESC_TYPE(ring)) {
+		case RCV_DESC_NORMAL:
+			rds_ring->max_rx_desc_count =
+				adapter->max_rx_desc_count;
+			rds_ring->flags = RCV_DESC_NORMAL;
+			if (adapter->ahw.cut_through) {
+				rds_ring->dma_size =
+					NX_CT_DEFAULT_RX_BUF_LEN;
+				rds_ring->skb_size =
+					NX_CT_DEFAULT_RX_BUF_LEN;
+			} else {
+				rds_ring->dma_size = RX_DMA_MAP_LEN;
+				rds_ring->skb_size =
+					MAX_RX_BUFFER_LENGTH;
+			}
+			break;
 
-			case RCV_RING_LRO:
-				rds_ring->max_rx_desc_count =
-					adapter->max_lro_rx_desc_count;
-				rds_ring->flags = RCV_DESC_LRO;
-				rds_ring->dma_size = RX_LRO_DMA_MAP_LEN;
-				rds_ring->skb_size = MAX_RX_LRO_BUFFER_LENGTH;
-				break;
+		case RCV_DESC_JUMBO:
+			rds_ring->max_rx_desc_count =
+				adapter->max_jumbo_rx_desc_count;
+			rds_ring->flags = RCV_DESC_JUMBO;
+			if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+				rds_ring->dma_size =
+					NX_P3_RX_JUMBO_BUF_MAX_LEN;
+			else
+				rds_ring->dma_size =
+					NX_P2_RX_JUMBO_BUF_MAX_LEN;
+			rds_ring->skb_size =
+				rds_ring->dma_size + NET_IP_ALIGN;
+			break;
 
-			}
-			rds_ring->rx_buf_arr = (struct netxen_rx_buffer *)
-				vmalloc(RCV_BUFFSIZE);
-			if (rds_ring->rx_buf_arr == NULL) {
-				printk(KERN_ERR "%s: Failed to allocate "
-					"rx buffer ring %d\n",
-					netdev->name, ring);
-				/* free whatever was already allocated */
-				goto err_out;
-			}
-			memset(rds_ring->rx_buf_arr, 0, RCV_BUFFSIZE);
-			INIT_LIST_HEAD(&rds_ring->free_list);
-			/*
-			 * Now go through all of them, set reference handles
-			 * and put them in the queues.
-			 */
-			num_rx_bufs = rds_ring->max_rx_desc_count;
-			rx_buf = rds_ring->rx_buf_arr;
-			for (i = 0; i < num_rx_bufs; i++) {
-				list_add_tail(&rx_buf->list,
-						&rds_ring->free_list);
-				rx_buf->ref_handle = i;
-				rx_buf->state = NETXEN_BUFFER_FREE;
-				rx_buf++;
-			}
+		case RCV_RING_LRO:
+			rds_ring->max_rx_desc_count =
+				adapter->max_lro_rx_desc_count;
+			rds_ring->flags = RCV_DESC_LRO;
+			rds_ring->dma_size = RX_LRO_DMA_MAP_LEN;
+			rds_ring->skb_size = MAX_RX_LRO_BUFFER_LENGTH;
+			break;
+
+		}
+		rds_ring->rx_buf_arr = (struct netxen_rx_buffer *)
+			vmalloc(RCV_BUFFSIZE);
+		if (rds_ring->rx_buf_arr == NULL) {
+			printk(KERN_ERR "%s: Failed to allocate "
+				"rx buffer ring %d\n",
+				netdev->name, ring);
+			/* free whatever was already allocated */
+			goto err_out;
+		}
+		memset(rds_ring->rx_buf_arr, 0, RCV_BUFFSIZE);
+		INIT_LIST_HEAD(&rds_ring->free_list);
+		/*
+		 * Now go through all of them, set reference handles
+		 * and put them in the queues.
+		 */
+		num_rx_bufs = rds_ring->max_rx_desc_count;
+		rx_buf = rds_ring->rx_buf_arr;
+		for (i = 0; i < num_rx_bufs; i++) {
+			list_add_tail(&rx_buf->list,
+					&rds_ring->free_list);
+			rx_buf->ref_handle = i;
+			rx_buf->state = NETXEN_BUFFER_FREE;
+			rx_buf++;
 		}
 	}
 
@@ -838,13 +833,13 @@ no_skb:
 	return skb;
 }
 
-static void netxen_process_rcv(struct netxen_adapter *adapter, int ctxid,
+static void netxen_process_rcv(struct netxen_adapter *adapter,
 		struct status_desc *desc)
 {
 	struct net_device *netdev = adapter->netdev;
 	u64 sts_data = le64_to_cpu(desc->status_desc_data);
 	int index = netxen_get_sts_refhandle(sts_data);
-	struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctxid]);
+	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
 	struct netxen_rx_buffer *buffer;
 	struct sk_buff *skb;
 	u32 length = netxen_get_sts_totallength(sts_data);
@@ -902,10 +897,10 @@ static void netxen_process_rcv(struct netxen_adapter *adapter, int ctxid,
 	adapter->stats.rxbytes += length;
 }
 
-/* Process Receive status ring */
-u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctxid, int max)
+int
+netxen_process_rcv_ring(struct netxen_adapter *adapter, int max)
 {
-	struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctxid]);
+	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
 	struct status_desc *desc_head = recv_ctx->rcv_status_desc_head;
 	struct status_desc *desc;
 	u32 consumer = recv_ctx->status_rx_consumer;
@@ -922,7 +917,7 @@ u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctxid, int max)
 
 		opcode = netxen_get_sts_opcode(sts_data);
 
-		netxen_process_rcv(adapter, ctxid, desc);
+		netxen_process_rcv(adapter, desc);
 
 		desc->status_desc_data = cpu_to_le64(STATUS_OWNER_PHANTOM);
 
@@ -932,7 +927,7 @@ u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctxid, int max)
 	}
 
 	for (ring = 0; ring < adapter->max_rds_rings; ring++)
-		netxen_post_rx_buffers_nodb(adapter, ctxid, ring);
+		netxen_post_rx_buffers_nodb(adapter, ring);
 
 	if (count) {
 		recv_ctx->status_rx_consumer = consumer;
@@ -1013,14 +1008,12 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
 	return (done);
 }
 
-/*
- * netxen_post_rx_buffers puts buffer in the Phantom memory
- */
-void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid)
+void
+netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid)
 {
 	struct pci_dev *pdev = adapter->pdev;
 	struct sk_buff *skb;
-	struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctx]);
+	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
 	struct nx_host_rds_ring *rds_ring = NULL;
 	uint producer;
 	struct rcv_desc *pdesc;
@@ -1098,12 +1091,12 @@ void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid)
 	}
 }
 
-static void netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
-		uint32_t ctx, uint32_t ringid)
+static void
+netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter, uint32_t ringid)
 {
 	struct pci_dev *pdev = adapter->pdev;
 	struct sk_buff *skb;
-	struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctx]);
+	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
 	struct nx_host_rds_ring *rds_ring = NULL;
 	u32 producer;
 	struct rcv_desc *pdesc;
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 2953a83bc856..3b4d923f947d 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -790,7 +790,7 @@ netxen_nic_attach(struct netxen_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
 	struct pci_dev *pdev = adapter->pdev;
-	int err, ctx, ring;
+	int err, ring;
 
 	err = netxen_init_firmware(adapter);
 	if (err != 0) {
@@ -829,10 +829,8 @@ netxen_nic_attach(struct netxen_adapter *adapter)
 		netxen_nic_update_cmd_consumer(adapter, 0);
 	}
 
-	for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
-		for (ring = 0; ring < adapter->max_rds_rings; ring++)
-			netxen_post_rx_buffers(adapter, ctx, ring);
-	}
+	for (ring = 0; ring < adapter->max_rds_rings; ring++)
+		netxen_post_rx_buffers(adapter, ring);
 
 	err = netxen_nic_request_irq(adapter);
 	if (err) {
@@ -1640,30 +1638,14 @@ static irqreturn_t netxen_msix_intr(int irq, void *data)
 
 static int netxen_nic_poll(struct napi_struct *napi, int budget)
 {
-	struct netxen_adapter *adapter = container_of(napi, struct netxen_adapter, napi);
+	struct netxen_adapter *adapter =
+		container_of(napi, struct netxen_adapter, napi);
 	int tx_complete;
-	int ctx;
 	int work_done;
 
 	tx_complete = netxen_process_cmd_ring(adapter);
 
-	work_done = 0;
-	for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
-		/*
-		 * Fairness issue. This will give undue weight to the
-		 * receive context 0.
-		 */
-
-		/*
-		 * To avoid starvation, we give each of our receivers,
-		 * a fraction of the quota. Sometimes, it might happen that we
-		 * have enough quota to process every packet, but since all the
-		 * packets are on one context, it gets only half of the quota,
-		 * and ends up not processing it.
-		 */
-		work_done += netxen_process_rcv_ring(adapter, ctx,
-				budget / MAX_RCV_CTX);
-	}
+	work_done = netxen_process_rcv_ring(adapter, budget);
 
 	if ((work_done < budget) && tx_complete) {
 		napi_complete(&adapter->napi);