Diffstat (limited to 'drivers/net/netxen')

 -rw-r--r--   drivers/net/netxen/netxen_nic.h       |  74
 -rw-r--r--   drivers/net/netxen/netxen_nic_ctx.c   | 117
 -rw-r--r--   drivers/net/netxen/netxen_nic_hw.c    |  47
 -rw-r--r--   drivers/net/netxen/netxen_nic_init.c  | 201
 -rw-r--r--   drivers/net/netxen/netxen_nic_main.c  | 157

 5 files changed, 404 insertions, 192 deletions
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 56fad22fed9..595171d943f 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -78,16 +78,17 @@
78 | 78 | ||
79 | #define PHAN_VENDOR_ID 0x4040 | 79 | #define PHAN_VENDOR_ID 0x4040 |
80 | 80 | ||
81 | #define RCV_DESC_RINGSIZE \ | 81 | #define RCV_DESC_RINGSIZE(rds_ring) \ |
82 | (sizeof(struct rcv_desc) * adapter->num_rxd) | 82 | (sizeof(struct rcv_desc) * (rds_ring)->num_desc) |
83 | #define STATUS_DESC_RINGSIZE \ | 83 | #define RCV_BUFF_RINGSIZE(rds_ring) \ |
84 | (sizeof(struct status_desc) * adapter->num_rxd) | ||
85 | #define LRO_DESC_RINGSIZE \ | ||
86 | (sizeof(rcvDesc_t) * adapter->num_lro_rxd) | ||
87 | #define TX_RINGSIZE \ | ||
88 | (sizeof(struct netxen_cmd_buffer) * adapter->num_txd) | ||
89 | #define RCV_BUFFSIZE \ | ||
90 | (sizeof(struct netxen_rx_buffer) * rds_ring->num_desc) | 84 | (sizeof(struct netxen_rx_buffer) * rds_ring->num_desc) |
85 | #define STATUS_DESC_RINGSIZE(sds_ring) \ | ||
86 | (sizeof(struct status_desc) * (sds_ring)->num_desc) | ||
87 | #define TX_BUFF_RINGSIZE(adapter) \ | ||
88 | (sizeof(struct netxen_cmd_buffer) * adapter->num_txd) | ||
89 | #define TX_DESC_RINGSIZE(adapter) \ | ||
90 | (sizeof(struct cmd_desc_type0) * adapter->num_txd) | ||
91 | |||
91 | #define find_diff_among(a,b,range) ((a)<(b)?((b)-(a)):((b)+(range)-(a))) | 92 | #define find_diff_among(a,b,range) ((a)<(b)?((b)-(a)):((b)+(range)-(a))) |
92 | 93 | ||
93 | #define NETXEN_RCV_PRODUCER_OFFSET 0 | 94 | #define NETXEN_RCV_PRODUCER_OFFSET 0 |
@@ -188,7 +189,8 @@
188 | /* Host writes the following to notify that it has done the init-handshake */ | 189 | /* Host writes the following to notify that it has done the init-handshake */ |
189 | #define PHAN_INITIALIZE_ACK 0xf00f | 190 | #define PHAN_INITIALIZE_ACK 0xf00f |
190 | 191 | ||
191 | #define NUM_RCV_DESC_RINGS 3 /* No of Rcv Descriptor contexts */ | 192 | #define NUM_RCV_DESC_RINGS 3 |
193 | #define NUM_STS_DESC_RINGS 4 | ||
192 | 194 | ||
193 | #define RCV_RING_NORMAL 0 | 195 | #define RCV_RING_NORMAL 0 |
194 | #define RCV_RING_JUMBO 1 | 196 | #define RCV_RING_JUMBO 1 |
@@ -722,7 +724,7 @@ extern char netxen_nic_driver_name[];
722 | #endif | 724 | #endif |
723 | 725 | ||
724 | /* Number of status descriptors to handle per interrupt */ | 726 | /* Number of status descriptors to handle per interrupt */ |
725 | #define MAX_STATUS_HANDLE (128) | 727 | #define MAX_STATUS_HANDLE (64) |
726 | 728 | ||
727 | /* | 729 | /* |
728 | * netxen_skb_frag{} is to contain mapping info for each SG list. This | 730 | * netxen_skb_frag{} is to contain mapping info for each SG list. This |
@@ -827,17 +829,37 @@ struct netxen_adapter_stats {
827 | */ | 829 | */ |
828 | struct nx_host_rds_ring { | 830 | struct nx_host_rds_ring { |
829 | u32 producer; | 831 | u32 producer; |
830 | u32 crb_rcv_producer; /* reg offset */ | 832 | u32 crb_rcv_producer; |
831 | struct rcv_desc *desc_head; /* address of rx ring in Phantom */ | ||
832 | struct netxen_rx_buffer *rx_buf_arr; /* rx buffers for receive */ | ||
833 | struct list_head free_list; | ||
834 | u32 num_desc; | 833 | u32 num_desc; |
835 | u32 dma_size; | 834 | u32 dma_size; |
836 | u32 skb_size; | 835 | u32 skb_size; |
837 | u32 flags; | 836 | u32 flags; |
837 | struct rcv_desc *desc_head; | ||
838 | struct netxen_rx_buffer *rx_buf_arr; | ||
839 | struct list_head free_list; | ||
840 | spinlock_t lock; | ||
838 | dma_addr_t phys_addr; | 841 | dma_addr_t phys_addr; |
839 | }; | 842 | }; |
840 | 843 | ||
844 | struct nx_host_sds_ring { | ||
845 | u32 consumer; | ||
846 | u32 crb_sts_consumer; | ||
847 | u32 crb_intr_mask; | ||
848 | u32 num_desc; | ||
849 | |||
850 | struct status_desc *desc_head; | ||
851 | struct netxen_adapter *adapter; | ||
852 | struct napi_struct napi; | ||
853 | struct list_head free_list[NUM_RCV_DESC_RINGS]; | ||
854 | |||
855 | u16 clean_tx; | ||
856 | u16 post_rxd; | ||
857 | int irq; | ||
858 | |||
859 | dma_addr_t phys_addr; | ||
860 | char name[IFNAMSIZ+4]; | ||
861 | }; | ||
862 | |||
841 | /* | 863 | /* |
842 | * Receive context. There is one such structure per instance of the | 864 | * Receive context. There is one such structure per instance of the |
843 | * receive processing. Any state information that is relevant to | 865 | * receive processing. Any state information that is relevant to |
@@ -850,10 +872,7 @@ struct netxen_recv_context {
850 | u16 virt_port; | 872 | u16 virt_port; |
851 | 873 | ||
852 | struct nx_host_rds_ring rds_rings[NUM_RCV_DESC_RINGS]; | 874 | struct nx_host_rds_ring rds_rings[NUM_RCV_DESC_RINGS]; |
853 | u32 status_rx_consumer; | 875 | struct nx_host_sds_ring sds_rings[NUM_STS_DESC_RINGS]; |
854 | u32 crb_sts_consumer; /* reg offset */ | ||
855 | dma_addr_t rcv_status_desc_phys_addr; | ||
856 | struct status_desc *rcv_status_desc_head; | ||
857 | }; | 876 | }; |
858 | 877 | ||
859 | /* New HW context creation */ | 878 | /* New HW context creation */ |
@@ -1179,13 +1198,13 @@ typedef struct {
1179 | #define NETXEN_IS_MSI_FAMILY(adapter) \ | 1198 | #define NETXEN_IS_MSI_FAMILY(adapter) \ |
1180 | ((adapter)->flags & (NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED)) | 1199 | ((adapter)->flags & (NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED)) |
1181 | 1200 | ||
1182 | #define MSIX_ENTRIES_PER_ADAPTER 1 | 1201 | #define MSIX_ENTRIES_PER_ADAPTER NUM_STS_DESC_RINGS |
1183 | #define NETXEN_MSIX_TBL_SPACE 8192 | 1202 | #define NETXEN_MSIX_TBL_SPACE 8192 |
1184 | #define NETXEN_PCI_REG_MSIX_TBL 0x44 | 1203 | #define NETXEN_PCI_REG_MSIX_TBL 0x44 |
1185 | 1204 | ||
1186 | #define NETXEN_DB_MAPSIZE_BYTES 0x1000 | 1205 | #define NETXEN_DB_MAPSIZE_BYTES 0x1000 |
1187 | 1206 | ||
1188 | #define NETXEN_NETDEV_WEIGHT 120 | 1207 | #define NETXEN_NETDEV_WEIGHT 128 |
1189 | #define NETXEN_ADAPTER_UP_MAGIC 777 | 1208 | #define NETXEN_ADAPTER_UP_MAGIC 777 |
1190 | #define NETXEN_NIC_PEG_TUNE 0 | 1209 | #define NETXEN_NIC_PEG_TUNE 0 |
1191 | 1210 | ||
@@ -1200,7 +1219,6 @@ struct netxen_adapter {
1200 | struct net_device *netdev; | 1219 | struct net_device *netdev; |
1201 | struct pci_dev *pdev; | 1220 | struct pci_dev *pdev; |
1202 | int pci_using_dac; | 1221 | int pci_using_dac; |
1203 | struct napi_struct napi; | ||
1204 | struct net_device_stats net_stats; | 1222 | struct net_device_stats net_stats; |
1205 | int mtu; | 1223 | int mtu; |
1206 | int portnum; | 1224 | int portnum; |
@@ -1212,7 +1230,6 @@ struct netxen_adapter {
1212 | nx_mac_list_t *mac_list; | 1230 | nx_mac_list_t *mac_list; |
1213 | 1231 | ||
1214 | struct netxen_legacy_intr_set legacy_intr; | 1232 | struct netxen_legacy_intr_set legacy_intr; |
1215 | u32 crb_intr_mask; | ||
1216 | 1233 | ||
1217 | struct work_struct watchdog_task; | 1234 | struct work_struct watchdog_task; |
1218 | struct timer_list watchdog_timer; | 1235 | struct timer_list watchdog_timer; |
@@ -1227,6 +1244,7 @@ struct netxen_adapter {
1227 | u32 last_cmd_consumer; | 1244 | u32 last_cmd_consumer; |
1228 | u32 crb_addr_cmd_producer; | 1245 | u32 crb_addr_cmd_producer; |
1229 | u32 crb_addr_cmd_consumer; | 1246 | u32 crb_addr_cmd_consumer; |
1247 | spinlock_t tx_clean_lock; | ||
1230 | 1248 | ||
1231 | u32 num_txd; | 1249 | u32 num_txd; |
1232 | u32 num_rxd; | 1250 | u32 num_rxd; |
@@ -1234,6 +1252,7 @@ struct netxen_adapter {
1234 | u32 num_lro_rxd; | 1252 | u32 num_lro_rxd; |
1235 | 1253 | ||
1236 | int max_rds_rings; | 1254 | int max_rds_rings; |
1255 | int max_sds_rings; | ||
1237 | 1256 | ||
1238 | u32 flags; | 1257 | u32 flags; |
1239 | u32 irq; | 1258 | u32 irq; |
@@ -1243,8 +1262,7 @@ struct netxen_adapter {
1243 | u32 fw_major; | 1262 | u32 fw_major; |
1244 | u32 fw_version; | 1263 | u32 fw_version; |
1245 | 1264 | ||
1246 | u8 msix_supported; | 1265 | int msix_supported; |
1247 | u8 max_possible_rss_rings; | ||
1248 | struct msix_entry msix_entries[MSIX_ENTRIES_PER_ADAPTER]; | 1266 | struct msix_entry msix_entries[MSIX_ENTRIES_PER_ADAPTER]; |
1249 | 1267 | ||
1250 | struct netxen_adapter_stats stats; | 1268 | struct netxen_adapter_stats stats; |
@@ -1447,14 +1465,16 @@ void netxen_initialize_adapter_ops(struct netxen_adapter *adapter);
1447 | int netxen_init_firmware(struct netxen_adapter *adapter); | 1465 | int netxen_init_firmware(struct netxen_adapter *adapter); |
1448 | void netxen_nic_clear_stats(struct netxen_adapter *adapter); | 1466 | void netxen_nic_clear_stats(struct netxen_adapter *adapter); |
1449 | void netxen_watchdog_task(struct work_struct *work); | 1467 | void netxen_watchdog_task(struct work_struct *work); |
1450 | void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid); | 1468 | void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid, |
1469 | struct nx_host_rds_ring *rds_ring); | ||
1451 | int netxen_process_cmd_ring(struct netxen_adapter *adapter); | 1470 | int netxen_process_cmd_ring(struct netxen_adapter *adapter); |
1452 | int netxen_process_rcv_ring(struct netxen_adapter *adapter, int max); | 1471 | int netxen_process_rcv_ring(struct nx_host_sds_ring *sds_ring, int max); |
1453 | void netxen_p2_nic_set_multi(struct net_device *netdev); | 1472 | void netxen_p2_nic_set_multi(struct net_device *netdev); |
1454 | void netxen_p3_nic_set_multi(struct net_device *netdev); | 1473 | void netxen_p3_nic_set_multi(struct net_device *netdev); |
1455 | void netxen_p3_free_mac_list(struct netxen_adapter *adapter); | 1474 | void netxen_p3_free_mac_list(struct netxen_adapter *adapter); |
1456 | int netxen_p3_nic_set_promisc(struct netxen_adapter *adapter, u32); | 1475 | int netxen_p3_nic_set_promisc(struct netxen_adapter *adapter, u32); |
1457 | int netxen_config_intr_coalesce(struct netxen_adapter *adapter); | 1476 | int netxen_config_intr_coalesce(struct netxen_adapter *adapter); |
1477 | int netxen_config_rss(struct netxen_adapter *adapter, int enable); | ||
1458 | 1478 | ||
1459 | int nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu); | 1479 | int nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu); |
1460 | int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu); | 1480 | int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu); |
diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c
index 2e66335bd00..9234473bc08 100644
--- a/drivers/net/netxen/netxen_nic_ctx.c
+++ b/drivers/net/netxen/netxen_nic_ctx.c
@@ -169,6 +169,7 @@ nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
169 | nx_cardrsp_rds_ring_t *prsp_rds; | 169 | nx_cardrsp_rds_ring_t *prsp_rds; |
170 | nx_cardrsp_sds_ring_t *prsp_sds; | 170 | nx_cardrsp_sds_ring_t *prsp_sds; |
171 | struct nx_host_rds_ring *rds_ring; | 171 | struct nx_host_rds_ring *rds_ring; |
172 | struct nx_host_sds_ring *sds_ring; | ||
172 | 173 | ||
173 | dma_addr_t hostrq_phys_addr, cardrsp_phys_addr; | 174 | dma_addr_t hostrq_phys_addr, cardrsp_phys_addr; |
174 | u64 phys_addr; | 175 | u64 phys_addr; |
@@ -181,9 +182,8 @@ nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
181 | 182 | ||
182 | struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; | 183 | struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; |
183 | 184 | ||
184 | /* only one sds ring for now */ | ||
185 | nrds_rings = adapter->max_rds_rings; | 185 | nrds_rings = adapter->max_rds_rings; |
186 | nsds_rings = 1; | 186 | nsds_rings = adapter->max_sds_rings; |
187 | 187 | ||
188 | rq_size = | 188 | rq_size = |
189 | SIZEOF_HOSTRQ_RX(nx_hostrq_rx_ctx_t, nrds_rings, nsds_rings); | 189 | SIZEOF_HOSTRQ_RX(nx_hostrq_rx_ctx_t, nrds_rings, nsds_rings); |
@@ -239,11 +239,14 @@ nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
239 | prq_sds = (nx_hostrq_sds_ring_t *)(prq->data + | 239 | prq_sds = (nx_hostrq_sds_ring_t *)(prq->data + |
240 | le32_to_cpu(prq->sds_ring_offset)); | 240 | le32_to_cpu(prq->sds_ring_offset)); |
241 | 241 | ||
242 | prq_sds[0].host_phys_addr = | 242 | for (i = 0; i < nsds_rings; i++) { |
243 | cpu_to_le64(recv_ctx->rcv_status_desc_phys_addr); | 243 | |
244 | prq_sds[0].ring_size = cpu_to_le32(adapter->num_rxd); | 244 | sds_ring = &recv_ctx->sds_rings[i]; |
245 | /* only one msix vector for now */ | 245 | |
246 | prq_sds[0].msi_index = cpu_to_le16(0); | 246 | prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr); |
247 | prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc); | ||
248 | prq_sds[i].msi_index = cpu_to_le16(i); | ||
249 | } | ||
247 | 250 | ||
248 | phys_addr = hostrq_phys_addr; | 251 | phys_addr = hostrq_phys_addr; |
249 | err = netxen_issue_cmd(adapter, | 252 | err = netxen_issue_cmd(adapter, |
@@ -272,11 +275,16 @@ nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
272 | 275 | ||
273 | prsp_sds = ((nx_cardrsp_sds_ring_t *) | 276 | prsp_sds = ((nx_cardrsp_sds_ring_t *) |
274 | &prsp->data[le32_to_cpu(prsp->sds_ring_offset)]); | 277 | &prsp->data[le32_to_cpu(prsp->sds_ring_offset)]); |
275 | reg = le32_to_cpu(prsp_sds[0].host_consumer_crb); | ||
276 | recv_ctx->crb_sts_consumer = NETXEN_NIC_REG(reg - 0x200); | ||
277 | 278 | ||
278 | reg = le32_to_cpu(prsp_sds[0].interrupt_crb); | 279 | for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) { |
279 | adapter->crb_intr_mask = NETXEN_NIC_REG(reg - 0x200); | 280 | sds_ring = &recv_ctx->sds_rings[i]; |
281 | |||
282 | reg = le32_to_cpu(prsp_sds[i].host_consumer_crb); | ||
283 | sds_ring->crb_sts_consumer = NETXEN_NIC_REG(reg - 0x200); | ||
284 | |||
285 | reg = le32_to_cpu(prsp_sds[i].interrupt_crb); | ||
286 | sds_ring->crb_intr_mask = NETXEN_NIC_REG(reg - 0x200); | ||
287 | } | ||
280 | 288 | ||
281 | recv_ctx->state = le32_to_cpu(prsp->host_ctx_state); | 289 | recv_ctx->state = le32_to_cpu(prsp->host_ctx_state); |
282 | recv_ctx->context_id = le16_to_cpu(prsp->context_id); | 290 | recv_ctx->context_id = le16_to_cpu(prsp->context_id); |
@@ -488,6 +496,7 @@ netxen_init_old_ctx(struct netxen_adapter *adapter)
488 | { | 496 | { |
489 | struct netxen_recv_context *recv_ctx; | 497 | struct netxen_recv_context *recv_ctx; |
490 | struct nx_host_rds_ring *rds_ring; | 498 | struct nx_host_rds_ring *rds_ring; |
499 | struct nx_host_sds_ring *sds_ring; | ||
491 | int ring; | 500 | int ring; |
492 | int func_id = adapter->portnum; | 501 | int func_id = adapter->portnum; |
493 | 502 | ||
@@ -506,10 +515,9 @@ netxen_init_old_ctx(struct netxen_adapter *adapter)
506 | adapter->ctx_desc->rcv_ctx[ring].rcv_ring_size = | 515 | adapter->ctx_desc->rcv_ctx[ring].rcv_ring_size = |
507 | cpu_to_le32(rds_ring->num_desc); | 516 | cpu_to_le32(rds_ring->num_desc); |
508 | } | 517 | } |
509 | adapter->ctx_desc->sts_ring_addr = | 518 | sds_ring = &recv_ctx->sds_rings[0]; |
510 | cpu_to_le64(recv_ctx->rcv_status_desc_phys_addr); | 519 | adapter->ctx_desc->sts_ring_addr = cpu_to_le64(sds_ring->phys_addr); |
511 | adapter->ctx_desc->sts_ring_size = | 520 | adapter->ctx_desc->sts_ring_size = cpu_to_le32(sds_ring->num_desc); |
512 | cpu_to_le32(adapter->num_rxd); | ||
513 | 521 | ||
514 | adapter->pci_write_normalize(adapter, CRB_CTX_ADDR_REG_LO(func_id), | 522 | adapter->pci_write_normalize(adapter, CRB_CTX_ADDR_REG_LO(func_id), |
515 | lower32(adapter->ctx_desc_phys_addr)); | 523 | lower32(adapter->ctx_desc_phys_addr)); |
@@ -534,6 +542,10 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
534 | int ring; | 542 | int ring; |
535 | struct netxen_recv_context *recv_ctx; | 543 | struct netxen_recv_context *recv_ctx; |
536 | struct nx_host_rds_ring *rds_ring; | 544 | struct nx_host_rds_ring *rds_ring; |
545 | struct nx_host_sds_ring *sds_ring; | ||
546 | |||
547 | struct pci_dev *pdev = adapter->pdev; | ||
548 | struct net_device *netdev = adapter->netdev; | ||
537 | 549 | ||
538 | err = netxen_receive_peg_ready(adapter); | 550 | err = netxen_receive_peg_ready(adapter); |
539 | if (err) { | 551 | if (err) { |
@@ -542,12 +554,12 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
542 | return err; | 554 | return err; |
543 | } | 555 | } |
544 | 556 | ||
545 | addr = pci_alloc_consistent(adapter->pdev, | 557 | addr = pci_alloc_consistent(pdev, |
546 | sizeof(struct netxen_ring_ctx) + sizeof(uint32_t), | 558 | sizeof(struct netxen_ring_ctx) + sizeof(uint32_t), |
547 | &adapter->ctx_desc_phys_addr); | 559 | &adapter->ctx_desc_phys_addr); |
548 | 560 | ||
549 | if (addr == NULL) { | 561 | if (addr == NULL) { |
550 | DPRINTK(ERR, "failed to allocate hw context\n"); | 562 | dev_err(&pdev->dev, "failed to allocate hw context\n"); |
551 | return -ENOMEM; | 563 | return -ENOMEM; |
552 | } | 564 | } |
553 | memset(addr, 0, sizeof(struct netxen_ring_ctx)); | 565 | memset(addr, 0, sizeof(struct netxen_ring_ctx)); |
@@ -560,14 +572,13 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
560 | (__le32 *)(((char *)addr) + sizeof(struct netxen_ring_ctx)); | 572 | (__le32 *)(((char *)addr) + sizeof(struct netxen_ring_ctx)); |
561 | 573 | ||
562 | /* cmd desc ring */ | 574 | /* cmd desc ring */ |
563 | addr = pci_alloc_consistent(adapter->pdev, | 575 | addr = pci_alloc_consistent(pdev, |
564 | sizeof(struct cmd_desc_type0) * | 576 | TX_DESC_RINGSIZE(adapter), |
565 | adapter->num_txd, | ||
566 | &hw->cmd_desc_phys_addr); | 577 | &hw->cmd_desc_phys_addr); |
567 | 578 | ||
568 | if (addr == NULL) { | 579 | if (addr == NULL) { |
569 | printk(KERN_ERR "%s failed to allocate tx desc ring\n", | 580 | dev_err(&pdev->dev, "%s: failed to allocate tx desc ring\n", |
570 | netxen_nic_driver_name); | 581 | netdev->name); |
571 | return -ENOMEM; | 582 | return -ENOMEM; |
572 | } | 583 | } |
573 | 584 | ||
@@ -576,15 +587,14 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
576 | recv_ctx = &adapter->recv_ctx; | 587 | recv_ctx = &adapter->recv_ctx; |
577 | 588 | ||
578 | for (ring = 0; ring < adapter->max_rds_rings; ring++) { | 589 | for (ring = 0; ring < adapter->max_rds_rings; ring++) { |
579 | /* rx desc ring */ | ||
580 | rds_ring = &recv_ctx->rds_rings[ring]; | 590 | rds_ring = &recv_ctx->rds_rings[ring]; |
581 | addr = pci_alloc_consistent(adapter->pdev, | 591 | addr = pci_alloc_consistent(adapter->pdev, |
582 | RCV_DESC_RINGSIZE, | 592 | RCV_DESC_RINGSIZE(rds_ring), |
583 | &rds_ring->phys_addr); | 593 | &rds_ring->phys_addr); |
584 | if (addr == NULL) { | 594 | if (addr == NULL) { |
585 | printk(KERN_ERR "%s failed to allocate rx " | 595 | dev_err(&pdev->dev, |
586 | "desc ring[%d]\n", | 596 | "%s: failed to allocate rds ring [%d]\n", |
587 | netxen_nic_driver_name, ring); | 597 | netdev->name, ring); |
588 | err = -ENOMEM; | 598 | err = -ENOMEM; |
589 | goto err_out_free; | 599 | goto err_out_free; |
590 | } | 600 | } |
@@ -596,22 +606,22 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
596 | crb_rcv_producer[ring]; | 606 | crb_rcv_producer[ring]; |
597 | } | 607 | } |
598 | 608 | ||
599 | /* status desc ring */ | 609 | for (ring = 0; ring < adapter->max_sds_rings; ring++) { |
600 | addr = pci_alloc_consistent(adapter->pdev, | 610 | sds_ring = &recv_ctx->sds_rings[ring]; |
601 | STATUS_DESC_RINGSIZE, | 611 | |
602 | &recv_ctx->rcv_status_desc_phys_addr); | 612 | addr = pci_alloc_consistent(adapter->pdev, |
603 | if (addr == NULL) { | 613 | STATUS_DESC_RINGSIZE(sds_ring), |
604 | printk(KERN_ERR "%s failed to allocate sts desc ring\n", | 614 | &sds_ring->phys_addr); |
605 | netxen_nic_driver_name); | 615 | if (addr == NULL) { |
606 | err = -ENOMEM; | 616 | dev_err(&pdev->dev, |
607 | goto err_out_free; | 617 | "%s: failed to allocate sds ring [%d]\n", |
618 | netdev->name, ring); | ||
619 | err = -ENOMEM; | ||
620 | goto err_out_free; | ||
621 | } | ||
622 | sds_ring->desc_head = (struct status_desc *)addr; | ||
608 | } | 623 | } |
609 | recv_ctx->rcv_status_desc_head = (struct status_desc *)addr; | ||
610 | 624 | ||
611 | if (adapter->fw_major < 4) | ||
612 | recv_ctx->crb_sts_consumer = | ||
613 | recv_crb_registers[adapter->portnum]. | ||
614 | crb_sts_consumer; | ||
615 | 625 | ||
616 | if (adapter->fw_major >= 4) { | 626 | if (adapter->fw_major >= 4) { |
617 | adapter->intr_scheme = INTR_SCHEME_PERPORT; | 627 | adapter->intr_scheme = INTR_SCHEME_PERPORT; |
@@ -624,12 +634,16 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
624 | if (err) | 634 | if (err) |
625 | goto err_out_free; | 635 | goto err_out_free; |
626 | } else { | 636 | } else { |
637 | sds_ring = &recv_ctx->sds_rings[0]; | ||
638 | sds_ring->crb_sts_consumer = | ||
639 | recv_crb_registers[adapter->portnum].crb_sts_consumer; | ||
627 | 640 | ||
628 | adapter->intr_scheme = adapter->pci_read_normalize(adapter, | 641 | adapter->intr_scheme = adapter->pci_read_normalize(adapter, |
629 | CRB_NIC_CAPABILITIES_FW); | 642 | CRB_NIC_CAPABILITIES_FW); |
630 | adapter->msi_mode = adapter->pci_read_normalize(adapter, | 643 | adapter->msi_mode = adapter->pci_read_normalize(adapter, |
631 | CRB_NIC_MSI_MODE_FW); | 644 | CRB_NIC_MSI_MODE_FW); |
632 | adapter->crb_intr_mask = sw_int_mask[adapter->portnum]; | 645 | recv_ctx->sds_rings[0].crb_intr_mask = |
646 | sw_int_mask[adapter->portnum]; | ||
633 | 647 | ||
634 | err = netxen_init_old_ctx(adapter); | 648 | err = netxen_init_old_ctx(adapter); |
635 | if (err) { | 649 | if (err) { |
@@ -650,6 +664,7 @@ void netxen_free_hw_resources(struct netxen_adapter *adapter)
650 | { | 664 | { |
651 | struct netxen_recv_context *recv_ctx; | 665 | struct netxen_recv_context *recv_ctx; |
652 | struct nx_host_rds_ring *rds_ring; | 666 | struct nx_host_rds_ring *rds_ring; |
667 | struct nx_host_sds_ring *sds_ring; | ||
653 | int ring; | 668 | int ring; |
654 | 669 | ||
655 | if (adapter->fw_major >= 4) { | 670 | if (adapter->fw_major >= 4) { |
@@ -681,19 +696,23 @@ void netxen_free_hw_resources(struct netxen_adapter *adapter)
681 | 696 | ||
682 | if (rds_ring->desc_head != NULL) { | 697 | if (rds_ring->desc_head != NULL) { |
683 | pci_free_consistent(adapter->pdev, | 698 | pci_free_consistent(adapter->pdev, |
684 | RCV_DESC_RINGSIZE, | 699 | RCV_DESC_RINGSIZE(rds_ring), |
685 | rds_ring->desc_head, | 700 | rds_ring->desc_head, |
686 | rds_ring->phys_addr); | 701 | rds_ring->phys_addr); |
687 | rds_ring->desc_head = NULL; | 702 | rds_ring->desc_head = NULL; |
688 | } | 703 | } |
689 | } | 704 | } |
690 | 705 | ||
691 | if (recv_ctx->rcv_status_desc_head != NULL) { | 706 | for (ring = 0; ring < adapter->max_sds_rings; ring++) { |
692 | pci_free_consistent(adapter->pdev, | 707 | sds_ring = &recv_ctx->sds_rings[ring]; |
693 | STATUS_DESC_RINGSIZE, | 708 | |
694 | recv_ctx->rcv_status_desc_head, | 709 | if (sds_ring->desc_head != NULL) { |
695 | recv_ctx->rcv_status_desc_phys_addr); | 710 | pci_free_consistent(adapter->pdev, |
696 | recv_ctx->rcv_status_desc_head = NULL; | 711 | STATUS_DESC_RINGSIZE(sds_ring), |
712 | sds_ring->desc_head, | ||
713 | sds_ring->phys_addr); | ||
714 | sds_ring->desc_head = NULL; | ||
715 | } | ||
697 | } | 716 | } |
698 | } | 717 | } |
699 | 718 | ||
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index cea7300426b..c89c791e281 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -670,6 +670,53 @@ int netxen_config_intr_coalesce(struct netxen_adapter *adapter)
670 | return rv; | 670 | return rv; |
671 | } | 671 | } |
672 | 672 | ||
673 | #define RSS_HASHTYPE_IP_TCP 0x3 | ||
674 | |||
675 | int netxen_config_rss(struct netxen_adapter *adapter, int enable) | ||
676 | { | ||
677 | nx_nic_req_t req; | ||
678 | u64 word; | ||
679 | int i, rv; | ||
680 | |||
681 | u64 key[] = { 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL, | ||
682 | 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL, | ||
683 | 0x255b0ec26d5a56daULL }; | ||
684 | |||
685 | |||
686 | memset(&req, 0, sizeof(nx_nic_req_t)); | ||
687 | req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); | ||
688 | |||
689 | word = NX_NIC_H2C_OPCODE_CONFIG_RSS | ((u64)adapter->portnum << 16); | ||
690 | req.req_hdr = cpu_to_le64(word); | ||
691 | |||
692 | /* | ||
693 | * RSS request: | ||
694 | * bits 3-0: hash_method | ||
695 | * 5-4: hash_type_ipv4 | ||
696 | * 7-6: hash_type_ipv6 | ||
697 | * 8: enable | ||
698 | * 9: use indirection table | ||
699 | * 47-10: reserved | ||
700 | * 63-48: indirection table mask | ||
701 | */ | ||
702 | word = ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) | | ||
703 | ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) | | ||
704 | ((u64)(enable & 0x1) << 8) | | ||
705 | ((0x7ULL) << 48); | ||
706 | req.words[0] = cpu_to_le64(word); | ||
707 | for (i = 0; i < 5; i++) | ||
708 | req.words[i+1] = cpu_to_le64(key[i]); | ||
709 | |||
710 | |||
711 | rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); | ||
712 | if (rv != 0) { | ||
713 | printk(KERN_ERR "%s: could not configure RSS\n", | ||
714 | adapter->netdev->name); | ||
715 | } | ||
716 | |||
717 | return rv; | ||
718 | } | ||
719 | |||
673 | /* | 720 | /* |
674 | * netxen_nic_change_mtu - Change the Maximum Transfer Unit | 721 | * netxen_nic_change_mtu - Change the Maximum Transfer Unit |
675 | * @returns 0 on success, negative on failure | 722 | * @returns 0 on success, negative on failure |
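[ For reference only, not part of the patch: with RSS_HASHTYPE_IP_TCP = 0x3 and enable = 1, and with hash_method (bits 3-0) and the indirection-table bit (bit 9) left at zero, the request word stored in req.words[0] by netxen_config_rss() above works out to:

      bits 5-4   hash_type_ipv4 : 0x3ULL << 4  = 0x0000000000000030
      bits 7-6   hash_type_ipv6 : 0x3ULL << 6  = 0x00000000000000c0
      bit  8     enable         : 0x1ULL << 8  = 0x0000000000000100
      bits 63-48 table mask     : 0x7ULL << 48 = 0x0007000000000000
                                                 ------------------
      word (before cpu_to_le64)                = 0x00070000000001f0 ]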
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 1b8f79f7f8c..0759c35f16a 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -50,7 +50,8 @@ static unsigned int crb_addr_xform[NETXEN_MAX_CRB_XFORM];
50 | #define NETXEN_NIC_XDMA_RESET 0x8000ff | 50 | #define NETXEN_NIC_XDMA_RESET 0x8000ff |
51 | 51 | ||
52 | static void | 52 | static void |
53 | netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter, uint32_t ringid); | 53 | netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter, |
54 | struct nx_host_rds_ring *rds_ring); | ||
54 | 55 | ||
55 | static void crb_addr_transform_setup(void) | 56 | static void crb_addr_transform_setup(void) |
56 | { | 57 | { |
@@ -222,19 +223,21 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
222 | { | 223 | { |
223 | struct netxen_recv_context *recv_ctx; | 224 | struct netxen_recv_context *recv_ctx; |
224 | struct nx_host_rds_ring *rds_ring; | 225 | struct nx_host_rds_ring *rds_ring; |
226 | struct nx_host_sds_ring *sds_ring; | ||
225 | struct netxen_rx_buffer *rx_buf; | 227 | struct netxen_rx_buffer *rx_buf; |
226 | int ring, i, num_rx_bufs; | 228 | int ring, i, num_rx_bufs; |
227 | 229 | ||
228 | struct netxen_cmd_buffer *cmd_buf_arr; | 230 | struct netxen_cmd_buffer *cmd_buf_arr; |
229 | struct net_device *netdev = adapter->netdev; | 231 | struct net_device *netdev = adapter->netdev; |
230 | 232 | ||
231 | cmd_buf_arr = (struct netxen_cmd_buffer *)vmalloc(TX_RINGSIZE); | 233 | cmd_buf_arr = |
234 | (struct netxen_cmd_buffer *)vmalloc(TX_BUFF_RINGSIZE(adapter)); | ||
232 | if (cmd_buf_arr == NULL) { | 235 | if (cmd_buf_arr == NULL) { |
233 | printk(KERN_ERR "%s: Failed to allocate cmd buffer ring\n", | 236 | printk(KERN_ERR "%s: Failed to allocate cmd buffer ring\n", |
234 | netdev->name); | 237 | netdev->name); |
235 | return -ENOMEM; | 238 | return -ENOMEM; |
236 | } | 239 | } |
237 | memset(cmd_buf_arr, 0, TX_RINGSIZE); | 240 | memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(adapter)); |
238 | adapter->cmd_buf_arr = cmd_buf_arr; | 241 | adapter->cmd_buf_arr = cmd_buf_arr; |
239 | 242 | ||
240 | recv_ctx = &adapter->recv_ctx; | 243 | recv_ctx = &adapter->recv_ctx; |
@@ -275,7 +278,7 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
275 | 278 | ||
276 | } | 279 | } |
277 | rds_ring->rx_buf_arr = (struct netxen_rx_buffer *) | 280 | rds_ring->rx_buf_arr = (struct netxen_rx_buffer *) |
278 | vmalloc(RCV_BUFFSIZE); | 281 | vmalloc(RCV_BUFF_RINGSIZE(rds_ring)); |
279 | if (rds_ring->rx_buf_arr == NULL) { | 282 | if (rds_ring->rx_buf_arr == NULL) { |
280 | printk(KERN_ERR "%s: Failed to allocate " | 283 | printk(KERN_ERR "%s: Failed to allocate " |
281 | "rx buffer ring %d\n", | 284 | "rx buffer ring %d\n", |
@@ -283,7 +286,7 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
283 | /* free whatever was already allocated */ | 286 | /* free whatever was already allocated */ |
284 | goto err_out; | 287 | goto err_out; |
285 | } | 288 | } |
286 | memset(rds_ring->rx_buf_arr, 0, RCV_BUFFSIZE); | 289 | memset(rds_ring->rx_buf_arr, 0, RCV_BUFF_RINGSIZE(rds_ring)); |
287 | INIT_LIST_HEAD(&rds_ring->free_list); | 290 | INIT_LIST_HEAD(&rds_ring->free_list); |
288 | /* | 291 | /* |
289 | * Now go through all of them, set reference handles | 292 | * Now go through all of them, set reference handles |
@@ -298,6 +301,19 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
298 | rx_buf->state = NETXEN_BUFFER_FREE; | 301 | rx_buf->state = NETXEN_BUFFER_FREE; |
299 | rx_buf++; | 302 | rx_buf++; |
300 | } | 303 | } |
304 | spin_lock_init(&rds_ring->lock); | ||
305 | } | ||
306 | |||
307 | for (ring = 0; ring < adapter->max_sds_rings; ring++) { | ||
308 | sds_ring = &recv_ctx->sds_rings[ring]; | ||
309 | sds_ring->irq = adapter->msix_entries[ring].vector; | ||
310 | sds_ring->clean_tx = (ring == 0); | ||
311 | sds_ring->post_rxd = (ring == 0); | ||
312 | sds_ring->adapter = adapter; | ||
313 | sds_ring->num_desc = adapter->num_rxd; | ||
314 | |||
315 | for (i = 0; i < NUM_RCV_DESC_RINGS; i++) | ||
316 | INIT_LIST_HEAD(&sds_ring->free_list[i]); | ||
301 | } | 317 | } |
302 | 318 | ||
303 | return 0; | 319 | return 0; |
@@ -793,6 +809,40 @@ int netxen_receive_peg_ready(struct netxen_adapter *adapter)
793 | return 0; | 809 | return 0; |
794 | } | 810 | } |
795 | 811 | ||
812 | static int | ||
813 | netxen_alloc_rx_skb(struct netxen_adapter *adapter, | ||
814 | struct nx_host_rds_ring *rds_ring, | ||
815 | struct netxen_rx_buffer *buffer) | ||
816 | { | ||
817 | struct sk_buff *skb; | ||
818 | dma_addr_t dma; | ||
819 | struct pci_dev *pdev = adapter->pdev; | ||
820 | |||
821 | buffer->skb = dev_alloc_skb(rds_ring->skb_size); | ||
822 | if (!buffer->skb) | ||
823 | return 1; | ||
824 | |||
825 | skb = buffer->skb; | ||
826 | |||
827 | if (!adapter->ahw.cut_through) | ||
828 | skb_reserve(skb, 2); | ||
829 | |||
830 | dma = pci_map_single(pdev, skb->data, | ||
831 | rds_ring->dma_size, PCI_DMA_FROMDEVICE); | ||
832 | |||
833 | if (pci_dma_mapping_error(pdev, dma)) { | ||
834 | dev_kfree_skb_any(skb); | ||
835 | buffer->skb = NULL; | ||
836 | return 1; | ||
837 | } | ||
838 | |||
839 | buffer->skb = skb; | ||
840 | buffer->dma = dma; | ||
841 | buffer->state = NETXEN_BUFFER_BUSY; | ||
842 | |||
843 | return 0; | ||
844 | } | ||
845 | |||
796 | static struct sk_buff *netxen_process_rxbuf(struct netxen_adapter *adapter, | 846 | static struct sk_buff *netxen_process_rxbuf(struct netxen_adapter *adapter, |
797 | struct nx_host_rds_ring *rds_ring, u16 index, u16 cksum) | 847 | struct nx_host_rds_ring *rds_ring, u16 index, u16 cksum) |
798 | { | 848 | { |
@@ -817,14 +867,12 @@ static struct sk_buff *netxen_process_rxbuf(struct netxen_adapter *adapter,
817 | skb->dev = adapter->netdev; | 867 | skb->dev = adapter->netdev; |
818 | 868 | ||
819 | buffer->skb = NULL; | 869 | buffer->skb = NULL; |
820 | |||
821 | no_skb: | 870 | no_skb: |
822 | buffer->state = NETXEN_BUFFER_FREE; | 871 | buffer->state = NETXEN_BUFFER_FREE; |
823 | list_add_tail(&buffer->list, &rds_ring->free_list); | ||
824 | return skb; | 872 | return skb; |
825 | } | 873 | } |
826 | 874 | ||
827 | static void | 875 | static struct netxen_rx_buffer * |
828 | netxen_process_rcv(struct netxen_adapter *adapter, | 876 | netxen_process_rcv(struct netxen_adapter *adapter, |
829 | int ring, int index, int length, int cksum, int pkt_offset) | 877 | int ring, int index, int length, int cksum, int pkt_offset) |
830 | { | 878 | { |
@@ -835,13 +883,13 @@ netxen_process_rcv(struct netxen_adapter *adapter,
835 | struct nx_host_rds_ring *rds_ring = &recv_ctx->rds_rings[ring]; | 883 | struct nx_host_rds_ring *rds_ring = &recv_ctx->rds_rings[ring]; |
836 | 884 | ||
837 | if (unlikely(index > rds_ring->num_desc)) | 885 | if (unlikely(index > rds_ring->num_desc)) |
838 | return; | 886 | return NULL; |
839 | 887 | ||
840 | buffer = &rds_ring->rx_buf_arr[index]; | 888 | buffer = &rds_ring->rx_buf_arr[index]; |
841 | 889 | ||
842 | skb = netxen_process_rxbuf(adapter, rds_ring, index, cksum); | 890 | skb = netxen_process_rxbuf(adapter, rds_ring, index, cksum); |
843 | if (!skb) | 891 | if (!skb) |
844 | return; | 892 | return buffer; |
845 | 893 | ||
846 | if (length > rds_ring->skb_size) | 894 | if (length > rds_ring->skb_size) |
847 | skb_put(skb, rds_ring->skb_size); | 895 | skb_put(skb, rds_ring->skb_size); |
@@ -858,21 +906,31 @@ netxen_process_rcv(struct netxen_adapter *adapter,
858 | 906 | ||
859 | adapter->stats.no_rcv++; | 907 | adapter->stats.no_rcv++; |
860 | adapter->stats.rxbytes += length; | 908 | adapter->stats.rxbytes += length; |
909 | |||
910 | return buffer; | ||
861 | } | 911 | } |
862 | 912 | ||
913 | #define netxen_merge_rx_buffers(list, head) \ | ||
914 | do { list_splice_tail_init(list, head); } while (0); | ||
915 | |||
863 | int | 916 | int |
864 | netxen_process_rcv_ring(struct netxen_adapter *adapter, int max) | 917 | netxen_process_rcv_ring(struct nx_host_sds_ring *sds_ring, int max) |
865 | { | 918 | { |
866 | struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; | 919 | struct netxen_adapter *adapter = sds_ring->adapter; |
867 | struct status_desc *desc_head = recv_ctx->rcv_status_desc_head; | 920 | |
921 | struct list_head *cur; | ||
922 | |||
868 | struct status_desc *desc; | 923 | struct status_desc *desc; |
869 | u32 consumer = recv_ctx->status_rx_consumer; | 924 | struct netxen_rx_buffer *rxbuf; |
925 | |||
926 | u32 consumer = sds_ring->consumer; | ||
927 | |||
870 | int count = 0; | 928 | int count = 0; |
871 | u64 sts_data; | 929 | u64 sts_data; |
872 | int opcode, ring, index, length, cksum, pkt_offset; | 930 | int opcode, ring, index, length, cksum, pkt_offset; |
873 | 931 | ||
874 | while (count < max) { | 932 | while (count < max) { |
875 | desc = &desc_head[consumer]; | 933 | desc = &sds_ring->desc_head[consumer]; |
876 | sts_data = le64_to_cpu(desc->status_desc_data); | 934 | sts_data = le64_to_cpu(desc->status_desc_data); |
877 | 935 | ||
878 | if (!(sts_data & STATUS_OWNER_HOST)) | 936 | if (!(sts_data & STATUS_OWNER_HOST)) |
@@ -889,22 +947,41 @@ netxen_process_rcv_ring(struct netxen_adapter *adapter, int max)
889 | cksum = netxen_get_sts_status(sts_data); | 947 | cksum = netxen_get_sts_status(sts_data); |
890 | pkt_offset = netxen_get_sts_pkt_offset(sts_data); | 948 | pkt_offset = netxen_get_sts_pkt_offset(sts_data); |
891 | 949 | ||
892 | netxen_process_rcv(adapter, ring, index, | 950 | rxbuf = netxen_process_rcv(adapter, ring, index, |
893 | length, cksum, pkt_offset); | 951 | length, cksum, pkt_offset); |
894 | 952 | ||
953 | if (rxbuf) | ||
954 | list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]); | ||
955 | |||
895 | desc->status_desc_data = cpu_to_le64(STATUS_OWNER_PHANTOM); | 956 | desc->status_desc_data = cpu_to_le64(STATUS_OWNER_PHANTOM); |
896 | 957 | ||
897 | consumer = get_next_index(consumer, adapter->num_rxd); | 958 | consumer = get_next_index(consumer, sds_ring->num_desc); |
898 | count++; | 959 | count++; |
899 | } | 960 | } |
900 | 961 | ||
901 | for (ring = 0; ring < adapter->max_rds_rings; ring++) | 962 | for (ring = 0; ring < adapter->max_rds_rings; ring++) { |
902 | netxen_post_rx_buffers_nodb(adapter, ring); | 963 | struct nx_host_rds_ring *rds_ring = |
964 | &adapter->recv_ctx.rds_rings[ring]; | ||
965 | |||
966 | if (!list_empty(&sds_ring->free_list[ring])) { | ||
967 | list_for_each(cur, &sds_ring->free_list[ring]) { | ||
968 | rxbuf = list_entry(cur, | ||
969 | struct netxen_rx_buffer, list); | ||
970 | netxen_alloc_rx_skb(adapter, rds_ring, rxbuf); | ||
971 | } | ||
972 | spin_lock(&rds_ring->lock); | ||
973 | netxen_merge_rx_buffers(&sds_ring->free_list[ring], | ||
974 | &rds_ring->free_list); | ||
975 | spin_unlock(&rds_ring->lock); | ||
976 | } | ||
977 | |||
978 | netxen_post_rx_buffers_nodb(adapter, rds_ring); | ||
979 | } | ||
903 | 980 | ||
904 | if (count) { | 981 | if (count) { |
905 | recv_ctx->status_rx_consumer = consumer; | 982 | sds_ring->consumer = consumer; |
906 | adapter->pci_write_normalize(adapter, | 983 | adapter->pci_write_normalize(adapter, |
907 | recv_ctx->crb_sts_consumer, consumer); | 984 | sds_ring->crb_sts_consumer, consumer); |
908 | } | 985 | } |
909 | 986 | ||
910 | return count; | 987 | return count; |
@@ -921,6 +998,9 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
921 | struct netxen_skb_frag *frag; | 998 | struct netxen_skb_frag *frag; |
922 | int done = 0; | 999 | int done = 0; |
923 | 1000 | ||
1001 | if (!spin_trylock(&adapter->tx_clean_lock)) | ||
1002 | return 1; | ||
1003 | |||
924 | last_consumer = adapter->last_cmd_consumer; | 1004 | last_consumer = adapter->last_cmd_consumer; |
925 | barrier(); /* cmd_consumer can change underneath */ | 1005 | barrier(); /* cmd_consumer can change underneath */ |
926 | consumer = le32_to_cpu(*(adapter->cmd_consumer)); | 1006 | consumer = le32_to_cpu(*(adapter->cmd_consumer)); |
@@ -976,63 +1056,46 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
976 | barrier(); /* cmd_consumer can change underneath */ | 1056 | barrier(); /* cmd_consumer can change underneath */ |
977 | consumer = le32_to_cpu(*(adapter->cmd_consumer)); | 1057 | consumer = le32_to_cpu(*(adapter->cmd_consumer)); |
978 | done = (last_consumer == consumer); | 1058 | done = (last_consumer == consumer); |
1059 | spin_unlock(&adapter->tx_clean_lock); | ||
979 | 1060 | ||
980 | return (done); | 1061 | return (done); |
981 | } | 1062 | } |
982 | 1063 | ||
983 | void | 1064 | void |
984 | netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid) | 1065 | netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid, |
1066 | struct nx_host_rds_ring *rds_ring) | ||
985 | { | 1067 | { |
986 | struct pci_dev *pdev = adapter->pdev; | ||
987 | struct sk_buff *skb; | ||
988 | struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; | ||
989 | struct nx_host_rds_ring *rds_ring = NULL; | ||
990 | uint producer; | ||
991 | struct rcv_desc *pdesc; | 1068 | struct rcv_desc *pdesc; |
992 | struct netxen_rx_buffer *buffer; | 1069 | struct netxen_rx_buffer *buffer; |
993 | int count = 0; | 1070 | int producer, count = 0; |
994 | netxen_ctx_msg msg = 0; | 1071 | netxen_ctx_msg msg = 0; |
995 | dma_addr_t dma; | ||
996 | struct list_head *head; | 1072 | struct list_head *head; |
997 | 1073 | ||
998 | rds_ring = &recv_ctx->rds_rings[ringid]; | ||
999 | |||
1000 | producer = rds_ring->producer; | 1074 | producer = rds_ring->producer; |
1001 | head = &rds_ring->free_list; | ||
1002 | 1075 | ||
1076 | spin_lock(&rds_ring->lock); | ||
1077 | head = &rds_ring->free_list; | ||
1003 | while (!list_empty(head)) { | 1078 | while (!list_empty(head)) { |
1004 | 1079 | ||
1005 | skb = dev_alloc_skb(rds_ring->skb_size); | 1080 | buffer = list_entry(head->next, struct netxen_rx_buffer, list); |
1006 | if (unlikely(!skb)) { | ||
1007 | break; | ||
1008 | } | ||
1009 | |||
1010 | if (!adapter->ahw.cut_through) | ||
1011 | skb_reserve(skb, 2); | ||
1012 | 1081 | ||
1013 | dma = pci_map_single(pdev, skb->data, | 1082 | if (!buffer->skb) { |
1014 | rds_ring->dma_size, PCI_DMA_FROMDEVICE); | 1083 | if (netxen_alloc_rx_skb(adapter, rds_ring, buffer)) |
1015 | if (pci_dma_mapping_error(pdev, dma)) { | 1084 | break; |
1016 | dev_kfree_skb_any(skb); | ||
1017 | break; | ||
1018 | } | 1085 | } |
1019 | 1086 | ||
1020 | count++; | 1087 | count++; |
1021 | buffer = list_entry(head->next, struct netxen_rx_buffer, list); | ||
1022 | list_del(&buffer->list); | 1088 | list_del(&buffer->list); |
1023 | 1089 | ||
1024 | buffer->skb = skb; | ||
1025 | buffer->state = NETXEN_BUFFER_BUSY; | ||
1026 | buffer->dma = dma; | ||
1027 | |||
1028 | /* make a rcv descriptor */ | 1090 | /* make a rcv descriptor */ |
1029 | pdesc = &rds_ring->desc_head[producer]; | 1091 | pdesc = &rds_ring->desc_head[producer]; |
1030 | pdesc->addr_buffer = cpu_to_le64(dma); | 1092 | pdesc->addr_buffer = cpu_to_le64(buffer->dma); |
1031 | pdesc->reference_handle = cpu_to_le16(buffer->ref_handle); | 1093 | pdesc->reference_handle = cpu_to_le16(buffer->ref_handle); |
1032 | pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size); | 1094 | pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size); |
1033 | 1095 | ||
1034 | producer = get_next_index(producer, rds_ring->num_desc); | 1096 | producer = get_next_index(producer, rds_ring->num_desc); |
1035 | } | 1097 | } |
1098 | spin_unlock(&rds_ring->lock); | ||
1036 | 1099 | ||
1037 | if (count) { | 1100 | if (count) { |
1038 | rds_ring->producer = producer; | 1101 | rds_ring->producer = producer; |
@@ -1061,48 +1124,31 @@ netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid)
1061 | } | 1124 | } |
1062 | 1125 | ||
1063 | static void | 1126 | static void |
1064 | netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter, uint32_t ringid) | 1127 | netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter, |
1128 | struct nx_host_rds_ring *rds_ring) | ||
1065 | { | 1129 | { |
1066 | struct pci_dev *pdev = adapter->pdev; | ||
1067 | struct sk_buff *skb; | ||
1068 | struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; | ||
1069 | struct nx_host_rds_ring *rds_ring = NULL; | ||
1070 | u32 producer; | ||
1071 | struct rcv_desc *pdesc; | 1130 | struct rcv_desc *pdesc; |
1072 | struct netxen_rx_buffer *buffer; | 1131 | struct netxen_rx_buffer *buffer; |
1073 | int count = 0; | 1132 | int producer, count = 0; |
1074 | struct list_head *head; | 1133 | struct list_head *head; |
1075 | dma_addr_t dma; | ||
1076 | |||
1077 | rds_ring = &recv_ctx->rds_rings[ringid]; | ||
1078 | 1134 | ||
1079 | producer = rds_ring->producer; | 1135 | producer = rds_ring->producer; |
1136 | if (!spin_trylock(&rds_ring->lock)) | ||
1137 | return; | ||
1138 | |||
1080 | head = &rds_ring->free_list; | 1139 | head = &rds_ring->free_list; |
1081 | while (!list_empty(head)) { | 1140 | while (!list_empty(head)) { |
1082 | 1141 | ||
1083 | skb = dev_alloc_skb(rds_ring->skb_size); | 1142 | buffer = list_entry(head->next, struct netxen_rx_buffer, list); |
1084 | if (unlikely(!skb)) { | ||
1085 | break; | ||
1086 | } | ||
1087 | |||
1088 | if (!adapter->ahw.cut_through) | ||
1089 | skb_reserve(skb, 2); | ||
1090 | 1143 | ||
1091 | dma = pci_map_single(pdev, skb->data, | 1144 | if (!buffer->skb) { |
1092 | rds_ring->dma_size, PCI_DMA_FROMDEVICE); | 1145 | if (netxen_alloc_rx_skb(adapter, rds_ring, buffer)) |
1093 | if (pci_dma_mapping_error(pdev, dma)) { | 1146 | break; |
1094 | dev_kfree_skb_any(skb); | ||
1095 | break; | ||
1096 | } | 1147 | } |
1097 | 1148 | ||
1098 | count++; | 1149 | count++; |
1099 | buffer = list_entry(head->next, struct netxen_rx_buffer, list); | ||
1100 | list_del(&buffer->list); | 1150 | list_del(&buffer->list); |
1101 | 1151 | ||
1102 | buffer->skb = skb; | ||
1103 | buffer->state = NETXEN_BUFFER_BUSY; | ||
1104 | buffer->dma = dma; | ||
1105 | |||
1106 | /* make a rcv descriptor */ | 1152 | /* make a rcv descriptor */ |
1107 | pdesc = &rds_ring->desc_head[producer]; | 1153 | pdesc = &rds_ring->desc_head[producer]; |
1108 | pdesc->reference_handle = cpu_to_le16(buffer->ref_handle); | 1154 | pdesc->reference_handle = cpu_to_le16(buffer->ref_handle); |
@@ -1119,6 +1165,7 @@ netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter, uint32_t ringid)
1119 | (producer - 1) & (rds_ring->num_desc - 1)); | 1165 | (producer - 1) & (rds_ring->num_desc - 1)); |
1120 | wmb(); | 1166 | wmb(); |
1121 | } | 1167 | } |
1168 | spin_unlock(&rds_ring->lock); | ||
1122 | } | 1169 | } |
1123 | 1170 | ||
1124 | void netxen_nic_clear_stats(struct netxen_adapter *adapter) | 1171 | void netxen_nic_clear_stats(struct netxen_adapter *adapter) |
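[ Outline only, derived from the netxen_nic_init.c hunks above: the rx-buffer refill path is restructured roughly as follows.

      1. netxen_process_rcv() now returns the consumed netxen_rx_buffer instead
         of putting it back on the rds_ring free list itself.
      2. netxen_process_rcv_ring() collects those buffers on its own
         sds_ring->free_list[ring], so each status ring batches used buffers
         privately while it walks its status descriptors.
      3. After the loop, each collected buffer gets a fresh skb via
         netxen_alloc_rx_skb(), and the batch is spliced onto
         rds_ring->free_list under rds_ring->lock (netxen_merge_rx_buffers).
      4. netxen_post_rx_buffers() / netxen_post_rx_buffers_nodb() then repost
         descriptors from that free list, allocating an skb only for buffers
         that do not already carry a mapped one (buffer->skb == NULL). ]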
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 00eaeee235e..274d1e0c893 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -135,20 +135,71 @@ static uint32_t msi_tgt_status[8] = {
135 | 135 | ||
136 | static struct netxen_legacy_intr_set legacy_intr[] = NX_LEGACY_INTR_CONFIG; | 136 | static struct netxen_legacy_intr_set legacy_intr[] = NX_LEGACY_INTR_CONFIG; |
137 | 137 | ||
138 | static inline void netxen_nic_disable_int(struct netxen_adapter *adapter) | 138 | static inline void netxen_nic_disable_int(struct nx_host_sds_ring *sds_ring) |
139 | { | 139 | { |
140 | adapter->pci_write_normalize(adapter, adapter->crb_intr_mask, 0); | 140 | struct netxen_adapter *adapter = sds_ring->adapter; |
141 | |||
142 | adapter->pci_write_normalize(adapter, sds_ring->crb_intr_mask, 0); | ||
141 | } | 143 | } |
142 | 144 | ||
143 | static inline void netxen_nic_enable_int(struct netxen_adapter *adapter) | 145 | static inline void netxen_nic_enable_int(struct nx_host_sds_ring *sds_ring) |
144 | { | 146 | { |
145 | adapter->pci_write_normalize(adapter, adapter->crb_intr_mask, 0x1); | 147 | struct netxen_adapter *adapter = sds_ring->adapter; |
148 | |||
149 | adapter->pci_write_normalize(adapter, sds_ring->crb_intr_mask, 0x1); | ||
146 | 150 | ||
147 | if (!NETXEN_IS_MSI_FAMILY(adapter)) | 151 | if (!NETXEN_IS_MSI_FAMILY(adapter)) |
148 | adapter->pci_write_immediate(adapter, | 152 | adapter->pci_write_immediate(adapter, |
149 | adapter->legacy_intr.tgt_mask_reg, 0xfbff); | 153 | adapter->legacy_intr.tgt_mask_reg, 0xfbff); |
150 | } | 154 | } |
151 | 155 | ||
156 | static void | ||
157 | netxen_napi_add(struct netxen_adapter *adapter, struct net_device *netdev) | ||
158 | { | ||
159 | int ring; | ||
160 | struct nx_host_sds_ring *sds_ring; | ||
161 | struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; | ||
162 | |||
163 | if (adapter->flags & NETXEN_NIC_MSIX_ENABLED) | ||
164 | adapter->max_sds_rings = (num_online_cpus() >= 4) ? 4 : 2; | ||
165 | else | ||
166 | adapter->max_sds_rings = 1; | ||
167 | |||
168 | for (ring = 0; ring < adapter->max_sds_rings; ring++) { | ||
169 | sds_ring = &recv_ctx->sds_rings[ring]; | ||
170 | netif_napi_add(netdev, &sds_ring->napi, | ||
171 | netxen_nic_poll, NETXEN_NETDEV_WEIGHT); | ||
172 | } | ||
173 | } | ||
174 | |||
175 | static void | ||
176 | netxen_napi_enable(struct netxen_adapter *adapter) | ||
177 | { | ||
178 | int ring; | ||
179 | struct nx_host_sds_ring *sds_ring; | ||
180 | struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; | ||
181 | |||
182 | for (ring = 0; ring < adapter->max_sds_rings; ring++) { | ||
183 | sds_ring = &recv_ctx->sds_rings[ring]; | ||
184 | napi_enable(&sds_ring->napi); | ||
185 | netxen_nic_enable_int(sds_ring); | ||
186 | } | ||
187 | } | ||
188 | |||
189 | static void | ||
190 | netxen_napi_disable(struct netxen_adapter *adapter) | ||
191 | { | ||
192 | int ring; | ||
193 | struct nx_host_sds_ring *sds_ring; | ||
194 | struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; | ||
195 | |||
196 | for (ring = 0; ring < adapter->max_sds_rings; ring++) { | ||
197 | sds_ring = &recv_ctx->sds_rings[ring]; | ||
198 | netxen_nic_disable_int(sds_ring); | ||
199 | napi_disable(&sds_ring->napi); | ||
200 | } | ||
201 | } | ||
202 | |||
152 | static int nx_set_dma_mask(struct netxen_adapter *adapter, uint8_t revision_id) | 203 | static int nx_set_dma_mask(struct netxen_adapter *adapter, uint8_t revision_id) |
153 | { | 204 | { |
154 | struct pci_dev *pdev = adapter->pdev; | 205 | struct pci_dev *pdev = adapter->pdev; |
@@ -226,7 +277,6 @@ static void netxen_check_options(struct netxen_adapter *adapter)
226 | adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS; | 277 | adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS; |
227 | adapter->num_lro_rxd = MAX_LRO_RCV_DESCRIPTORS; | 278 | adapter->num_lro_rxd = MAX_LRO_RCV_DESCRIPTORS; |
228 | 279 | ||
229 | adapter->max_possible_rss_rings = 1; | ||
230 | return; | 280 | return; |
231 | } | 281 | } |
232 | 282 | ||
@@ -447,6 +497,7 @@ request_msi:
447 | dev_info(&pdev->dev, "using msi interrupts\n"); | 497 | dev_info(&pdev->dev, "using msi interrupts\n"); |
448 | } else | 498 | } else |
449 | dev_info(&pdev->dev, "using legacy interrupts\n"); | 499 | dev_info(&pdev->dev, "using legacy interrupts\n"); |
500 | adapter->msix_entries[0].vector = pdev->irq; | ||
450 | } | 501 | } |
451 | } | 502 | } |
452 | 503 | ||
@@ -671,8 +722,12 @@ static int
671 | netxen_nic_request_irq(struct netxen_adapter *adapter) | 722 | netxen_nic_request_irq(struct netxen_adapter *adapter) |
672 | { | 723 | { |
673 | irq_handler_t handler; | 724 | irq_handler_t handler; |
725 | struct nx_host_sds_ring *sds_ring; | ||
726 | int err, ring; | ||
727 | |||
674 | unsigned long flags = IRQF_SAMPLE_RANDOM; | 728 | unsigned long flags = IRQF_SAMPLE_RANDOM; |
675 | struct net_device *netdev = adapter->netdev; | 729 | struct net_device *netdev = adapter->netdev; |
730 | struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; | ||
676 | 731 | ||
677 | if ((adapter->msi_mode != MSI_MODE_MULTIFUNC) || | 732 | if ((adapter->msi_mode != MSI_MODE_MULTIFUNC) || |
678 | (adapter->intr_scheme != INTR_SCHEME_PERPORT)) { | 733 | (adapter->intr_scheme != INTR_SCHEME_PERPORT)) { |
@@ -693,8 +748,30 @@ netxen_nic_request_irq(struct netxen_adapter *adapter)
693 | } | 748 | } |
694 | adapter->irq = netdev->irq; | 749 | adapter->irq = netdev->irq; |
695 | 750 | ||
696 | return request_irq(adapter->irq, handler, | 751 | for (ring = 0; ring < adapter->max_sds_rings; ring++) { |
697 | flags, netdev->name, adapter); | 752 | sds_ring = &recv_ctx->sds_rings[ring]; |
753 | sprintf(sds_ring->name, "%16s[%d]", netdev->name, ring); | ||
754 | err = request_irq(sds_ring->irq, handler, | ||
755 | flags, sds_ring->name, sds_ring); | ||
756 | if (err) | ||
757 | return err; | ||
758 | } | ||
759 | |||
760 | return 0; | ||
761 | } | ||
762 | |||
763 | static void | ||
764 | netxen_nic_free_irq(struct netxen_adapter *adapter) | ||
765 | { | ||
766 | int ring; | ||
767 | struct nx_host_sds_ring *sds_ring; | ||
768 | |||
769 | struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; | ||
770 | |||
771 | for (ring = 0; ring < adapter->max_sds_rings; ring++) { | ||
772 | sds_ring = &recv_ctx->sds_rings[ring]; | ||
773 | free_irq(sds_ring->irq, sds_ring); | ||
774 | } | ||
698 | } | 775 | } |
699 | 776 | ||
700 | static int | 777 | static int |
@@ -719,8 +796,10 @@ netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev)
719 | adapter->ahw.linkup = 0; | 796 | adapter->ahw.linkup = 0; |
720 | mod_timer(&adapter->watchdog_timer, jiffies); | 797 | mod_timer(&adapter->watchdog_timer, jiffies); |
721 | 798 | ||
722 | napi_enable(&adapter->napi); | 799 | netxen_napi_enable(adapter); |
723 | netxen_nic_enable_int(adapter); | 800 | |
801 | if (adapter->max_sds_rings > 1) | ||
802 | netxen_config_rss(adapter, 1); | ||
724 | 803 | ||
725 | return 0; | 804 | return 0; |
726 | } | 805 | } |
@@ -730,13 +809,11 @@ netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
730 | { | 809 | { |
731 | netif_carrier_off(netdev); | 810 | netif_carrier_off(netdev); |
732 | netif_stop_queue(netdev); | 811 | netif_stop_queue(netdev); |
733 | napi_disable(&adapter->napi); | 812 | netxen_napi_disable(adapter); |
734 | 813 | ||
735 | if (adapter->stop_port) | 814 | if (adapter->stop_port) |
736 | adapter->stop_port(adapter); | 815 | adapter->stop_port(adapter); |
737 | 816 | ||
738 | netxen_nic_disable_int(adapter); | ||
739 | |||
740 | netxen_release_tx_buffers(adapter); | 817 | netxen_release_tx_buffers(adapter); |
741 | 818 | ||
742 | FLUSH_SCHEDULED_WORK(); | 819 | FLUSH_SCHEDULED_WORK(); |
@@ -750,6 +827,7 @@ netxen_nic_attach(struct netxen_adapter *adapter)
750 | struct net_device *netdev = adapter->netdev; | 827 | struct net_device *netdev = adapter->netdev; |
751 | struct pci_dev *pdev = adapter->pdev; | 828 | struct pci_dev *pdev = adapter->pdev; |
752 | int err, ring; | 829 | int err, ring; |
830 | struct nx_host_rds_ring *rds_ring; | ||
753 | 831 | ||
754 | err = netxen_init_firmware(adapter); | 832 | err = netxen_init_firmware(adapter); |
755 | if (err != 0) { | 833 | if (err != 0) { |
@@ -788,8 +866,10 @@ netxen_nic_attach(struct netxen_adapter *adapter)
788 | netxen_nic_update_cmd_consumer(adapter, 0); | 866 | netxen_nic_update_cmd_consumer(adapter, 0); |
789 | } | 867 | } |
790 | 868 | ||
791 | for (ring = 0; ring < adapter->max_rds_rings; ring++) | 869 | for (ring = 0; ring < adapter->max_rds_rings; ring++) { |
792 | netxen_post_rx_buffers(adapter, ring); | 870 | rds_ring = &adapter->recv_ctx.rds_rings[ring]; |
871 | netxen_post_rx_buffers(adapter, ring, rds_ring); | ||
872 | } | ||
793 | 873 | ||
794 | err = netxen_nic_request_irq(adapter); | 874 | err = netxen_nic_request_irq(adapter); |
795 | if (err) { | 875 | if (err) { |
@@ -812,8 +892,7 @@ err_out_free_sw:
812 | static void | 892 | static void |
813 | netxen_nic_detach(struct netxen_adapter *adapter) | 893 | netxen_nic_detach(struct netxen_adapter *adapter) |
814 | { | 894 | { |
815 | if (adapter->irq) | 895 | netxen_nic_free_irq(adapter); |
816 | free_irq(adapter->irq, adapter); | ||
817 | 896 | ||
818 | netxen_release_rx_buffers(adapter); | 897 | netxen_release_rx_buffers(adapter); |
819 | netxen_free_hw_resources(adapter); | 898 | netxen_free_hw_resources(adapter); |
@@ -883,14 +962,12 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
883 | goto err_out_free_netdev; | 962 | goto err_out_free_netdev; |
884 | 963 | ||
885 | rwlock_init(&adapter->adapter_lock); | 964 | rwlock_init(&adapter->adapter_lock); |
965 | spin_lock_init(&adapter->tx_clean_lock); | ||
886 | 966 | ||
887 | err = netxen_setup_pci_map(adapter); | 967 | err = netxen_setup_pci_map(adapter); |
888 | if (err) | 968 | if (err) |
889 | goto err_out_free_netdev; | 969 | goto err_out_free_netdev; |
890 | 970 | ||
891 | netif_napi_add(netdev, &adapter->napi, | ||
892 | netxen_nic_poll, NETXEN_NETDEV_WEIGHT); | ||
893 | |||
894 | /* This will be reset for mezz cards */ | 971 | /* This will be reset for mezz cards */ |
895 | adapter->portnum = pci_func_id; | 972 | adapter->portnum = pci_func_id; |
896 | adapter->rx_csum = 1; | 973 | adapter->rx_csum = 1; |
@@ -963,10 +1040,9 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
963 | 1040 | ||
964 | netxen_setup_intr(adapter); | 1041 | netxen_setup_intr(adapter); |
965 | 1042 | ||
966 | if (adapter->flags & NETXEN_NIC_MSIX_ENABLED) | 1043 | netdev->irq = adapter->msix_entries[0].vector; |
967 | netdev->irq = adapter->msix_entries[0].vector; | 1044 | |
968 | else | 1045 | netxen_napi_add(adapter, netdev); |
969 | netdev->irq = pdev->irq; | ||
970 | 1046 | ||
971 | err = netxen_receive_peg_ready(adapter); | 1047 | err = netxen_receive_peg_ready(adapter); |
972 | if (err) | 1048 | if (err) |
@@ -1520,13 +1596,11 @@ static void netxen_tx_timeout_task(struct work_struct *work)
1520 | printk(KERN_ERR "%s %s: transmit timeout, resetting.\n", | 1596 | printk(KERN_ERR "%s %s: transmit timeout, resetting.\n", |
1521 | netxen_nic_driver_name, adapter->netdev->name); | 1597 | netxen_nic_driver_name, adapter->netdev->name); |
1522 | 1598 | ||
1523 | netxen_nic_disable_int(adapter); | 1599 | netxen_napi_disable(adapter); |
1524 | napi_disable(&adapter->napi); | ||
1525 | 1600 | ||
1526 | adapter->netdev->trans_start = jiffies; | 1601 | adapter->netdev->trans_start = jiffies; |
1527 | 1602 | ||
1528 | napi_enable(&adapter->napi); | 1603 | netxen_napi_enable(adapter); |
1529 | netxen_nic_enable_int(adapter); | ||
1530 | netif_wake_queue(adapter->netdev); | 1604 | netif_wake_queue(adapter->netdev); |
1531 | } | 1605 | } |
1532 | 1606 | ||
@@ -1564,7 +1638,8 @@ struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev)
1564 | 1638 | ||
1565 | static irqreturn_t netxen_intr(int irq, void *data) | 1639 | static irqreturn_t netxen_intr(int irq, void *data) |
1566 | { | 1640 | { |
1567 | struct netxen_adapter *adapter = data; | 1641 | struct nx_host_sds_ring *sds_ring = data; |
1642 | struct netxen_adapter *adapter = sds_ring->adapter; | ||
1568 | u32 status = 0; | 1643 | u32 status = 0; |
1569 | 1644 | ||
1570 | status = adapter->pci_read_immediate(adapter, ISR_INT_VECTOR); | 1645 | status = adapter->pci_read_immediate(adapter, ISR_INT_VECTOR); |
@@ -1595,7 +1670,7 @@ static irqreturn_t netxen_intr(int irq, void *data)
1595 | 1670 | ||
1596 | /* clear interrupt */ | 1671 | /* clear interrupt */ |
1597 | if (adapter->fw_major < 4) | 1672 | if (adapter->fw_major < 4) |
1598 | netxen_nic_disable_int(adapter); | 1673 | netxen_nic_disable_int(sds_ring); |
1599 | 1674 | ||
1600 | adapter->pci_write_immediate(adapter, | 1675 | adapter->pci_write_immediate(adapter, |
1601 | adapter->legacy_intr.tgt_status_reg, | 1676 | adapter->legacy_intr.tgt_status_reg, |
@@ -1604,45 +1679,49 @@ static irqreturn_t netxen_intr(int irq, void *data)
1604 | adapter->pci_read_immediate(adapter, ISR_INT_VECTOR); | 1679 | adapter->pci_read_immediate(adapter, ISR_INT_VECTOR); |
1605 | adapter->pci_read_immediate(adapter, ISR_INT_VECTOR); | 1680 | adapter->pci_read_immediate(adapter, ISR_INT_VECTOR); |
1606 | 1681 | ||
1607 | napi_schedule(&adapter->napi); | 1682 | napi_schedule(&sds_ring->napi); |
1608 | 1683 | ||
1609 | return IRQ_HANDLED; | 1684 | return IRQ_HANDLED; |
1610 | } | 1685 | } |
1611 | 1686 | ||
1612 | static irqreturn_t netxen_msi_intr(int irq, void *data) | 1687 | static irqreturn_t netxen_msi_intr(int irq, void *data) |
1613 | { | 1688 | { |
1614 | struct netxen_adapter *adapter = data; | 1689 | struct nx_host_sds_ring *sds_ring = data; |
1690 | struct netxen_adapter *adapter = sds_ring->adapter; | ||
1615 | 1691 | ||
1616 | /* clear interrupt */ | 1692 | /* clear interrupt */ |
1617 | adapter->pci_write_immediate(adapter, | 1693 | adapter->pci_write_immediate(adapter, |
1618 | msi_tgt_status[adapter->ahw.pci_func], 0xffffffff); | 1694 | msi_tgt_status[adapter->ahw.pci_func], 0xffffffff); |
1619 | 1695 | ||
1620 | napi_schedule(&adapter->napi); | 1696 | napi_schedule(&sds_ring->napi); |
1621 | return IRQ_HANDLED; | 1697 | return IRQ_HANDLED; |
1622 | } | 1698 | } |
1623 | 1699 | ||
1624 | static irqreturn_t netxen_msix_intr(int irq, void *data) | 1700 | static irqreturn_t netxen_msix_intr(int irq, void *data) |
1625 | { | 1701 | { |
1626 | struct netxen_adapter *adapter = data; | 1702 | struct nx_host_sds_ring *sds_ring = data; |
1627 | 1703 | ||
1628 | napi_schedule(&adapter->napi); | 1704 | napi_schedule(&sds_ring->napi); |
1629 | return IRQ_HANDLED; | 1705 | return IRQ_HANDLED; |
1630 | } | 1706 | } |
1631 | 1707 | ||
1632 | static int netxen_nic_poll(struct napi_struct *napi, int budget) | 1708 | static int netxen_nic_poll(struct napi_struct *napi, int budget) |
1633 | { | 1709 | { |
1634 | struct netxen_adapter *adapter = | 1710 | struct nx_host_sds_ring *sds_ring = |
1635 | container_of(napi, struct netxen_adapter, napi); | 1711 | container_of(napi, struct nx_host_sds_ring, napi); |
1712 | |||
1713 | struct netxen_adapter *adapter = sds_ring->adapter; | ||
1714 | |||
1636 | int tx_complete; | 1715 | int tx_complete; |
1637 | int work_done; | 1716 | int work_done; |
1638 | 1717 | ||
1639 | tx_complete = netxen_process_cmd_ring(adapter); | 1718 | tx_complete = netxen_process_cmd_ring(adapter); |
1640 | 1719 | ||
1641 | work_done = netxen_process_rcv_ring(adapter, budget); | 1720 | work_done = netxen_process_rcv_ring(sds_ring, budget); |
1642 | 1721 | ||
1643 | if ((work_done < budget) && tx_complete) { | 1722 | if ((work_done < budget) && tx_complete) { |
1644 | napi_complete(&adapter->napi); | 1723 | napi_complete(&sds_ring->napi); |
1645 | netxen_nic_enable_int(adapter); | 1724 | netxen_nic_enable_int(sds_ring); |
1646 | } | 1725 | } |
1647 | 1726 | ||
1648 | return work_done; | 1727 | return work_done; |