author	Dhananjay Phadke <dhananjay@netxen.com>	2008-07-21 22:44:06 -0400
committer	Jeff Garzik <jgarzik@redhat.com>	2008-07-22 17:52:04 -0400
commit	48bfd1e0fc66b27254ec742b014e689ef218e76c (patch)
tree	fd380b835dc12a5500ff5972981ee9ae767639b4 /drivers/net/netxen
parent	a97342f9790f14ac20bd5f8b16ed661411fa2e3e (diff)
netxen: add netxen_nic_ctx.c
Contains rx and tx ring context management and certain firmware
commands for netxen firmware v4.0.0+. This patch gathers all HW
context management code into netxen_nic_ctx.c.

Signed-off-by: Dhananjay Phadke <dhananjay@netxen.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
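[Editor's note] The NX_CDRP_* macros this patch adds to netxen_nic.h encode a polled mailbox handshake with the v4.0.0+ firmware: the host writes a per-function signature and arguments into CRB scratch registers, sets NX_CDRP_CMD_BIT in the command register, and spins until the firmware clears that bit to publish a response. A minimal sketch of that handshake follows; the CRB offsets (NX_SIGN_CRB_OFFSET, NX_ARG1_CRB_OFFSET, NX_CDRP_CRB_OFFSET) are hypothetical stand-ins for this illustration, and the real implementation lives in the new netxen_nic_ctx.c:

/*
 * Sketch only: the CRB offsets below are placeholder names. Only the
 * NX_CDRP_* / NX_RCODE_* macros and the pci_{read,write}_normalize
 * accessors come from the patch itself.
 */
static u32 nx_issue_cmd_sketch(struct netxen_adapter *adapter,
			       u32 pcifn, u32 version, u32 arg, u32 cmd)
{
	u32 rsp;
	int retries = 0;

	/* Stamp the mailbox with the calling function's signature. */
	adapter->pci_write_normalize(adapter, NX_SIGN_CRB_OFFSET,
			NX_CDRP_SIGNATURE_MAKE(pcifn, version));
	adapter->pci_write_normalize(adapter, NX_ARG1_CRB_OFFSET, arg);

	/* Setting NX_CDRP_CMD_BIT hands the request to the firmware. */
	adapter->pci_write_normalize(adapter, NX_CDRP_CRB_OFFSET,
			NX_CDRP_FORM_CMD(cmd));

	/* Firmware clears NX_CDRP_CMD_BIT once the response is posted. */
	do {
		if (++retries > NX_OS_CRB_RETRY_COUNT)
			return NX_RCODE_TIMEOUT;
		rsp = adapter->pci_read_normalize(adapter,
				NX_CDRP_CRB_OFFSET);
	} while (!NX_CDRP_IS_RSP(rsp));

	/* On failure the firmware leaves a detailed rcode in ARG1. */
	if (rsp == NX_CDRP_RSP_FAIL)
		return adapter->pci_read_normalize(adapter,
				NX_ARG1_CRB_OFFSET);

	return NX_RCODE_SUCCESS;
}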
Diffstat (limited to 'drivers/net/netxen')
-rw-r--r--	drivers/net/netxen/Makefile	2
-rw-r--r--	drivers/net/netxen/netxen_nic.h	272
-rw-r--r--	drivers/net/netxen/netxen_nic_ethtool.c	4
-rw-r--r--	drivers/net/netxen/netxen_nic_hw.c	243
-rw-r--r--	drivers/net/netxen/netxen_nic_init.c	154
-rw-r--r--	drivers/net/netxen/netxen_nic_main.c	21
-rw-r--r--	drivers/net/netxen/netxen_nic_phan_reg.h	2
7 files changed, 359 insertions, 339 deletions
diff --git a/drivers/net/netxen/Makefile b/drivers/net/netxen/Makefile
index c63a20790659..8e7c4c910d2a 100644
--- a/drivers/net/netxen/Makefile
+++ b/drivers/net/netxen/Makefile
@@ -32,4 +32,4 @@
 obj-$(CONFIG_NETXEN_NIC) := netxen_nic.o
 
 netxen_nic-y := netxen_nic_hw.o netxen_nic_main.o netxen_nic_init.o \
-	netxen_nic_ethtool.o netxen_nic_niu.o
+	netxen_nic_ethtool.o netxen_nic_niu.o netxen_nic_ctx.o
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 87be0a6ef51a..936219010e46 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -84,7 +84,7 @@
 #define TX_RINGSIZE	\
 	(sizeof(struct netxen_cmd_buffer) * adapter->max_tx_desc_count)
 #define RCV_BUFFSIZE	\
-	(sizeof(struct netxen_rx_buffer) * rcv_desc->max_rx_desc_count)
+	(sizeof(struct netxen_rx_buffer) * rds_ring->max_rx_desc_count)
 #define find_diff_among(a,b,range) ((a)<(b)?((b)-(a)):((b)+(range)-(a)))
 
 #define NETXEN_NETDEV_STATUS	0x1
@@ -303,7 +303,7 @@ struct netxen_ring_ctx {
 #define netxen_set_cmd_desc_port(cmd_desc, var)	\
 	((cmd_desc)->port_ctxid |= ((var) & 0x0F))
 #define netxen_set_cmd_desc_ctxid(cmd_desc, var)	\
-	((cmd_desc)->port_ctxid |= ((var) & 0xF0))
+	((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))
 
 #define netxen_set_cmd_desc_flags(cmd_desc, val)	\
 	(cmd_desc)->flags_opcode = ((cmd_desc)->flags_opcode & \
@@ -844,7 +844,7 @@ struct netxen_adapter_stats {
  * Rcv Descriptor Context. One such per Rcv Descriptor. There may
  * be one Rcv Descriptor for normal packets, one for jumbo and may be others.
  */
-struct netxen_rcv_desc_ctx {
+struct nx_host_rds_ring {
 	u32 flags;
 	u32 producer;
 	dma_addr_t phys_addr;
@@ -864,13 +864,270 @@ struct netxen_rcv_desc_ctx {
  * present elsewhere.
  */
 struct netxen_recv_context {
-	struct netxen_rcv_desc_ctx rcv_desc[NUM_RCV_DESC_RINGS];
+	u32 state;
+	u16 context_id;
+	u16 virt_port;
+
+	struct nx_host_rds_ring rds_rings[NUM_RCV_DESC_RINGS];
 	u32 status_rx_consumer;
 	u32 crb_sts_consumer;	/* reg offset */
 	dma_addr_t rcv_status_desc_phys_addr;
 	struct status_desc *rcv_status_desc_head;
 };
 
+/* New HW context creation */
+
+#define NX_OS_CRB_RETRY_COUNT	4000
+#define NX_CDRP_SIGNATURE_MAKE(pcifn, version) \
+	(((pcifn) & 0xff) | (((version) & 0xff) << 8) | (0xcafe << 16))
+
+#define NX_CDRP_CLEAR		0x00000000
+#define NX_CDRP_CMD_BIT		0x80000000
+
+/*
+ * All responses must have the NX_CDRP_CMD_BIT cleared
+ * in the crb NX_CDRP_CRB_OFFSET.
+ */
+#define NX_CDRP_FORM_RSP(rsp)	(rsp)
+#define NX_CDRP_IS_RSP(rsp)	(((rsp) & NX_CDRP_CMD_BIT) == 0)
+
+#define NX_CDRP_RSP_OK		0x00000001
+#define NX_CDRP_RSP_FAIL	0x00000002
+#define NX_CDRP_RSP_TIMEOUT	0x00000003
+
+/*
+ * All commands must have the NX_CDRP_CMD_BIT set in
+ * the crb NX_CDRP_CRB_OFFSET.
+ */
+#define NX_CDRP_FORM_CMD(cmd)	(NX_CDRP_CMD_BIT | (cmd))
+#define NX_CDRP_IS_CMD(cmd)	(((cmd) & NX_CDRP_CMD_BIT) != 0)
+
+#define NX_CDRP_CMD_SUBMIT_CAPABILITIES		0x00000001
+#define NX_CDRP_CMD_READ_MAX_RDS_PER_CTX	0x00000002
+#define NX_CDRP_CMD_READ_MAX_SDS_PER_CTX	0x00000003
+#define NX_CDRP_CMD_READ_MAX_RULES_PER_CTX	0x00000004
+#define NX_CDRP_CMD_READ_MAX_RX_CTX		0x00000005
+#define NX_CDRP_CMD_READ_MAX_TX_CTX		0x00000006
+#define NX_CDRP_CMD_CREATE_RX_CTX		0x00000007
+#define NX_CDRP_CMD_DESTROY_RX_CTX		0x00000008
+#define NX_CDRP_CMD_CREATE_TX_CTX		0x00000009
+#define NX_CDRP_CMD_DESTROY_TX_CTX		0x0000000a
+#define NX_CDRP_CMD_SETUP_STATISTICS		0x0000000e
+#define NX_CDRP_CMD_GET_STATISTICS		0x0000000f
+#define NX_CDRP_CMD_DELETE_STATISTICS		0x00000010
+#define NX_CDRP_CMD_SET_MTU			0x00000012
+#define NX_CDRP_CMD_MAX				0x00000013
+
+#define NX_RCODE_SUCCESS		0
+#define NX_RCODE_NO_HOST_MEM		1
+#define NX_RCODE_NO_HOST_RESOURCE	2
+#define NX_RCODE_NO_CARD_CRB		3
+#define NX_RCODE_NO_CARD_MEM		4
+#define NX_RCODE_NO_CARD_RESOURCE	5
+#define NX_RCODE_INVALID_ARGS		6
+#define NX_RCODE_INVALID_ACTION		7
+#define NX_RCODE_INVALID_STATE		8
+#define NX_RCODE_NOT_SUPPORTED		9
+#define NX_RCODE_NOT_PERMITTED		10
+#define NX_RCODE_NOT_READY		11
+#define NX_RCODE_DOES_NOT_EXIST		12
+#define NX_RCODE_ALREADY_EXISTS		13
+#define NX_RCODE_BAD_SIGNATURE		14
+#define NX_RCODE_CMD_NOT_IMPL		15
+#define NX_RCODE_CMD_INVALID		16
+#define NX_RCODE_TIMEOUT		17
+#define NX_RCODE_CMD_FAILED		18
+#define NX_RCODE_MAX_EXCEEDED		19
+#define NX_RCODE_MAX			20
+
+#define NX_DESTROY_CTX_RESET		0
+#define NX_DESTROY_CTX_D3_RESET		1
+#define NX_DESTROY_CTX_MAX		2
+
+/*
+ * Capabilities
+ */
+#define NX_CAP_BIT(class, bit)		(1 << bit)
+#define NX_CAP0_LEGACY_CONTEXT		NX_CAP_BIT(0, 0)
+#define NX_CAP0_MULTI_CONTEXT		NX_CAP_BIT(0, 1)
+#define NX_CAP0_LEGACY_MN		NX_CAP_BIT(0, 2)
+#define NX_CAP0_LEGACY_MS		NX_CAP_BIT(0, 3)
+#define NX_CAP0_CUT_THROUGH		NX_CAP_BIT(0, 4)
+#define NX_CAP0_LRO			NX_CAP_BIT(0, 5)
+#define NX_CAP0_LSO			NX_CAP_BIT(0, 6)
+#define NX_CAP0_JUMBO_CONTIGUOUS	NX_CAP_BIT(0, 7)
+#define NX_CAP0_LRO_CONTIGUOUS		NX_CAP_BIT(0, 8)
+
+/*
+ * Context state
+ */
+#define NX_HOST_CTX_STATE_FREED		0
+#define NX_HOST_CTX_STATE_ALLOCATED	1
+#define NX_HOST_CTX_STATE_ACTIVE	2
+#define NX_HOST_CTX_STATE_DISABLED	3
+#define NX_HOST_CTX_STATE_QUIESCED	4
+#define NX_HOST_CTX_STATE_MAX		5
+
+/*
+ * Rx context
+ */
+
+typedef struct {
+	u64 host_phys_addr;	/* Ring base addr */
+	u32 ring_size;		/* Ring entries */
+	u16 msi_index;
+	u16 rsvd;		/* Padding */
+} nx_hostrq_sds_ring_t;
+
+typedef struct {
+	u64 host_phys_addr;	/* Ring base addr */
+	u64 buff_size;		/* Packet buffer size */
+	u32 ring_size;		/* Ring entries */
+	u32 ring_kind;		/* Class of ring */
+} nx_hostrq_rds_ring_t;
+
+typedef struct {
+	u64 host_rsp_dma_addr;	/* Response dma'd here */
+	u32 capabilities[4];	/* Flag bit vector */
+	u32 host_int_crb_mode;	/* Interrupt crb usage */
+	u32 host_rds_crb_mode;	/* RDS crb usage */
+	/* These ring offsets are relative to data[0] below */
+	u32 rds_ring_offset;	/* Offset to RDS config */
+	u32 sds_ring_offset;	/* Offset to SDS config */
+	u16 num_rds_rings;	/* Count of RDS rings */
+	u16 num_sds_rings;	/* Count of SDS rings */
+	u16 rsvd1;		/* Padding */
+	u16 rsvd2;		/* Padding */
+	u8  reserved[128];	/* reserve space for future expansion*/
+	/* MUST BE 64-bit aligned.
+	   The following is packed:
+	   - N hostrq_rds_rings
+	   - N hostrq_sds_rings */
+	char data[0];
+} nx_hostrq_rx_ctx_t;
+
+typedef struct {
+	u32 host_producer_crb;	/* Crb to use */
+	u32 rsvd1;		/* Padding */
+} nx_cardrsp_rds_ring_t;
+
+typedef struct {
+	u32 host_consumer_crb;	/* Crb to use */
+	u32 interrupt_crb;	/* Crb to use */
+} nx_cardrsp_sds_ring_t;
+
+typedef struct {
+	/* These ring offsets are relative to data[0] below */
+	u32 rds_ring_offset;	/* Offset to RDS config */
+	u32 sds_ring_offset;	/* Offset to SDS config */
+	u32 host_ctx_state;	/* Starting State */
+	u32 num_fn_per_port;	/* How many PCI fn share the port */
+	u16 num_rds_rings;	/* Count of RDS rings */
+	u16 num_sds_rings;	/* Count of SDS rings */
+	u16 context_id;		/* Handle for context */
+	u8  phys_port;		/* Physical id of port */
+	u8  virt_port;		/* Virtual/Logical id of port */
+	u8  reserved[128];	/* save space for future expansion */
+	/* MUST BE 64-bit aligned.
+	   The following is packed:
+	   - N cardrsp_rds_rings
+	   - N cardrs_sds_rings */
+	char data[0];
+} nx_cardrsp_rx_ctx_t;
+
+#define SIZEOF_HOSTRQ_RX(HOSTRQ_RX, rds_rings, sds_rings)	\
+	(sizeof(HOSTRQ_RX) +					\
+	(rds_rings)*(sizeof(nx_hostrq_rds_ring_t)) +		\
+	(sds_rings)*(sizeof(nx_hostrq_sds_ring_t)))
+
+#define SIZEOF_CARDRSP_RX(CARDRSP_RX, rds_rings, sds_rings)	\
+	(sizeof(CARDRSP_RX) +					\
+	(rds_rings)*(sizeof(nx_cardrsp_rds_ring_t)) +		\
+	(sds_rings)*(sizeof(nx_cardrsp_sds_ring_t)))
+
+/*
+ * Tx context
+ */
+
+typedef struct {
+	u64 host_phys_addr;	/* Ring base addr */
+	u32 ring_size;		/* Ring entries */
+	u32 rsvd;		/* Padding */
+} nx_hostrq_cds_ring_t;
+
+typedef struct {
+	u64 host_rsp_dma_addr;	/* Response dma'd here */
+	u64 cmd_cons_dma_addr;	/*  */
+	u64 dummy_dma_addr;	/*  */
+	u32 capabilities[4];	/* Flag bit vector */
+	u32 host_int_crb_mode;	/* Interrupt crb usage */
+	u32 rsvd1;		/* Padding */
+	u16 rsvd2;		/* Padding */
+	u16 interrupt_ctl;
+	u16 msi_index;
+	u16 rsvd3;		/* Padding */
+	nx_hostrq_cds_ring_t cds_ring;	/* Desc of cds ring */
+	u8  reserved[128];	/* future expansion */
+} nx_hostrq_tx_ctx_t;
+
+typedef struct {
+	u32 host_producer_crb;	/* Crb to use */
+	u32 interrupt_crb;	/* Crb to use */
+} nx_cardrsp_cds_ring_t;
+
+typedef struct {
+	u32 host_ctx_state;	/* Starting state */
+	u16 context_id;		/* Handle for context */
+	u8  phys_port;		/* Physical id of port */
+	u8  virt_port;		/* Virtual/Logical id of port */
+	nx_cardrsp_cds_ring_t cds_ring;	/* Card cds settings */
+	u8  reserved[128];	/* future expansion */
+} nx_cardrsp_tx_ctx_t;
+
+#define SIZEOF_HOSTRQ_TX(HOSTRQ_TX)	(sizeof(HOSTRQ_TX))
+#define SIZEOF_CARDRSP_TX(CARDRSP_TX)	(sizeof(CARDRSP_TX))
+
+/* CRB */
+
+#define NX_HOST_RDS_CRB_MODE_UNIQUE	0
+#define NX_HOST_RDS_CRB_MODE_SHARED	1
+#define NX_HOST_RDS_CRB_MODE_CUSTOM	2
+#define NX_HOST_RDS_CRB_MODE_MAX	3
+
+#define NX_HOST_INT_CRB_MODE_UNIQUE	0
+#define NX_HOST_INT_CRB_MODE_SHARED	1
+#define NX_HOST_INT_CRB_MODE_NORX	2
+#define NX_HOST_INT_CRB_MODE_NOTX	3
+#define NX_HOST_INT_CRB_MODE_NORXTX	4
+
+
+/* MAC */
+
+#define MC_COUNT_P2	16
+#define MC_COUNT_P3	38
+
+#define NETXEN_MAC_NOOP	0
+#define NETXEN_MAC_ADD	1
+#define NETXEN_MAC_DEL	2
+
+typedef struct nx_mac_list_s {
+	struct nx_mac_list_s *next;
+	uint8_t mac_addr[MAX_ADDR_LEN];
+} nx_mac_list_t;
+
+typedef struct {
+	u64 qhdr;
+	u64 req_hdr;
+	u64 words[6];
+} nic_request_t;
+
+typedef struct {
+	u8 op;
+	u8 tag;
+	u8 mac_addr[6];
+} nx_mac_req_t;
+
+
 #define NETXEN_NIC_MSI_ENABLED		0x02
 #define NETXEN_NIC_MSIX_ENABLED		0x04
 #define NETXEN_IS_MSI_FAMILY(adapter) \
@@ -899,11 +1156,13 @@ struct netxen_adapter {
 	int mtu;
 	int portnum;
 	u8 physical_port;
+	u16 tx_context_id;
 
 	uint8_t mc_enabled;
 	uint8_t max_mc_count;
 
 	struct netxen_legacy_intr_set legacy_intr;
+	u32 crb_intr_mask;
 
 	struct work_struct watchdog_task;
 	struct timer_list watchdog_timer;
@@ -926,6 +1185,8 @@ struct netxen_adapter {
 	u32 max_jumbo_rx_desc_count;
 	u32 max_lro_rx_desc_count;
 
+	int max_rds_rings;
+
 	u32 flags;
 	u32 irq;
 	int driver_mismatch;
@@ -1144,7 +1405,10 @@ void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx,
 int netxen_process_cmd_ring(struct netxen_adapter *adapter);
 u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctx, int max);
 void netxen_nic_set_multi(struct net_device *netdev);
+
+u32 nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, u32 mtu);
 int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu);
+
 int netxen_nic_set_mac(struct net_device *netdev, void *p);
 struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev);
 
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index 7e49e6106073..381d55a52162 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -524,9 +524,9 @@ netxen_nic_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
 	ring->rx_jumbo_pending = 0;
 	for (i = 0; i < MAX_RCV_CTX; ++i) {
 		ring->rx_pending += adapter->recv_ctx[i].
-			rcv_desc[RCV_DESC_NORMAL_CTXID].max_rx_desc_count;
+			rds_rings[RCV_DESC_NORMAL_CTXID].max_rx_desc_count;
 		ring->rx_jumbo_pending += adapter->recv_ctx[i].
-			rcv_desc[RCV_DESC_JUMBO_CTXID].max_rx_desc_count;
+			rds_rings[RCV_DESC_JUMBO_CTXID].max_rx_desc_count;
 	}
 	ring->tx_pending = adapter->max_tx_desc_count;
 
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index fde8c6f1c9f5..d46b4dff783d 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -280,80 +280,6 @@ static unsigned crb_hub_agt[64] =
 	0,
 };
 
-struct netxen_recv_crb recv_crb_registers[] = {
-	/*
-	 * Instance 0.
-	 */
-	{
-		/* crb_rcv_producer: */
-		{
-			NETXEN_NIC_REG(0x100),
-			/* Jumbo frames */
-			NETXEN_NIC_REG(0x110),
-			/* LRO */
-			NETXEN_NIC_REG(0x120)
-		},
-		/* crb_sts_consumer: */
-		NETXEN_NIC_REG(0x138),
-	},
-	/*
-	 * Instance 1,
-	 */
-	{
-		/* crb_rcv_producer: */
-		{
-			NETXEN_NIC_REG(0x144),
-			/* Jumbo frames */
-			NETXEN_NIC_REG(0x154),
-			/* LRO */
-			NETXEN_NIC_REG(0x164)
-		},
-		/* crb_sts_consumer: */
-		NETXEN_NIC_REG(0x17c),
-	},
-	/*
-	 * Instance 2,
-	 */
-	{
-		/* crb_rcv_producer: */
-		{
-			NETXEN_NIC_REG(0x1d8),
-			/* Jumbo frames */
-			NETXEN_NIC_REG(0x1f8),
-			/* LRO */
-			NETXEN_NIC_REG(0x208)
-		},
-		/* crb_sts_consumer: */
-		NETXEN_NIC_REG(0x220),
-	},
-	/*
-	 * Instance 3,
-	 */
-	{
-		/* crb_rcv_producer: */
-		{
-			NETXEN_NIC_REG(0x22c),
-			/* Jumbo frames */
-			NETXEN_NIC_REG(0x23c),
-			/* LRO */
-			NETXEN_NIC_REG(0x24c)
-		},
-		/* crb_sts_consumer: */
-		NETXEN_NIC_REG(0x264),
-	},
-};
-
-static u64 ctx_addr_sig_regs[][3] = {
-	{NETXEN_NIC_REG(0x188), NETXEN_NIC_REG(0x18c), NETXEN_NIC_REG(0x1c0)},
-	{NETXEN_NIC_REG(0x190), NETXEN_NIC_REG(0x194), NETXEN_NIC_REG(0x1c4)},
-	{NETXEN_NIC_REG(0x198), NETXEN_NIC_REG(0x19c), NETXEN_NIC_REG(0x1c8)},
-	{NETXEN_NIC_REG(0x1a0), NETXEN_NIC_REG(0x1a4), NETXEN_NIC_REG(0x1cc)}
-};
-#define CRB_CTX_ADDR_REG_LO(FUNC_ID)	(ctx_addr_sig_regs[FUNC_ID][0])
-#define CRB_CTX_ADDR_REG_HI(FUNC_ID)	(ctx_addr_sig_regs[FUNC_ID][2])
-#define CRB_CTX_SIGNATURE_REG(FUNC_ID)	(ctx_addr_sig_regs[FUNC_ID][1])
-
-
 /* PCI Windowing for DDR regions. */
 
 #define ADDR_IN_RANGE(addr, low, high) \
@@ -368,10 +294,6 @@ static u64 ctx_addr_sig_regs[][3] = {
 #define NETXEN_NIU_HDRSIZE	(0x1 << 6)
 #define NETXEN_NIU_TLRSIZE	(0x1 << 5)
 
-#define lower32(x)	((u32)((x) & 0xffffffff))
-#define upper32(x)	\
-	((u32)(((unsigned long long)(x) >> 32) & 0xffffffff))
-
 #define NETXEN_NIC_ZERO_PAUSE_ADDR	0ULL
 #define NETXEN_NIC_UNIT_PAUSE_ADDR	0x200ULL
 #define NETXEN_NIC_EPG_PAUSE_ADDR1	0x2200010000c28001ULL
@@ -556,171 +478,6 @@ int netxen_nic_change_mtu(struct net_device *netdev, int mtu)
 	return 0;
 }
 
-/*
- * check if the firmware has been downloaded and ready to run and
- * setup the address for the descriptors in the adapter
- */
-int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
-{
-	struct netxen_hardware_context *hw = &adapter->ahw;
-	u32 state = 0;
-	void *addr;
-	int err = 0;
-	int ctx, ring;
-	struct netxen_recv_context *recv_ctx;
-	struct netxen_rcv_desc_ctx *rcv_desc;
-	int func_id = adapter->portnum;
-
-	err = netxen_receive_peg_ready(adapter);
-	if (err) {
-		printk(KERN_ERR "Rcv Peg initialization not complete:%x.\n",
-				state);
-		return err;
-	}
-	adapter->intr_scheme = adapter->pci_read_normalize(adapter,
-			CRB_NIC_CAPABILITIES_FW);
-	adapter->msi_mode = adapter->pci_read_normalize(adapter,
-			CRB_NIC_MSI_MODE_FW);
-
-	addr = pci_alloc_consistent(adapter->pdev,
-			sizeof(struct netxen_ring_ctx) + sizeof(uint32_t),
-			&adapter->ctx_desc_phys_addr);
-
-	if (addr == NULL) {
-		DPRINTK(ERR, "bad return from pci_alloc_consistent\n");
-		err = -ENOMEM;
-		return err;
-	}
-	memset(addr, 0, sizeof(struct netxen_ring_ctx));
-	adapter->ctx_desc = (struct netxen_ring_ctx *)addr;
-	adapter->ctx_desc->ctx_id = cpu_to_le32(adapter->portnum);
-	adapter->ctx_desc->cmd_consumer_offset =
-		cpu_to_le64(adapter->ctx_desc_phys_addr +
-			sizeof(struct netxen_ring_ctx));
-	adapter->cmd_consumer = (__le32 *) (((char *)addr) +
-			sizeof(struct netxen_ring_ctx));
-
-	addr = pci_alloc_consistent(adapter->pdev,
-			sizeof(struct cmd_desc_type0) *
-			adapter->max_tx_desc_count,
-			&hw->cmd_desc_phys_addr);
-
-	if (addr == NULL) {
-		DPRINTK(ERR, "bad return from pci_alloc_consistent\n");
-		netxen_free_hw_resources(adapter);
-		return -ENOMEM;
-	}
-
-	adapter->ctx_desc->cmd_ring_addr =
-		cpu_to_le64(hw->cmd_desc_phys_addr);
-	adapter->ctx_desc->cmd_ring_size =
-		cpu_to_le32(adapter->max_tx_desc_count);
-
-	hw->cmd_desc_head = (struct cmd_desc_type0 *)addr;
-
-	for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
-		recv_ctx = &adapter->recv_ctx[ctx];
-
-		for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) {
-			rcv_desc = &recv_ctx->rcv_desc[ring];
-			addr = pci_alloc_consistent(adapter->pdev,
-					RCV_DESC_RINGSIZE,
-					&rcv_desc->phys_addr);
-			if (addr == NULL) {
-				DPRINTK(ERR, "bad return from "
-					"pci_alloc_consistent\n");
-				netxen_free_hw_resources(adapter);
-				err = -ENOMEM;
-				return err;
-			}
-			rcv_desc->desc_head = (struct rcv_desc *)addr;
-			adapter->ctx_desc->rcv_ctx[ring].rcv_ring_addr =
-				cpu_to_le64(rcv_desc->phys_addr);
-			adapter->ctx_desc->rcv_ctx[ring].rcv_ring_size =
-				cpu_to_le32(rcv_desc->max_rx_desc_count);
-			rcv_desc->crb_rcv_producer =
-				recv_crb_registers[adapter->portnum].
-				crb_rcv_producer[ring];
-		}
-
-		addr = pci_alloc_consistent(adapter->pdev, STATUS_DESC_RINGSIZE,
-				&recv_ctx->rcv_status_desc_phys_addr);
-		if (addr == NULL) {
-			DPRINTK(ERR, "bad return from"
-				" pci_alloc_consistent\n");
-			netxen_free_hw_resources(adapter);
-			err = -ENOMEM;
-			return err;
-		}
-		recv_ctx->rcv_status_desc_head = (struct status_desc *)addr;
-		adapter->ctx_desc->sts_ring_addr =
-			cpu_to_le64(recv_ctx->rcv_status_desc_phys_addr);
-		adapter->ctx_desc->sts_ring_size =
-			cpu_to_le32(adapter->max_rx_desc_count);
-		recv_ctx->crb_sts_consumer =
-			recv_crb_registers[adapter->portnum].crb_sts_consumer;
-
-	}
-	/* Window = 1 */
-
-	adapter->pci_write_normalize(adapter, CRB_CTX_ADDR_REG_LO(func_id),
-			lower32(adapter->ctx_desc_phys_addr));
-	adapter->pci_write_normalize(adapter, CRB_CTX_ADDR_REG_HI(func_id),
-			upper32(adapter->ctx_desc_phys_addr));
-	adapter->pci_write_normalize(adapter, CRB_CTX_SIGNATURE_REG(func_id),
-			NETXEN_CTX_SIGNATURE | func_id);
-	return err;
-}
-
-void netxen_free_hw_resources(struct netxen_adapter *adapter)
-{
-	struct netxen_recv_context *recv_ctx;
-	struct netxen_rcv_desc_ctx *rcv_desc;
-	int ctx, ring;
-
-	if (adapter->ctx_desc != NULL) {
-		pci_free_consistent(adapter->pdev,
-				sizeof(struct netxen_ring_ctx) +
-				sizeof(uint32_t),
-				adapter->ctx_desc,
-				adapter->ctx_desc_phys_addr);
-		adapter->ctx_desc = NULL;
-	}
-
-	if (adapter->ahw.cmd_desc_head != NULL) {
-		pci_free_consistent(adapter->pdev,
-				sizeof(struct cmd_desc_type0) *
-				adapter->max_tx_desc_count,
-				adapter->ahw.cmd_desc_head,
-				adapter->ahw.cmd_desc_phys_addr);
-		adapter->ahw.cmd_desc_head = NULL;
-	}
-
-	for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
-		recv_ctx = &adapter->recv_ctx[ctx];
-		for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) {
-			rcv_desc = &recv_ctx->rcv_desc[ring];
-
-			if (rcv_desc->desc_head != NULL) {
-				pci_free_consistent(adapter->pdev,
-						RCV_DESC_RINGSIZE,
-						rcv_desc->desc_head,
-						rcv_desc->phys_addr);
-				rcv_desc->desc_head = NULL;
-			}
-		}
-
-		if (recv_ctx->rcv_status_desc_head != NULL) {
-			pci_free_consistent(adapter->pdev,
-					STATUS_DESC_RINGSIZE,
-					recv_ctx->rcv_status_desc_head,
-					recv_ctx->
-					rcv_status_desc_phys_addr);
-			recv_ctx->rcv_status_desc_head = NULL;
-		}
-	}
-}
-
 void netxen_tso_check(struct netxen_adapter *adapter,
 		struct cmd_desc_type0 *desc, struct sk_buff *skb)
 {
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 7c3fbc4a5723..d222436bd5bd 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -158,21 +158,21 @@ int netxen_init_firmware(struct netxen_adapter *adapter)
 void netxen_release_rx_buffers(struct netxen_adapter *adapter)
 {
 	struct netxen_recv_context *recv_ctx;
-	struct netxen_rcv_desc_ctx *rcv_desc;
+	struct nx_host_rds_ring *rds_ring;
 	struct netxen_rx_buffer *rx_buf;
 	int i, ctxid, ring;
 
 	for (ctxid = 0; ctxid < MAX_RCV_CTX; ++ctxid) {
 		recv_ctx = &adapter->recv_ctx[ctxid];
-		for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) {
-			rcv_desc = &recv_ctx->rcv_desc[ring];
-			for (i = 0; i < rcv_desc->max_rx_desc_count; ++i) {
-				rx_buf = &(rcv_desc->rx_buf_arr[i]);
+		for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+			rds_ring = &recv_ctx->rds_rings[ring];
+			for (i = 0; i < rds_ring->max_rx_desc_count; ++i) {
+				rx_buf = &(rds_ring->rx_buf_arr[i]);
 				if (rx_buf->state == NETXEN_BUFFER_FREE)
 					continue;
 				pci_unmap_single(adapter->pdev,
 						rx_buf->dma,
-						rcv_desc->dma_size,
+						rds_ring->dma_size,
 						PCI_DMA_FROMDEVICE);
 				if (rx_buf->skb != NULL)
 					dev_kfree_skb_any(rx_buf->skb);
@@ -216,16 +216,16 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter)
 void netxen_free_sw_resources(struct netxen_adapter *adapter)
 {
 	struct netxen_recv_context *recv_ctx;
-	struct netxen_rcv_desc_ctx *rcv_desc;
+	struct nx_host_rds_ring *rds_ring;
 	int ctx, ring;
 
 	for (ctx = 0; ctx < MAX_RCV_CTX; ctx++) {
 		recv_ctx = &adapter->recv_ctx[ctx];
-		for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) {
-			rcv_desc = &recv_ctx->rcv_desc[ring];
-			if (rcv_desc->rx_buf_arr) {
-				vfree(rcv_desc->rx_buf_arr);
-				rcv_desc->rx_buf_arr = NULL;
+		for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+			rds_ring = &recv_ctx->rds_rings[ring];
+			if (rds_ring->rx_buf_arr) {
+				vfree(rds_ring->rx_buf_arr);
+				rds_ring->rx_buf_arr = NULL;
 			}
 		}
 	}
@@ -237,7 +237,7 @@ void netxen_free_sw_resources(struct netxen_adapter *adapter)
 int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
 {
 	struct netxen_recv_context *recv_ctx;
-	struct netxen_rcv_desc_ctx *rcv_desc;
+	struct nx_host_rds_ring *rds_ring;
 	struct netxen_rx_buffer *rx_buf;
 	int ctx, ring, i, num_rx_bufs;
 
@@ -255,52 +255,52 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
 
 	for (ctx = 0; ctx < MAX_RCV_CTX; ctx++) {
 		recv_ctx = &adapter->recv_ctx[ctx];
-		for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) {
-			rcv_desc = &recv_ctx->rcv_desc[ring];
+		for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+			rds_ring = &recv_ctx->rds_rings[ring];
 			switch (RCV_DESC_TYPE(ring)) {
 			case RCV_DESC_NORMAL:
-				rcv_desc->max_rx_desc_count =
+				rds_ring->max_rx_desc_count =
 					adapter->max_rx_desc_count;
-				rcv_desc->flags = RCV_DESC_NORMAL;
-				rcv_desc->dma_size = RX_DMA_MAP_LEN;
-				rcv_desc->skb_size = MAX_RX_BUFFER_LENGTH;
+				rds_ring->flags = RCV_DESC_NORMAL;
+				rds_ring->dma_size = RX_DMA_MAP_LEN;
+				rds_ring->skb_size = MAX_RX_BUFFER_LENGTH;
 				break;
 
 			case RCV_DESC_JUMBO:
-				rcv_desc->max_rx_desc_count =
+				rds_ring->max_rx_desc_count =
 					adapter->max_jumbo_rx_desc_count;
-				rcv_desc->flags = RCV_DESC_JUMBO;
-				rcv_desc->dma_size = RX_JUMBO_DMA_MAP_LEN;
-				rcv_desc->skb_size =
+				rds_ring->flags = RCV_DESC_JUMBO;
+				rds_ring->dma_size = RX_JUMBO_DMA_MAP_LEN;
+				rds_ring->skb_size =
 					MAX_RX_JUMBO_BUFFER_LENGTH;
 				break;
 
 			case RCV_RING_LRO:
-				rcv_desc->max_rx_desc_count =
+				rds_ring->max_rx_desc_count =
 					adapter->max_lro_rx_desc_count;
-				rcv_desc->flags = RCV_DESC_LRO;
-				rcv_desc->dma_size = RX_LRO_DMA_MAP_LEN;
-				rcv_desc->skb_size = MAX_RX_LRO_BUFFER_LENGTH;
+				rds_ring->flags = RCV_DESC_LRO;
+				rds_ring->dma_size = RX_LRO_DMA_MAP_LEN;
+				rds_ring->skb_size = MAX_RX_LRO_BUFFER_LENGTH;
 				break;
 
 			}
-			rcv_desc->rx_buf_arr = (struct netxen_rx_buffer *)
+			rds_ring->rx_buf_arr = (struct netxen_rx_buffer *)
 				vmalloc(RCV_BUFFSIZE);
-			if (rcv_desc->rx_buf_arr == NULL) {
+			if (rds_ring->rx_buf_arr == NULL) {
 				printk(KERN_ERR "%s: Failed to allocate "
 					"rx buffer ring %d\n",
 					netdev->name, ring);
 				/* free whatever was already allocated */
 				goto err_out;
 			}
-			memset(rcv_desc->rx_buf_arr, 0, RCV_BUFFSIZE);
-			rcv_desc->begin_alloc = 0;
+			memset(rds_ring->rx_buf_arr, 0, RCV_BUFFSIZE);
+			rds_ring->begin_alloc = 0;
 			/*
 			 * Now go through all of them, set reference handles
 			 * and put them in the queues.
 			 */
-			num_rx_bufs = rcv_desc->max_rx_desc_count;
-			rx_buf = rcv_desc->rx_buf_arr;
+			num_rx_bufs = rds_ring->max_rx_desc_count;
+			rx_buf = rds_ring->rx_buf_arr;
 			for (i = 0; i < num_rx_bufs; i++) {
 				rx_buf->ref_handle = i;
 				rx_buf->state = NETXEN_BUFFER_FREE;
@@ -1154,7 +1154,7 @@ static void netxen_process_rcv(struct netxen_adapter *adapter, int ctxid,
 	struct sk_buff *skb;
 	u32 length = netxen_get_sts_totallength(sts_data);
 	u32 desc_ctx;
-	struct netxen_rcv_desc_ctx *rcv_desc;
+	struct nx_host_rds_ring *rds_ring;
 	int ret;
 
 	desc_ctx = netxen_get_sts_type(sts_data);
@@ -1164,13 +1164,13 @@ static void netxen_process_rcv(struct netxen_adapter *adapter, int ctxid,
 		return;
 	}
 
-	rcv_desc = &recv_ctx->rcv_desc[desc_ctx];
-	if (unlikely(index > rcv_desc->max_rx_desc_count)) {
+	rds_ring = &recv_ctx->rds_rings[desc_ctx];
+	if (unlikely(index > rds_ring->max_rx_desc_count)) {
 		DPRINTK(ERR, "Got a buffer index:%x Max is %x\n",
-			index, rcv_desc->max_rx_desc_count);
+			index, rds_ring->max_rx_desc_count);
 		return;
 	}
-	buffer = &rcv_desc->rx_buf_arr[index];
+	buffer = &rds_ring->rx_buf_arr[index];
 	if (desc_ctx == RCV_DESC_LRO_CTXID) {
 		buffer->lro_current_frags++;
 		if (netxen_get_sts_desc_lro_last_frag(desc)) {
@@ -1191,7 +1191,7 @@ static void netxen_process_rcv(struct netxen_adapter *adapter, int ctxid,
 		}
 	}
 
-	pci_unmap_single(pdev, buffer->dma, rcv_desc->dma_size,
+	pci_unmap_single(pdev, buffer->dma, rds_ring->dma_size,
 			 PCI_DMA_FROMDEVICE);
 
 	skb = (struct sk_buff *)buffer->skb;
@@ -1249,7 +1249,7 @@ u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctxid, int max)
 		consumer = (consumer + 1) & (adapter->max_rx_desc_count - 1);
 		count++;
 	}
-	for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++)
+	for (ring = 0; ring < adapter->max_rds_rings; ring++)
 		netxen_post_rx_buffers_nodb(adapter, ctxid, ring);
 
 	/* update the consumer index in phantom */
@@ -1340,7 +1340,7 @@ void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid)
 	struct pci_dev *pdev = adapter->pdev;
 	struct sk_buff *skb;
 	struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctx]);
-	struct netxen_rcv_desc_ctx *rcv_desc = NULL;
+	struct nx_host_rds_ring *rds_ring = NULL;
 	uint producer;
 	struct rcv_desc *pdesc;
 	struct netxen_rx_buffer *buffer;
@@ -1349,27 +1349,27 @@ void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid)
 	netxen_ctx_msg msg = 0;
 	dma_addr_t dma;
 
-	rcv_desc = &recv_ctx->rcv_desc[ringid];
+	rds_ring = &recv_ctx->rds_rings[ringid];
 
-	producer = rcv_desc->producer;
-	index = rcv_desc->begin_alloc;
-	buffer = &rcv_desc->rx_buf_arr[index];
+	producer = rds_ring->producer;
+	index = rds_ring->begin_alloc;
+	buffer = &rds_ring->rx_buf_arr[index];
 	/* We can start writing rx descriptors into the phantom memory. */
 	while (buffer->state == NETXEN_BUFFER_FREE) {
-		skb = dev_alloc_skb(rcv_desc->skb_size);
+		skb = dev_alloc_skb(rds_ring->skb_size);
 		if (unlikely(!skb)) {
 			/*
 			 * TODO
 			 * We need to schedule the posting of buffers to the pegs.
 			 */
-			rcv_desc->begin_alloc = index;
+			rds_ring->begin_alloc = index;
 			DPRINTK(ERR, "netxen_post_rx_buffers: "
 				" allocated only %d buffers\n", count);
 			break;
 		}
 
 		count++;	/* now there should be no failure */
-		pdesc = &rcv_desc->desc_head[producer];
+		pdesc = &rds_ring->desc_head[producer];
 
 #if defined(XGB_DEBUG)
 		*(unsigned long *)(skb->head) = 0xc0debabe;
@@ -1382,7 +1382,7 @@ void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid)
 		 * buffer after it has been filled FSL TBD TBD
 		 * skb->dev = netdev;
 		 */
-		dma = pci_map_single(pdev, skb->data, rcv_desc->dma_size,
+		dma = pci_map_single(pdev, skb->data, rds_ring->dma_size,
 				     PCI_DMA_FROMDEVICE);
 		pdesc->addr_buffer = cpu_to_le64(dma);
 		buffer->skb = skb;
@@ -1390,36 +1390,40 @@ void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid)
 		buffer->dma = dma;
 		/* make a rcv descriptor */
 		pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
-		pdesc->buffer_length = cpu_to_le32(rcv_desc->dma_size);
+		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
 		DPRINTK(INFO, "done writing descripter\n");
 		producer =
-			get_next_index(producer, rcv_desc->max_rx_desc_count);
-		index = get_next_index(index, rcv_desc->max_rx_desc_count);
-		buffer = &rcv_desc->rx_buf_arr[index];
+			get_next_index(producer, rds_ring->max_rx_desc_count);
+		index = get_next_index(index, rds_ring->max_rx_desc_count);
+		buffer = &rds_ring->rx_buf_arr[index];
 	}
 	/* if we did allocate buffers, then write the count to Phantom */
 	if (count) {
-		rcv_desc->begin_alloc = index;
-		rcv_desc->producer = producer;
+		rds_ring->begin_alloc = index;
+		rds_ring->producer = producer;
 		/* Window = 1 */
 		adapter->pci_write_normalize(adapter,
-				rcv_desc->crb_rcv_producer,
-				(producer-1) & (rcv_desc->max_rx_desc_count-1));
+				rds_ring->crb_rcv_producer,
+				(producer-1) & (rds_ring->max_rx_desc_count-1));
+
+		if (adapter->fw_major < 4) {
 		/*
 		 * Write a doorbell msg to tell phanmon of change in
 		 * receive ring producer
+		 * Only for firmware version < 4.0.0
 		 */
 		netxen_set_msg_peg_id(msg, NETXEN_RCV_PEG_DB_ID);
 		netxen_set_msg_privid(msg);
 		netxen_set_msg_count(msg,
 				((producer -
-				  1) & (rcv_desc->
+				  1) & (rds_ring->
 					max_rx_desc_count - 1)));
 		netxen_set_msg_ctxid(msg, adapter->portnum);
 		netxen_set_msg_opcode(msg, NETXEN_RCV_PRODUCER(ringid));
 		writel(msg,
 		       DB_NORMALIZE(adapter,
 				    NETXEN_RCV_PRODUCER_OFFSET));
+		}
 	}
 }
 
@@ -1429,32 +1433,32 @@ static void netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
 	struct pci_dev *pdev = adapter->pdev;
 	struct sk_buff *skb;
 	struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctx]);
-	struct netxen_rcv_desc_ctx *rcv_desc = NULL;
+	struct nx_host_rds_ring *rds_ring = NULL;
 	u32 producer;
 	struct rcv_desc *pdesc;
 	struct netxen_rx_buffer *buffer;
 	int count = 0;
 	int index = 0;
 
-	rcv_desc = &recv_ctx->rcv_desc[ringid];
+	rds_ring = &recv_ctx->rds_rings[ringid];
 
-	producer = rcv_desc->producer;
-	index = rcv_desc->begin_alloc;
-	buffer = &rcv_desc->rx_buf_arr[index];
+	producer = rds_ring->producer;
+	index = rds_ring->begin_alloc;
+	buffer = &rds_ring->rx_buf_arr[index];
 	/* We can start writing rx descriptors into the phantom memory. */
 	while (buffer->state == NETXEN_BUFFER_FREE) {
-		skb = dev_alloc_skb(rcv_desc->skb_size);
+		skb = dev_alloc_skb(rds_ring->skb_size);
 		if (unlikely(!skb)) {
 			/*
 			 * We need to schedule the posting of buffers to the pegs.
 			 */
-			rcv_desc->begin_alloc = index;
+			rds_ring->begin_alloc = index;
 			DPRINTK(ERR, "netxen_post_rx_buffers_nodb: "
 				" allocated only %d buffers\n", count);
 			break;
 		}
 		count++;	/* now there should be no failure */
-		pdesc = &rcv_desc->desc_head[producer];
+		pdesc = &rds_ring->desc_head[producer];
 		skb_reserve(skb, 2);
 		/*
 		 * This will be setup when we receive the
@@ -1464,27 +1468,27 @@ static void netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
 		buffer->skb = skb;
 		buffer->state = NETXEN_BUFFER_BUSY;
 		buffer->dma = pci_map_single(pdev, skb->data,
-					     rcv_desc->dma_size,
+					     rds_ring->dma_size,
 					     PCI_DMA_FROMDEVICE);
 
 		/* make a rcv descriptor */
 		pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
-		pdesc->buffer_length = cpu_to_le32(rcv_desc->dma_size);
+		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
 		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
 		producer =
-			get_next_index(producer, rcv_desc->max_rx_desc_count);
-		index = get_next_index(index, rcv_desc->max_rx_desc_count);
-		buffer = &rcv_desc->rx_buf_arr[index];
+			get_next_index(producer, rds_ring->max_rx_desc_count);
+		index = get_next_index(index, rds_ring->max_rx_desc_count);
+		buffer = &rds_ring->rx_buf_arr[index];
 	}
 
 	/* if we did allocate buffers, then write the count to Phantom */
 	if (count) {
-		rcv_desc->begin_alloc = index;
-		rcv_desc->producer = producer;
+		rds_ring->begin_alloc = index;
+		rds_ring->producer = producer;
 		/* Window = 1 */
 		adapter->pci_write_normalize(adapter,
-				rcv_desc->crb_rcv_producer,
-				(producer-1) & (rcv_desc->max_rx_desc_count-1));
+				rds_ring->crb_rcv_producer,
+				(producer-1) & (rds_ring->max_rx_desc_count-1));
 		wmb();
 	}
 }
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 2d0963f4d194..03d796d19ad9 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -151,22 +151,17 @@ static uint32_t msi_tgt_status[8] = {
 	ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
 };
 
-static uint32_t sw_int_mask[4] = {
-	CRB_SW_INT_MASK_0, CRB_SW_INT_MASK_1,
-	CRB_SW_INT_MASK_2, CRB_SW_INT_MASK_3
-};
-
 static struct netxen_legacy_intr_set legacy_intr[] = NX_LEGACY_INTR_CONFIG;
 
 static void netxen_nic_disable_int(struct netxen_adapter *adapter)
 {
 	u32 mask = 0x7ff;
 	int retries = 32;
-	int port = adapter->portnum;
 	int pci_fn = adapter->ahw.pci_func;
 
 	if (adapter->msi_mode != MSI_MODE_MULTIFUNC)
-		adapter->pci_write_normalize(adapter, sw_int_mask[port], 0);
+		adapter->pci_write_normalize(adapter,
+				adapter->crb_intr_mask, 0);
 
 	if (adapter->intr_scheme != -1 &&
 	    adapter->intr_scheme != INTR_SCHEME_PERPORT)
@@ -198,7 +193,6 @@ static void netxen_nic_disable_int(struct netxen_adapter *adapter)
 static void netxen_nic_enable_int(struct netxen_adapter *adapter)
 {
 	u32 mask;
-	int port = adapter->portnum;
 
 	DPRINTK(1, INFO, "Entered ISR Enable \n");
 
@@ -219,7 +213,7 @@ static void netxen_nic_enable_int(struct netxen_adapter *adapter)
 		adapter->pci_write_immediate(adapter, ISR_INT_MASK, mask);
 	}
 
-	adapter->pci_write_normalize(adapter, sw_int_mask[port], 0x1);
+	adapter->pci_write_normalize(adapter, adapter->crb_intr_mask, 0x1);
 
 	if (!NETXEN_IS_MSI_FAMILY(adapter)) {
 		mask = 0xbff;
@@ -710,10 +704,13 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	adapter->status &= ~NETXEN_NETDEV_STATUS;
 	adapter->rx_csum = 1;
 	adapter->mc_enabled = 0;
-	if (NX_IS_REVISION_P3(revision_id))
+	if (NX_IS_REVISION_P3(revision_id)) {
 		adapter->max_mc_count = 38;
-	else
+		adapter->max_rds_rings = 2;
+	} else {
 		adapter->max_mc_count = 16;
+		adapter->max_rds_rings = 3;
+	}
 
 	netdev->open = netxen_nic_open;
 	netdev->stop = netxen_nic_close;
@@ -1081,7 +1078,7 @@ static int netxen_nic_open(struct net_device *netdev)
 	netxen_nic_update_cmd_consumer(adapter, 0);
 
 	for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
-		for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++)
+		for (ring = 0; ring < adapter->max_rds_rings; ring++)
 			netxen_post_rx_buffers(adapter, ctx, ring);
 	}
 	if (NETXEN_IS_MSI_FAMILY(adapter))
diff --git a/drivers/net/netxen/netxen_nic_phan_reg.h b/drivers/net/netxen/netxen_nic_phan_reg.h
index 09d070512362..a63762394a83 100644
--- a/drivers/net/netxen/netxen_nic_phan_reg.h
+++ b/drivers/net/netxen/netxen_nic_phan_reg.h
@@ -161,8 +161,6 @@ struct netxen_recv_crb {
 	u32 crb_sts_consumer;
 };
 
-extern struct netxen_recv_crb recv_crb_registers[];
-
 /*
  * Temperature control.
  */