Diffstat (limited to 'drivers/net/qlge/qlge.h')
-rw-r--r--	drivers/net/qlge/qlge.h	61
1 file changed, 37 insertions(+), 24 deletions(-)
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 459663a4023d..c1dadadfab18 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -28,11 +28,11 @@
 	} while (0)
 
 #define QLGE_VENDOR_ID    0x1077
-#define QLGE_DEVICE_ID1   0x8012
-#define QLGE_DEVICE_ID    0x8000
+#define QLGE_DEVICE_ID    0x8012
 
-#define MAX_RX_RINGS 128
-#define MAX_TX_RINGS 128
+#define MAX_CPUS 8
+#define MAX_TX_RINGS MAX_CPUS
+#define MAX_RX_RINGS ((MAX_CPUS * 2) + 1)
 
 #define NUM_TX_RING_ENTRIES	256
 #define NUM_RX_RING_ENTRIES	256
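The new limits size the rings per CPU rather than at a fixed chip maximum: MAX_TX_RINGS is simply MAX_CPUS, and MAX_RX_RINGS == (MAX_CPUS * 2) + 1 suggests one completion ring per outbound ring, one inbound RSS ring per CPU, and a single default ring. A minimal sketch of that accounting, assuming this reading of the constants (the variable names are illustrative, not part of the patch):

/* Illustrative accounting for MAX_RX_RINGS == (MAX_CPUS * 2) + 1. */
int tx_done_rings = MAX_CPUS;	/* one completion queue per outbound (tx) ring */
int rss_rings     = MAX_CPUS;	/* one inbound (RSS) completion queue per CPU */
int default_ring  = 1;		/* catch-all ring for remaining traffic */
int rx_rings      = tx_done_rings + rss_rings + default_ring;	/* == 17 == MAX_RX_RINGS */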
@@ -45,6 +45,7 @@
 #define MAX_SPLIT_SIZE 1023
 #define QLGE_SB_PAD 32
 
+#define MAX_CQ 128
 #define DFLT_COALESCE_WAIT 100	/* 100 usec wait for coalescing */
 #define MAX_INTER_FRAME_WAIT 10	/* 10 usec max interframe-wait for coalescing */
 #define DFLT_INTER_FRAME_WAIT (MAX_INTER_FRAME_WAIT/2)
@@ -961,8 +962,7 @@ struct ib_mac_iocb_rsp {
 #define IB_MAC_IOCB_RSP_DS	0x40	/* data is in small buffer */
 #define IB_MAC_IOCB_RSP_DL	0x80	/* data is in large buffer */
 	__le32	data_len;	/* */
-	__le32	data_addr_lo;	/* */
-	__le32	data_addr_hi;	/* */
+	__le64	data_addr;	/* */
 	__le32	rss;	/* */
 	__le16	vlan_id;	/* 12 bits */
 #define IB_MAC_IOCB_RSP_C	0x1000	/* VLAN CFI bit */
@@ -976,8 +976,7 @@ struct ib_mac_iocb_rsp {
 #define IB_MAC_IOCB_RSP_HS	0x40
 #define IB_MAC_IOCB_RSP_HL	0x80
 	__le32	hdr_len;	/* */
-	__le32	hdr_addr_lo;	/* */
-	__le32	hdr_addr_hi;	/* */
+	__le64	hdr_addr;	/* */
 } __attribute((packed));
 
 struct ib_ae_iocb_rsp {
@@ -1042,10 +1041,8 @@ struct wqicb {
 	__le16 cq_id_rss;
 #define Q_CQ_ID_RSS_RV 0x8000
 	__le16 rid;
-	__le32 addr_lo;
-	__le32 addr_hi;
-	__le32 cnsmr_idx_addr_lo;
-	__le32 cnsmr_idx_addr_hi;
+	__le64 addr;
+	__le64 cnsmr_idx_addr;
 } __attribute((packed));
 
 /*
@@ -1070,18 +1067,14 @@ struct cqicb {
 #define LEN_CPP_64	0x0002
 #define LEN_CPP_128	0x0003
 	__le16 rid;
-	__le32 addr_lo;
-	__le32 addr_hi;
-	__le32 prod_idx_addr_lo;
-	__le32 prod_idx_addr_hi;
+	__le64 addr;
+	__le64 prod_idx_addr;
 	__le16 pkt_delay;
 	__le16 irq_delay;
-	__le32 lbq_addr_lo;
-	__le32 lbq_addr_hi;
+	__le64 lbq_addr;
 	__le16 lbq_buf_size;
 	__le16 lbq_len;		/* entry count */
-	__le32 sbq_addr_lo;
-	__le32 sbq_addr_hi;
+	__le64 sbq_addr;
 	__le16 sbq_buf_size;
 	__le16 sbq_len;		/* entry count */
 } __attribute((packed));
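Collapsing the *_addr_lo/*_addr_hi pairs into single __le64 fields means a 64-bit DMA address can be stored with one cpu_to_le64() assignment instead of being split by hand. A sketch of what populating the new cqicb fields could look like; the cqicb field names and prod_idx_sh_reg_dma come from this header, while the other rx_ring members are assumed for illustration:

/* Hypothetical completion-queue bring-up under the new 64-bit fields. */
cqicb->addr          = cpu_to_le64(rx_ring->cq_base_dma);		/* assumed member */
cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
cqicb->lbq_addr      = cpu_to_le64(rx_ring->lbq_base_dma);		/* assumed member */
cqicb->sbq_addr      = cpu_to_le64(rx_ring->sbq_base_dma);		/* assumed member */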
@@ -1145,7 +1138,7 @@ struct tx_ring {
 	struct wqicb wqicb;	/* structure used to inform chip of new queue */
 	void *wq_base;		/* pci_alloc:virtual addr for tx */
 	dma_addr_t wq_base_dma;	/* pci_alloc:dma addr for tx */
-	u32 *cnsmr_idx_sh_reg;	/* shadow copy of consumer idx */
+	__le32 *cnsmr_idx_sh_reg;	/* shadow copy of consumer idx */
 	dma_addr_t cnsmr_idx_sh_reg_dma;	/* dma-shadow copy of consumer */
 	u32 wq_size;		/* size in bytes of queue area */
 	u32 wq_len;		/* number of entries in queue */
@@ -1181,7 +1174,7 @@ struct rx_ring {
 	u32 cq_size;
 	u32 cq_len;
 	u16 cq_id;
-	volatile __le32 *prod_idx_sh_reg;	/* Shadowed producer register. */
+	__le32 *prod_idx_sh_reg;	/* Shadowed producer register. */
 	dma_addr_t prod_idx_sh_reg_dma;
 	void __iomem *cnsmr_idx_db_reg;	/* PCI doorbell mem area + 0 */
 	u32 cnsmr_idx;		/* current sw idx */
@@ -1402,9 +1395,11 @@ struct ql_adapter {
 	int rx_ring_count;
 	int ring_mem_size;
 	void *ring_mem;
-	struct rx_ring *rx_ring;
+
+	struct rx_ring rx_ring[MAX_RX_RINGS];
+	struct tx_ring tx_ring[MAX_TX_RINGS];
+
 	int rx_csum;
-	struct tx_ring *tx_ring;
 	u32 default_rx_queue;
 
 	u16 rx_coalesce_usecs;	/* cqicb->int_delay */
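With the rings embedded as fixed-size arrays, per-ring state lives inside struct ql_adapter itself instead of behind separately allocated pointers, and iteration simply indexes the adapter. A sketch under that layout; rx_ring_count and the wqicb member are visible in this patch, while tx_ring_count and the loop bodies are assumptions:

int i;

for (i = 0; i < qdev->rx_ring_count; i++) {
	struct rx_ring *rx_ring = &qdev->rx_ring[i];
	/* e.g. fill in the ring's control block and start the completion queue */
}

for (i = 0; i < qdev->tx_ring_count; i++) {	/* tx_ring_count assumed */
	struct tx_ring *tx_ring = &qdev->tx_ring[i];
	/* e.g. program tx_ring->wqicb and start the work queue */
}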
@@ -1459,6 +1454,24 @@ static inline void ql_write_db_reg(u32 val, void __iomem *addr)
 	mmiowb();
 }
 
+/*
+ * Shadow Registers:
+ * Outbound queues have a consumer index that is maintained by the chip.
+ * Inbound queues have a producer index that is maintained by the chip.
+ * For lower overhead, these registers are "shadowed" to host memory
+ * which allows the device driver to track the queue progress without
+ * PCI reads. When an entry is placed on an inbound queue, the chip will
+ * update the relevant index register and then copy the value to the
+ * shadow register in host memory.
+ */
+static inline u32 ql_read_sh_reg(__le32 *addr)
+{
+	u32 reg;
+	reg = le32_to_cpu(*addr);
+	rmb();
+	return reg;
+}
+
 extern char qlge_driver_name[];
 extern const char qlge_driver_version[];
 extern const struct ethtool_ops qlge_ethtool_ops;
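The new ql_read_sh_reg() helper pairs the little-endian conversion with a read barrier so a caller always sees the chip's most recent shadow write before acting on it. A usage sketch; ql_read_sh_reg(), prod_idx_sh_reg and cnsmr_idx are from this header, the wrapper function itself is hypothetical:

/* Hypothetical poll: compare the chip-maintained (shadowed) producer index
 * against the driver's consumer index to see whether completions are pending.
 */
static inline bool ql_cq_has_work(struct rx_ring *rx_ring)
{
	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);

	return prod != rx_ring->cnsmr_idx;
}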