author    Amit S. Kale <amitkale@netxen.com>    2006-12-04 12:23:25 -0500
committer Jeff Garzik <jeff@garzik.org>         2006-12-04 18:36:03 -0500
commit    ed25ffa16434724f5ed825aa48734c7f3aefa203 (patch)
tree      71cff36d0b2f43adf20e67ac6cc3ba3020f94ff2 /drivers/net/netxen
parent    80922fbcb6f00127e91580e7565bb665947ac5d3 (diff)
[PATCH] NetXen: multiport firmware support, ioctl interface
NetXen: 1G/10G Ethernet driver updates
 - Multiport and newer firmware support
 - ioctl interface for user level tools
 - Cast error fix for multiport

Signed-off-by: Amit S. Kale <amitkale@netxen.com>

 netxen_nic.h          |  281 +++++++++++++++++++++++++-------
 netxen_nic_ethtool.c  |   12 -
 netxen_nic_hw.c       |  429 +++++++++++++++++++++++++++++++++++++++++---------
 netxen_nic_init.c     |  301 ++++++++++++++++++++++++++++++-----
 netxen_nic_ioctl.h    |    2
 netxen_nic_isr.c      |    3
 netxen_nic_main.c     |  260 ++++++++++++++++++------------
 netxen_nic_niu.c      |   22 +-
 netxen_nic_phan_reg.h |  228 ++++++++++++++++----------
 9 files changed, 1161 insertions(+), 377 deletions(-)

Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers/net/netxen')
-rw-r--r--  drivers/net/netxen/netxen_nic.h          | 281
-rw-r--r--  drivers/net/netxen/netxen_nic_ethtool.c  |  12
-rw-r--r--  drivers/net/netxen/netxen_nic_hw.c       | 429
-rw-r--r--  drivers/net/netxen/netxen_nic_init.c     | 301
-rw-r--r--  drivers/net/netxen/netxen_nic_ioctl.h    |   2
-rw-r--r--  drivers/net/netxen/netxen_nic_isr.c      |   3
-rw-r--r--  drivers/net/netxen/netxen_nic_main.c     | 260
-rw-r--r--  drivers/net/netxen/netxen_nic_niu.c      |  22
-rw-r--r--  drivers/net/netxen/netxen_nic_phan_reg.h | 228
9 files changed, 1161 insertions, 377 deletions
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index d51f43709cb5..3151aaa7906e 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -63,27 +63,49 @@
63 63
64#include "netxen_nic_hw.h" 64#include "netxen_nic_hw.h"
65 65
66#define NETXEN_NIC_BUILD_NO "5" 66#define NETXEN_NIC_BUILD_NO "1"
67#define _NETXEN_NIC_LINUX_MAJOR 2 67#define _NETXEN_NIC_LINUX_MAJOR 3
68#define _NETXEN_NIC_LINUX_MINOR 3 68#define _NETXEN_NIC_LINUX_MINOR 3
69#define _NETXEN_NIC_LINUX_SUBVERSION 59 69#define _NETXEN_NIC_LINUX_SUBVERSION 2
70#define NETXEN_NIC_LINUX_VERSIONID "2.3.59" "-" NETXEN_NIC_BUILD_NO 70#define NETXEN_NIC_LINUX_VERSIONID "3.3.2" "-" NETXEN_NIC_BUILD_NO
71#define NETXEN_NIC_FW_VERSIONID "2.3.59" 71#define NETXEN_NIC_FW_VERSIONID "3.3.2"
72 72
73#define RCV_DESC_RINGSIZE \ 73#define RCV_DESC_RINGSIZE \
74 (sizeof(struct rcv_desc) * adapter->max_rx_desc_count) 74 (sizeof(struct rcv_desc) * adapter->max_rx_desc_count)
75#define STATUS_DESC_RINGSIZE \ 75#define STATUS_DESC_RINGSIZE \
76 (sizeof(struct status_desc)* adapter->max_rx_desc_count) 76 (sizeof(struct status_desc)* adapter->max_rx_desc_count)
77#define LRO_DESC_RINGSIZE \
78 (sizeof(rcvDesc_t) * adapter->max_lro_rx_desc_count)
77#define TX_RINGSIZE \ 79#define TX_RINGSIZE \
78 (sizeof(struct netxen_cmd_buffer) * adapter->max_tx_desc_count) 80 (sizeof(struct netxen_cmd_buffer) * adapter->max_tx_desc_count)
79#define RCV_BUFFSIZE \ 81#define RCV_BUFFSIZE \
80 (sizeof(struct netxen_rx_buffer) * rcv_desc->max_rx_desc_count) 82 (sizeof(struct netxen_rx_buffer) * rcv_desc->max_rx_desc_count)
81#define find_diff_among(a,b,range) ((a)<(b)?((b)-(a)):((b)+(range)-(a))) 83#define find_diff_among(a,b,range) ((a)<(b)?((b)-(a)):((b)+(range)-(a)))
82 84
83#define NETXEN_NETDEV_STATUS 0x1 85#define NETXEN_NETDEV_STATUS 0x1
86#define NETXEN_RCV_PRODUCER_OFFSET 0
87#define NETXEN_RCV_PEG_DB_ID 2
88#define NETXEN_HOST_DUMMY_DMA_SIZE 1024
84 89
85#define ADDR_IN_WINDOW1(off) \ 90#define ADDR_IN_WINDOW1(off) \
86 ((off > NETXEN_CRB_PCIX_HOST2) && (off < NETXEN_CRB_MAX)) ? 1 : 0 91 ((off > NETXEN_CRB_PCIX_HOST2) && (off < NETXEN_CRB_MAX)) ? 1 : 0
92/*
93 * In netxen_nic_down(), we must wait for any pending callback requests into
94 * netxen_watchdog_task() to complete; eg otherwise the watchdog_timer could be
95 * reenabled right after it is deleted in netxen_nic_down(). FLUSH_SCHEDULED_WORK()
96 * does this synchronization.
97 *
98 * Normally, schedule_work()/flush_scheduled_work() could have worked, but
99 * netxen_nic_close() is invoked with kernel rtnl lock held. netif_carrier_off()
100 * call in netxen_nic_close() triggers a schedule_work(&linkwatch_work), and a
101 * subsequent call to flush_scheduled_work() in netxen_nic_down() would cause
102 * linkwatch_event() to be executed which also attempts to acquire the rtnl
103 * lock thus causing a deadlock.
104 */
105
106#define SCHEDULE_WORK(tp) queue_work(netxen_workq, tp)
107#define FLUSH_SCHEDULED_WORK() flush_workqueue(netxen_workq)
108extern struct workqueue_struct *netxen_workq;
87 109
88/* 110/*
89 * normalize a 64MB crb address to 32MB PCI window 111 * normalize a 64MB crb address to 32MB PCI window
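The comment block added in the hunk above explains why the driver moves its watchdog and timeout work onto a private workqueue (netxen_workq): flushing the shared kernel workqueue from netxen_nic_down() could run linkwatch_event(), which also takes the rtnl lock and would deadlock. The following stand-alone module is a minimal sketch of that pattern; it is not part of the patch, and it uses the current two-argument INIT_WORK() API rather than the 2.6.19-era three-argument one.

```c
#include <linux/module.h>
#include <linux/workqueue.h>

/* Hypothetical illustration of the private-workqueue pattern; not part of the patch. */
static struct workqueue_struct *example_wq;
static struct work_struct example_task;

static void example_watchdog(struct work_struct *work)
{
	/* periodic link/housekeeping work would run here */
}

static int __init example_init(void)
{
	/* private queue: flushing it never runs linkwatch_event(), so no rtnl deadlock */
	example_wq = create_singlethread_workqueue("example_wq");
	if (!example_wq)
		return -ENOMEM;
	INIT_WORK(&example_task, example_watchdog);
	queue_work(example_wq, &example_task);	/* analogous to SCHEDULE_WORK() */
	return 0;
}

static void __exit example_exit(void)
{
	flush_workqueue(example_wq);		/* analogous to FLUSH_SCHEDULED_WORK() */
	destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
```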
@@ -95,8 +117,14 @@
95#define NETXEN_CRB_NORMALIZE(adapter, reg) \ 117#define NETXEN_CRB_NORMALIZE(adapter, reg) \
96 pci_base_offset(adapter, NETXEN_CRB_NORMAL(reg)) 118 pci_base_offset(adapter, NETXEN_CRB_NORMAL(reg))
97 119
120#define DB_NORMALIZE(adapter, off) \
121 (adapter->ahw.db_base + (off))
122
123#define NX_P2_C0 0x24
124#define NX_P2_C1 0x25
125
98#define FIRST_PAGE_GROUP_START 0 126#define FIRST_PAGE_GROUP_START 0
99#define FIRST_PAGE_GROUP_END 0x400000 127#define FIRST_PAGE_GROUP_END 0x100000
100 128
101#define SECOND_PAGE_GROUP_START 0x4000000 129#define SECOND_PAGE_GROUP_START 0x4000000
102#define SECOND_PAGE_GROUP_END 0x66BC000 130#define SECOND_PAGE_GROUP_END 0x66BC000
@@ -108,11 +136,13 @@
108#define SECOND_PAGE_GROUP_SIZE SECOND_PAGE_GROUP_END - SECOND_PAGE_GROUP_START 136#define SECOND_PAGE_GROUP_SIZE SECOND_PAGE_GROUP_END - SECOND_PAGE_GROUP_START
109#define THIRD_PAGE_GROUP_SIZE THIRD_PAGE_GROUP_END - THIRD_PAGE_GROUP_START 137#define THIRD_PAGE_GROUP_SIZE THIRD_PAGE_GROUP_END - THIRD_PAGE_GROUP_START
110 138
111#define MAX_RX_BUFFER_LENGTH 2000 139#define MAX_RX_BUFFER_LENGTH 1760
112#define MAX_RX_JUMBO_BUFFER_LENGTH 9046 140#define MAX_RX_JUMBO_BUFFER_LENGTH 9046
113#define RX_DMA_MAP_LEN (MAX_RX_BUFFER_LENGTH - NET_IP_ALIGN) 141#define MAX_RX_LRO_BUFFER_LENGTH ((48*1024)-512)
142#define RX_DMA_MAP_LEN (MAX_RX_BUFFER_LENGTH - 2)
114#define RX_JUMBO_DMA_MAP_LEN \ 143#define RX_JUMBO_DMA_MAP_LEN \
115 (MAX_RX_JUMBO_BUFFER_LENGTH - NET_IP_ALIGN) 144 (MAX_RX_JUMBO_BUFFER_LENGTH - 2)
145#define RX_LRO_DMA_MAP_LEN (MAX_RX_LRO_BUFFER_LENGTH - 2)
116#define NETXEN_ROM_ROUNDUP 0x80000000ULL 146#define NETXEN_ROM_ROUNDUP 0x80000000ULL
117 147
118/* 148/*
@@ -151,30 +181,38 @@ enum {
151/* Host writes the following to notify that it has done the init-handshake */ 181/* Host writes the following to notify that it has done the init-handshake */
152#define PHAN_INITIALIZE_ACK 0xf00f 182#define PHAN_INITIALIZE_ACK 0xf00f
153 183
154#define NUM_RCV_DESC_RINGS 2 /* No of Rcv Descriptor contexts */ 184#define NUM_RCV_DESC_RINGS 3 /* No of Rcv Descriptor contexts */
155 185
156/* descriptor types */ 186/* descriptor types */
157#define RCV_DESC_NORMAL 0x01 187#define RCV_DESC_NORMAL 0x01
158#define RCV_DESC_JUMBO 0x02 188#define RCV_DESC_JUMBO 0x02
189#define RCV_DESC_LRO 0x04
159#define RCV_DESC_NORMAL_CTXID 0 190#define RCV_DESC_NORMAL_CTXID 0
160#define RCV_DESC_JUMBO_CTXID 1 191#define RCV_DESC_JUMBO_CTXID 1
192#define RCV_DESC_LRO_CTXID 2
161 193
162#define RCV_DESC_TYPE(ID) \ 194#define RCV_DESC_TYPE(ID) \
163 ((ID == RCV_DESC_JUMBO_CTXID) ? RCV_DESC_JUMBO : RCV_DESC_NORMAL) 195 ((ID == RCV_DESC_JUMBO_CTXID) \
196 ? RCV_DESC_JUMBO \
197 : ((ID == RCV_DESC_LRO_CTXID) \
198 ? RCV_DESC_LRO : \
199 (RCV_DESC_NORMAL)))
164 200
165#define MAX_CMD_DESCRIPTORS 1024 201#define MAX_CMD_DESCRIPTORS 1024
166#define MAX_RCV_DESCRIPTORS 32768 202#define MAX_RCV_DESCRIPTORS 32768
167#define MAX_JUMBO_RCV_DESCRIPTORS 4096 203#define MAX_JUMBO_RCV_DESCRIPTORS 4096
204#define MAX_LRO_RCV_DESCRIPTORS 2048
168#define MAX_RCVSTATUS_DESCRIPTORS MAX_RCV_DESCRIPTORS 205#define MAX_RCVSTATUS_DESCRIPTORS MAX_RCV_DESCRIPTORS
169#define MAX_JUMBO_RCV_DESC MAX_JUMBO_RCV_DESCRIPTORS 206#define MAX_JUMBO_RCV_DESC MAX_JUMBO_RCV_DESCRIPTORS
170#define MAX_RCV_DESC MAX_RCV_DESCRIPTORS 207#define MAX_RCV_DESC MAX_RCV_DESCRIPTORS
171#define MAX_RCVSTATUS_DESC MAX_RCV_DESCRIPTORS 208#define MAX_RCVSTATUS_DESC MAX_RCV_DESCRIPTORS
172#define NUM_RCV_DESC (MAX_RCV_DESC + MAX_JUMBO_RCV_DESCRIPTORS)
173#define MAX_EPG_DESCRIPTORS (MAX_CMD_DESCRIPTORS * 8) 209#define MAX_EPG_DESCRIPTORS (MAX_CMD_DESCRIPTORS * 8)
174 210#define NUM_RCV_DESC (MAX_RCV_DESC + MAX_JUMBO_RCV_DESCRIPTORS + \
211 MAX_LRO_RCV_DESCRIPTORS)
175#define MIN_TX_COUNT 4096 212#define MIN_TX_COUNT 4096
176#define MIN_RX_COUNT 4096 213#define MIN_RX_COUNT 4096
177 214#define NETXEN_CTX_SIGNATURE 0xdee0
215#define NETXEN_RCV_PRODUCER(ringid) (ringid)
178#define MAX_FRAME_SIZE 0x10000 /* 64K MAX size for LSO */ 216#define MAX_FRAME_SIZE 0x10000 /* 64K MAX size for LSO */
179 217
180#define PHAN_PEG_RCV_INITIALIZED 0xff01 218#define PHAN_PEG_RCV_INITIALIZED 0xff01
@@ -186,6 +224,67 @@ enum {
186#define get_index_range(index,length,count) \ 224#define get_index_range(index,length,count) \
187 (((index) + (count)) & ((length) - 1)) 225 (((index) + (count)) & ((length) - 1))
188 226
227#define MPORT_SINGLE_FUNCTION_MODE 0x1111
228
229extern unsigned long long netxen_dma_mask;
230
231/*
232 * NetXen host-peg signal message structure
233 *
234 * Bit 0-1 : peg_id => 0x2 for tx and 01 for rx
235 * Bit 2 : priv_id => must be 1
236 * Bit 3-17 : count => for doorbell
237 * Bit 18-27 : ctx_id => Context id
238 * Bit 28-31 : opcode
239 */
240
241typedef u32 netxen_ctx_msg;
242
243#define _netxen_set_bits(config_word, start, bits, val) {\
244 unsigned long long mask = (((1ULL << (bits)) - 1) << (start)); \
245 unsigned long long value = (val); \
246 (config_word) &= ~mask; \
247 (config_word) |= (((value) << (start)) & mask); \
248}
249
250#define netxen_set_msg_peg_id(config_word, val) \
251 _netxen_set_bits(config_word, 0, 2, val)
252#define netxen_set_msg_privid(config_word) \
253 set_bit(2, (unsigned long*)&config_word)
254#define netxen_set_msg_count(config_word, val) \
255 _netxen_set_bits(config_word, 3, 15, val)
256#define netxen_set_msg_ctxid(config_word, val) \
257 _netxen_set_bits(config_word, 18, 10, val)
258#define netxen_set_msg_opcode(config_word, val) \
259 _netxen_set_bits(config_word, 28, 4, val)
260
261struct netxen_rcv_context {
262 u32 rcv_ring_addr_lo;
263 u32 rcv_ring_addr_hi;
264 u32 rcv_ring_size;
265 u32 rsrvd;
266};
267
268struct netxen_ring_ctx {
269
270 /* one command ring */
271 u64 cmd_consumer_offset;
272 u32 cmd_ring_addr_lo;
273 u32 cmd_ring_addr_hi;
274 u32 cmd_ring_size;
275 u32 rsrvd;
276
277 /* three receive rings */
278 struct netxen_rcv_context rcv_ctx[3];
279
280 /* one status ring */
281 u32 sts_ring_addr_lo;
282 u32 sts_ring_addr_hi;
283 u32 sts_ring_size;
284
285 u32 ctx_id;
286} __attribute__ ((aligned(64)));
287
189/* 288/*
190 * Following data structures describe the descriptors that will be used. 289 * Following data structures describe the descriptors that will be used.
191 * Added fileds of tcpHdrSize and ipHdrSize, The driver needs to do it only when 290 * Added fileds of tcpHdrSize and ipHdrSize, The driver needs to do it only when
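The netxen_ctx_msg layout and the _netxen_set_bits() helper added in the hunk above pack the host-to-peg doorbell message into a single 32-bit word. A user-space sketch of the same packing, for illustration only (the field values below are arbitrary):

```c
#include <stdio.h>
#include <stdint.h>

/* Stand-alone re-statement of the _netxen_set_bits() packing, illustration only. */
#define set_bits(word, start, bits, val) do {                                        \
	unsigned long long mask = (((1ULL << (bits)) - 1) << (start));               \
	(word) = ((word) & ~mask) | ((((unsigned long long)(val)) << (start)) & mask); \
} while (0)

int main(void)
{
	uint32_t msg = 0;

	set_bits(msg, 0, 2, 0x1);	/* peg_id: 0x1 for rx, 0x2 for tx        */
	msg |= 1u << 2;			/* priv_id: must be 1                    */
	set_bits(msg, 3, 15, 32);	/* count written to the doorbell         */
	set_bits(msg, 18, 10, 0);	/* ctx_id                                */
	set_bits(msg, 28, 4, 2);	/* opcode (value chosen arbitrarily)     */

	printf("doorbell message = 0x%08x\n", msg);
	return 0;
}
```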
@@ -203,22 +302,32 @@ enum {
203#define FLAGS_IPSEC_SA_DELETE 0x08 302#define FLAGS_IPSEC_SA_DELETE 0x08
204#define FLAGS_VLAN_TAGGED 0x10 303#define FLAGS_VLAN_TAGGED 0x10
205 304
206#define CMD_DESC_TOTAL_LENGTH(cmd_desc) \ 305#define netxen_set_cmd_desc_port(cmd_desc, var) \
207 ((cmd_desc)->length_tcp_hdr & 0x00FFFFFF) 306 ((cmd_desc)->port_ctxid |= ((var) & 0x0F))
208#define CMD_DESC_TCP_HDR_OFFSET(cmd_desc) \
209 (((cmd_desc)->length_tcp_hdr >> 24) & 0x0FF)
210#define CMD_DESC_PORT(cmd_desc) ((cmd_desc)->port_ctxid & 0x0F)
211#define CMD_DESC_CTX_ID(cmd_desc) (((cmd_desc)->port_ctxid >> 4) & 0x0F)
212 307
213#define CMD_DESC_TOTAL_LENGTH_WRT(cmd_desc, var) \ 308#define netxen_set_cmd_desc_flags(cmd_desc, val) \
214 ((cmd_desc)->length_tcp_hdr |= ((var) & 0x00FFFFFF)) 309 _netxen_set_bits((cmd_desc)->flags_opcode, 0, 7, val)
215#define CMD_DESC_TCP_HDR_OFFSET_WRT(cmd_desc, var) \ 310#define netxen_set_cmd_desc_opcode(cmd_desc, val) \
216 ((cmd_desc)->length_tcp_hdr |= (((var) << 24) & 0xFF000000)) 311 _netxen_set_bits((cmd_desc)->flags_opcode, 7, 6, val)
217#define CMD_DESC_PORT_WRT(cmd_desc, var) \ 312
218 ((cmd_desc)->port_ctxid |= ((var) & 0x0F)) 313#define netxen_set_cmd_desc_num_of_buff(cmd_desc, val) \
314 _netxen_set_bits((cmd_desc)->num_of_buffers_total_length, 0, 8, val);
315#define netxen_set_cmd_desc_totallength(cmd_desc, val) \
316 _netxen_set_bits((cmd_desc)->num_of_buffers_total_length, 8, 24, val);
317
318#define netxen_get_cmd_desc_opcode(cmd_desc) \
319 (((cmd_desc)->flags_opcode >> 7) & 0x003F)
320#define netxen_get_cmd_desc_totallength(cmd_desc) \
321 (((cmd_desc)->num_of_buffers_total_length >> 8) & 0x0FFFFFF)
219 322
220struct cmd_desc_type0 { 323struct cmd_desc_type0 {
221 u64 netxen_next; /* for fragments handled by Phantom */ 324 u8 tcp_hdr_offset; /* For LSO only */
325 u8 ip_hdr_offset; /* For LSO only */
326 /* Bit pattern: 0-6 flags, 7-12 opcode, 13-15 unused */
327 u16 flags_opcode;
328 /* Bit pattern: 0-7 total number of segments,
329 8-31 Total size of the packet */
330 u32 num_of_buffers_total_length;
222 union { 331 union {
223 struct { 332 struct {
224 u32 addr_low_part2; 333 u32 addr_low_part2;
@@ -227,13 +336,6 @@ struct cmd_desc_type0 {
227 u64 addr_buffer2; 336 u64 addr_buffer2;
228 }; 337 };
229 338
230 /* Bit pattern: 0-23 total length, 24-32 tcp header offset */
231 u32 length_tcp_hdr;
232 u8 ip_hdr_offset; /* For LSO only */
233 u8 num_of_buffers; /* total number of segments */
234 u8 flags; /* as defined above */
235 u8 opcode;
236
237 u16 reference_handle; /* changed to u16 to add mss */ 339 u16 reference_handle; /* changed to u16 to add mss */
238 u16 mss; /* passed by NDIS_PACKET for LSO */ 340 u16 mss; /* passed by NDIS_PACKET for LSO */
239 /* Bit pattern 0-3 port, 0-3 ctx id */ 341 /* Bit pattern 0-3 port, 0-3 ctx id */
@@ -248,7 +350,6 @@ struct cmd_desc_type0 {
248 }; 350 };
249 u64 addr_buffer3; 351 u64 addr_buffer3;
250 }; 352 };
251
252 union { 353 union {
253 struct { 354 struct {
254 u32 addr_low_part1; 355 u32 addr_low_part1;
@@ -270,6 +371,8 @@ struct cmd_desc_type0 {
270 u64 addr_buffer4; 371 u64 addr_buffer4;
271 }; 372 };
272 373
374 u64 unused;
375
273} __attribute__ ((aligned(64))); 376} __attribute__ ((aligned(64)));
274 377
275/* Note: sizeof(rcv_desc) should always be a mutliple of 2 */ 378/* Note: sizeof(rcv_desc) should always be a mutliple of 2 */
@@ -296,22 +399,49 @@ struct rcv_desc {
296#define NETXEN_PROT_UNKNOWN (0) 399#define NETXEN_PROT_UNKNOWN (0)
297 400
298/* Note: sizeof(status_desc) should always be a mutliple of 2 */ 401/* Note: sizeof(status_desc) should always be a mutliple of 2 */
299#define STATUS_DESC_PORT(status_desc) \ 402
300 ((status_desc)->port_status_type_op & 0x0F) 403#define netxen_get_sts_desc_lro_cnt(status_desc) \
301#define STATUS_DESC_STATUS(status_desc) \ 404 ((status_desc)->lro & 0x7F)
302 (((status_desc)->port_status_type_op >> 4) & 0x0F) 405#define netxen_get_sts_desc_lro_last_frag(status_desc) \
303#define STATUS_DESC_TYPE(status_desc) \ 406 (((status_desc)->lro & 0x80) >> 7)
304 (((status_desc)->port_status_type_op >> 8) & 0x0F) 407
305#define STATUS_DESC_OPCODE(status_desc) \ 408#define netxen_get_sts_port(status_desc) \
306 (((status_desc)->port_status_type_op >> 12) & 0x0F) 409 ((status_desc)->status_desc_data & 0x0F)
410#define netxen_get_sts_status(status_desc) \
411 (((status_desc)->status_desc_data >> 4) & 0x0F)
412#define netxen_get_sts_type(status_desc) \
413 (((status_desc)->status_desc_data >> 8) & 0x0F)
414#define netxen_get_sts_totallength(status_desc) \
415 (((status_desc)->status_desc_data >> 12) & 0xFFFF)
416#define netxen_get_sts_refhandle(status_desc) \
417 (((status_desc)->status_desc_data >> 28) & 0xFFFF)
418#define netxen_get_sts_prot(status_desc) \
419 (((status_desc)->status_desc_data >> 44) & 0x0F)
420#define netxen_get_sts_owner(status_desc) \
421 (((status_desc)->status_desc_data >> 56) & 0x03)
422#define netxen_get_sts_opcode(status_desc) \
423 (((status_desc)->status_desc_data >> 58) & 0x03F)
424
425#define netxen_clear_sts_owner(status_desc) \
426 ((status_desc)->status_desc_data &= \
427 ~(((unsigned long long)3) << 56 ))
428#define netxen_set_sts_owner(status_desc, val) \
429 ((status_desc)->status_desc_data |= \
430 (((unsigned long long)((val) & 0x3)) << 56 ))
307 431
308struct status_desc { 432struct status_desc {
309 /* Bit pattern: 0-3 port, 4-7 status, 8-11 type, 12-15 opcode */ 433 /* Bit pattern: 0-3 port, 4-7 status, 8-11 type, 12-27 total_length
310 u16 port_status_type_op; 434 28-43 reference_handle, 44-47 protocol, 48-52 unused
311 u16 total_length; /* NIC mode */ 435 53-55 desc_cnt, 56-57 owner, 58-63 opcode
312 u16 reference_handle; /* handle for the associated packet */ 436 */
313 /* Bit pattern: 0-1 owner, 2-5 protocol */ 437 u64 status_desc_data;
314 u16 owner; /* Owner of the descriptor */ 438 u32 hash_value;
439 u8 hash_type;
440 u8 msg_type;
441 u8 unused;
442 /* Bit pattern: 0-6 lro_count indicates frag sequence,
443 7 last_frag indicates last frag */
444 u8 lro;
315} __attribute__ ((aligned(8))); 445} __attribute__ ((aligned(8)));
316 446
317enum { 447enum {
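The reworked status_desc above collapses port, status, type, total length, reference handle, protocol, owner, and opcode into one 64-bit status_desc_data word, read back with the netxen_get_sts_*() macros. A small user-space sketch of composing and decoding such a word, with arbitrary demo values:

```c
#include <stdio.h>
#include <stdint.h>

/* Field extraction mirroring the netxen_get_sts_*() macros; illustration only. */
#define sts_port(d)        ((d) & 0x0F)
#define sts_status(d)      (((d) >> 4) & 0x0F)
#define sts_type(d)        (((d) >> 8) & 0x0F)
#define sts_totallength(d) (((d) >> 12) & 0xFFFF)
#define sts_refhandle(d)   (((d) >> 28) & 0xFFFF)
#define sts_owner(d)       (((d) >> 56) & 0x03)

int main(void)
{
	uint64_t d = 0;

	d |= 1ULL;		/* port                                   */
	d |= 4ULL << 4;		/* status (value arbitrary for the demo)  */
	d |= 0ULL << 8;		/* type: normal receive ring              */
	d |= 1514ULL << 12;	/* total_length                           */
	d |= 42ULL << 28;	/* reference handle                       */
	d |= 1ULL << 56;	/* owner bits (value arbitrary here)      */

	printf("port=%llu len=%llu handle=%llu owner=%llu\n",
	       (unsigned long long)sts_port(d),
	       (unsigned long long)sts_totallength(d),
	       (unsigned long long)sts_refhandle(d),
	       (unsigned long long)sts_owner(d));
	return 0;
}
```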
@@ -563,7 +693,8 @@ typedef enum {
563#define FLASH_SECONDARY_SIZE (USER_START-SECONDARY_START) 693#define FLASH_SECONDARY_SIZE (USER_START-SECONDARY_START)
564#define NUM_PRIMARY_SECTORS (0x20) 694#define NUM_PRIMARY_SECTORS (0x20)
565#define NUM_CONFIG_SECTORS (1) 695#define NUM_CONFIG_SECTORS (1)
566#define PFX "netxen: " 696#define PFX "NetXen: "
697extern char netxen_nic_driver_name[];
567 698
568/* Note: Make sure to not call this before adapter->port is valid */ 699/* Note: Make sure to not call this before adapter->port is valid */
569#if !defined(NETXEN_DEBUG) 700#if !defined(NETXEN_DEBUG)
@@ -609,7 +740,6 @@ struct netxen_cmd_buffer {
609 u8 frag_count; 740 u8 frag_count;
610 unsigned long time_stamp; 741 unsigned long time_stamp;
611 u32 state; 742 u32 state;
612 u32 no_of_descriptors;
613}; 743};
614 744
615/* In rx_buffer, we do not need multiple fragments as is a single buffer */ 745/* In rx_buffer, we do not need multiple fragments as is a single buffer */
@@ -618,6 +748,9 @@ struct netxen_rx_buffer {
618 u64 dma; 748 u64 dma;
619 u16 ref_handle; 749 u16 ref_handle;
620 u16 state; 750 u16 state;
751 u32 lro_expected_frags;
752 u32 lro_current_frags;
753 u32 lro_length;
621}; 754};
622 755
623/* Board types */ 756/* Board types */
@@ -633,6 +766,8 @@ struct netxen_hardware_context {
633 void __iomem *pci_base0; 766 void __iomem *pci_base0;
634 void __iomem *pci_base1; 767 void __iomem *pci_base1;
635 void __iomem *pci_base2; 768 void __iomem *pci_base2;
769 void __iomem *db_base;
770 unsigned long db_len;
636 771
637 u8 revision_id; 772 u8 revision_id;
638 u16 board_type; 773 u16 board_type;
@@ -642,14 +777,13 @@ struct netxen_hardware_context {
642 u32 qg_linksup; 777 u32 qg_linksup;
643 /* Address of cmd ring in Phantom */ 778 /* Address of cmd ring in Phantom */
644 struct cmd_desc_type0 *cmd_desc_head; 779 struct cmd_desc_type0 *cmd_desc_head;
645 char *pauseaddr;
646 struct pci_dev *cmd_desc_pdev; 780 struct pci_dev *cmd_desc_pdev;
647 dma_addr_t cmd_desc_phys_addr; 781 dma_addr_t cmd_desc_phys_addr;
648 dma_addr_t pause_physaddr;
649 struct pci_dev *pause_pdev;
650 struct netxen_adapter *adapter; 782 struct netxen_adapter *adapter;
651}; 783};
652 784
785#define RCV_RING_LRO RCV_DESC_LRO
786
653#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* With FCS */ 787#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* With FCS */
654#define ETHERNET_FCS_SIZE 4 788#define ETHERNET_FCS_SIZE 4
655 789
@@ -702,6 +836,13 @@ struct netxen_recv_context {
702}; 836};
703 837
704#define NETXEN_NIC_MSI_ENABLED 0x02 838#define NETXEN_NIC_MSI_ENABLED 0x02
839#define NETXEN_DMA_MASK 0xfffffffe
840#define NETXEN_DB_MAPSIZE_BYTES 0x1000
841
842struct netxen_dummy_dma {
843 void *addr;
844 dma_addr_t phys_addr;
845};
705 846
706struct netxen_adapter { 847struct netxen_adapter {
707 struct netxen_hardware_context ahw; 848 struct netxen_hardware_context ahw;
@@ -711,18 +852,19 @@ struct netxen_adapter {
711 spinlock_t tx_lock; 852 spinlock_t tx_lock;
712 spinlock_t lock; 853 spinlock_t lock;
713 struct work_struct watchdog_task; 854 struct work_struct watchdog_task;
714 struct work_struct tx_timeout_task; 855 struct work_struct tx_timeout_task[NETXEN_MAX_PORTS];
715 struct timer_list watchdog_timer; 856 struct timer_list watchdog_timer;
716 857
717 u32 curr_window; 858 u32 curr_window;
718 859
719 u32 cmd_producer; 860 u32 cmd_producer;
720 u32 cmd_consumer; 861 u32 *cmd_consumer;
721 862
722 u32 last_cmd_consumer; 863 u32 last_cmd_consumer;
723 u32 max_tx_desc_count; 864 u32 max_tx_desc_count;
724 u32 max_rx_desc_count; 865 u32 max_rx_desc_count;
725 u32 max_jumbo_rx_desc_count; 866 u32 max_jumbo_rx_desc_count;
867 u32 max_lro_rx_desc_count;
726 /* Num of instances active on cmd buffer ring */ 868 /* Num of instances active on cmd buffer ring */
727 u32 proc_cmd_buf_counter; 869 u32 proc_cmd_buf_counter;
728 870
@@ -744,6 +886,13 @@ struct netxen_adapter {
744 struct netxen_recv_context recv_ctx[MAX_RCV_CTX]; 886 struct netxen_recv_context recv_ctx[MAX_RCV_CTX];
745 887
746 int is_up; 888 int is_up;
889 int number;
890 struct netxen_dummy_dma dummy_dma;
891
892 /* Context interface shared between card and host */
893 struct netxen_ring_ctx *ctx_desc;
894 struct pci_dev *ctx_desc_pdev;
895 dma_addr_t ctx_desc_phys_addr;
747 int (*enable_phy_interrupts) (struct netxen_adapter *, int); 896 int (*enable_phy_interrupts) (struct netxen_adapter *, int);
748 int (*disable_phy_interrupts) (struct netxen_adapter *, int); 897 int (*disable_phy_interrupts) (struct netxen_adapter *, int);
749 void (*handle_phy_intr) (struct netxen_adapter *); 898 void (*handle_phy_intr) (struct netxen_adapter *);
@@ -758,7 +907,6 @@ struct netxen_adapter {
758 int (*init_port) (struct netxen_adapter *, int); 907 int (*init_port) (struct netxen_adapter *, int);
759 void (*init_niu) (struct netxen_adapter *); 908 void (*init_niu) (struct netxen_adapter *);
760 int (*stop_port) (struct netxen_adapter *, int); 909 int (*stop_port) (struct netxen_adapter *, int);
761
762}; /* netxen_adapter structure */ 910}; /* netxen_adapter structure */
763 911
764/* Max number of xmit producer threads that can run simultaneously */ 912/* Max number of xmit producer threads that can run simultaneously */
@@ -840,8 +988,6 @@ static inline void __iomem *pci_base(struct netxen_adapter *adapter,
840 return NULL; 988 return NULL;
841} 989}
842 990
843extern char netxen_nic_driver_name[];
844
845int netxen_niu_xgbe_enable_phy_interrupts(struct netxen_adapter *adapter, 991int netxen_niu_xgbe_enable_phy_interrupts(struct netxen_adapter *adapter,
846 int port); 992 int port);
847int netxen_niu_gbe_enable_phy_interrupts(struct netxen_adapter *adapter, 993int netxen_niu_gbe_enable_phy_interrupts(struct netxen_adapter *adapter,
@@ -880,10 +1026,20 @@ int netxen_nic_hw_read_wx(struct netxen_adapter *adapter, u64 off, void *data,
880 int len); 1026 int len);
881int netxen_nic_hw_write_wx(struct netxen_adapter *adapter, u64 off, void *data, 1027int netxen_nic_hw_write_wx(struct netxen_adapter *adapter, u64 off, void *data,
882 int len); 1028 int len);
1029int netxen_nic_hw_read_ioctl(struct netxen_adapter *adapter, u64 off,
1030 void *data, int len);
1031int netxen_nic_hw_write_ioctl(struct netxen_adapter *adapter, u64 off,
1032 void *data, int len);
1033int netxen_nic_pci_mem_write_ioctl(struct netxen_adapter *adapter,
1034 u64 off, void *data, int size);
1035int netxen_nic_pci_mem_read_ioctl(struct netxen_adapter *adapter,
1036 u64 off, void *data, int size);
883void netxen_crb_writelit_adapter(struct netxen_adapter *adapter, 1037void netxen_crb_writelit_adapter(struct netxen_adapter *adapter,
884 unsigned long off, int data); 1038 unsigned long off, int data);
885 1039
886/* Functions from netxen_nic_init.c */ 1040/* Functions from netxen_nic_init.c */
1041void netxen_free_adapter_offload(struct netxen_adapter *adapter);
1042int netxen_initialize_adapter_offload(struct netxen_adapter *adapter);
887void netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val); 1043void netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val);
888void netxen_load_firmware(struct netxen_adapter *adapter); 1044void netxen_load_firmware(struct netxen_adapter *adapter);
889int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose); 1045int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose);
@@ -918,7 +1074,9 @@ int netxen_nic_tx_has_work(struct netxen_adapter *adapter);
918void netxen_watchdog_task(unsigned long v); 1074void netxen_watchdog_task(unsigned long v);
919void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, 1075void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx,
920 u32 ringid); 1076 u32 ringid);
921void netxen_process_cmd_ring(unsigned long data); 1077void netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter, u32 ctx,
1078 u32 ringid);
1079int netxen_process_cmd_ring(unsigned long data);
922u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctx, int max); 1080u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctx, int max);
923void netxen_nic_set_multi(struct net_device *netdev); 1081void netxen_nic_set_multi(struct net_device *netdev);
924int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu); 1082int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu);
@@ -1012,7 +1170,6 @@ static inline void get_brd_name_by_type(u32 type, char *name)
1012 1170
1013int netxen_is_flash_supported(struct netxen_adapter *adapter); 1171int netxen_is_flash_supported(struct netxen_adapter *adapter);
1014int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 mac[]); 1172int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 mac[]);
1015
1016extern void netxen_change_ringparam(struct netxen_adapter *adapter); 1173extern void netxen_change_ringparam(struct netxen_adapter *adapter);
1017extern int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, 1174extern int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr,
1018 int *valp); 1175 int *valp);
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index c7fcbf345db9..2ab4885cc950 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -459,20 +459,22 @@ netxen_nic_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring)
459{ 459{
460 struct netxen_port *port = netdev_priv(dev); 460 struct netxen_port *port = netdev_priv(dev);
461 struct netxen_adapter *adapter = port->adapter; 461 struct netxen_adapter *adapter = port->adapter;
462 int i, j; 462 int i;
463 463
464 ring->rx_pending = 0; 464 ring->rx_pending = 0;
465 ring->rx_jumbo_pending = 0;
465 for (i = 0; i < MAX_RCV_CTX; ++i) { 466 for (i = 0; i < MAX_RCV_CTX; ++i) {
466 for (j = 0; j < NUM_RCV_DESC_RINGS; j++) 467 ring->rx_pending += adapter->recv_ctx[i].
467 ring->rx_pending += 468 rcv_desc[RCV_DESC_NORMAL_CTXID].rcv_pending;
468 adapter->recv_ctx[i].rcv_desc[j].rcv_pending; 469 ring->rx_jumbo_pending += adapter->recv_ctx[i].
470 rcv_desc[RCV_DESC_JUMBO_CTXID].rcv_pending;
469 } 471 }
470 472
471 ring->rx_max_pending = adapter->max_rx_desc_count; 473 ring->rx_max_pending = adapter->max_rx_desc_count;
472 ring->tx_max_pending = adapter->max_tx_desc_count; 474 ring->tx_max_pending = adapter->max_tx_desc_count;
475 ring->rx_jumbo_max_pending = adapter->max_jumbo_rx_desc_count;
473 ring->rx_mini_max_pending = 0; 476 ring->rx_mini_max_pending = 0;
474 ring->rx_mini_pending = 0; 477 ring->rx_mini_pending = 0;
475 ring->rx_jumbo_max_pending = 0;
476 ring->rx_jumbo_pending = 0; 478 ring->rx_jumbo_pending = 0;
477} 479}
478 480
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index 7470852ab582..9147b6048dfb 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -42,7 +42,7 @@
42 42
43#define NETXEN_FLASH_BASE (BOOTLD_START) 43#define NETXEN_FLASH_BASE (BOOTLD_START)
44#define NETXEN_PHANTOM_MEM_BASE (NETXEN_FLASH_BASE) 44#define NETXEN_PHANTOM_MEM_BASE (NETXEN_FLASH_BASE)
45#define NETXEN_MAX_MTU 8000 45#define NETXEN_MAX_MTU 8000 + NETXEN_ENET_HEADER_SIZE + NETXEN_ETH_FCS_SIZE
46#define NETXEN_MIN_MTU 64 46#define NETXEN_MIN_MTU 64
47#define NETXEN_ETH_FCS_SIZE 4 47#define NETXEN_ETH_FCS_SIZE 4
48#define NETXEN_ENET_HEADER_SIZE 14 48#define NETXEN_ENET_HEADER_SIZE 14
@@ -176,11 +176,9 @@ int netxen_nic_hw_resources(struct netxen_adapter *adapter)
176 struct netxen_hardware_context *hw = &adapter->ahw; 176 struct netxen_hardware_context *hw = &adapter->ahw;
177 u32 state = 0; 177 u32 state = 0;
178 void *addr; 178 void *addr;
179 void *pause_addr;
180 int loops = 0, err = 0; 179 int loops = 0, err = 0;
181 int ctx, ring; 180 int ctx, ring;
182 u32 card_cmdring = 0; 181 u32 card_cmdring = 0;
183 struct netxen_rcv_desc_crb *rcv_desc_crb = NULL;
184 struct netxen_recv_context *recv_ctx; 182 struct netxen_recv_context *recv_ctx;
185 struct netxen_rcv_desc_ctx *rcv_desc; 183 struct netxen_rcv_desc_ctx *rcv_desc;
186 184
@@ -224,33 +222,42 @@ int netxen_nic_hw_resources(struct netxen_adapter *adapter)
224 DPRINTK(INFO, "Recieve Peg ready too. starting stuff\n"); 222 DPRINTK(INFO, "Recieve Peg ready too. starting stuff\n");
225 223
226 addr = netxen_alloc(adapter->ahw.pdev, 224 addr = netxen_alloc(adapter->ahw.pdev,
227 sizeof(struct cmd_desc_type0) * 225 sizeof(struct netxen_ring_ctx) +
228 adapter->max_tx_desc_count, 226 sizeof(uint32_t),
229 &hw->cmd_desc_phys_addr, &hw->cmd_desc_pdev); 227 (dma_addr_t *) & adapter->ctx_desc_phys_addr,
228 &adapter->ctx_desc_pdev);
230 229
230 printk("ctx_desc_phys_addr: 0x%llx\n",
231 (u64) adapter->ctx_desc_phys_addr);
231 if (addr == NULL) { 232 if (addr == NULL) {
232 DPRINTK(ERR, "bad return from pci_alloc_consistent\n"); 233 DPRINTK(ERR, "bad return from pci_alloc_consistent\n");
233 return -ENOMEM; 234 err = -ENOMEM;
235 return err;
234 } 236 }
237 memset(addr, 0, sizeof(struct netxen_ring_ctx));
238 adapter->ctx_desc = (struct netxen_ring_ctx *)addr;
239 adapter->ctx_desc->cmd_consumer_offset = adapter->ctx_desc_phys_addr
240 + sizeof(struct netxen_ring_ctx);
241 adapter->cmd_consumer = (uint32_t *) (((char *)addr) +
242 sizeof(struct netxen_ring_ctx));
243
244 addr = pci_alloc_consistent(adapter->ahw.pdev,
245 sizeof(struct cmd_desc_type0) *
246 adapter->max_tx_desc_count,
247 (dma_addr_t *) & hw->cmd_desc_phys_addr);
248 printk("cmd_desc_phys_addr: 0x%llx\n", (u64) hw->cmd_desc_phys_addr);
235 249
236 pause_addr = netxen_alloc(adapter->ahw.pdev, 512, 250 if (addr == NULL) {
237 (dma_addr_t *) & hw->pause_physaddr, 251 DPRINTK(ERR, "bad return from pci_alloc_consistent\n");
238 &hw->pause_pdev); 252 netxen_free_hw_resources(adapter);
239 if (pause_addr == NULL) {
240 DPRINTK(1, ERR, "bad return from pci_alloc_consistent\n");
241 return -ENOMEM; 253 return -ENOMEM;
242 } 254 }
243 255
244 hw->pauseaddr = (char *)pause_addr; 256 adapter->ctx_desc->cmd_ring_addr_lo =
245 { 257 hw->cmd_desc_phys_addr & 0xffffffffUL;
246 u64 *ptr = (u64 *) pause_addr; 258 adapter->ctx_desc->cmd_ring_addr_hi =
247 *ptr++ = NETXEN_NIC_ZERO_PAUSE_ADDR; 259 ((u64) hw->cmd_desc_phys_addr >> 32);
248 *ptr++ = NETXEN_NIC_ZERO_PAUSE_ADDR; 260 adapter->ctx_desc->cmd_ring_size = adapter->max_tx_desc_count;
249 *ptr++ = NETXEN_NIC_UNIT_PAUSE_ADDR;
250 *ptr++ = NETXEN_NIC_ZERO_PAUSE_ADDR;
251 *ptr++ = NETXEN_NIC_EPG_PAUSE_ADDR1;
252 *ptr++ = NETXEN_NIC_EPG_PAUSE_ADDR2;
253 }
254 261
255 hw->cmd_desc_head = (struct cmd_desc_type0 *)addr; 262 hw->cmd_desc_head = (struct cmd_desc_type0 *)addr;
256 263
@@ -271,6 +278,12 @@ int netxen_nic_hw_resources(struct netxen_adapter *adapter)
271 return err; 278 return err;
272 } 279 }
273 rcv_desc->desc_head = (struct rcv_desc *)addr; 280 rcv_desc->desc_head = (struct rcv_desc *)addr;
281 adapter->ctx_desc->rcv_ctx[ring].rcv_ring_addr_lo =
282 rcv_desc->phys_addr & 0xffffffffUL;
283 adapter->ctx_desc->rcv_ctx[ring].rcv_ring_addr_hi =
284 ((u64) rcv_desc->phys_addr >> 32);
285 adapter->ctx_desc->rcv_ctx[ring].rcv_ring_size =
286 rcv_desc->max_rx_desc_count;
274 } 287 }
275 288
276 addr = netxen_alloc(adapter->ahw.pdev, STATUS_DESC_RINGSIZE, 289 addr = netxen_alloc(adapter->ahw.pdev, STATUS_DESC_RINGSIZE,
@@ -284,47 +297,21 @@ int netxen_nic_hw_resources(struct netxen_adapter *adapter)
284 return err; 297 return err;
285 } 298 }
286 recv_ctx->rcv_status_desc_head = (struct status_desc *)addr; 299 recv_ctx->rcv_status_desc_head = (struct status_desc *)addr;
287 for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) { 300 adapter->ctx_desc->sts_ring_addr_lo =
288 rcv_desc = &recv_ctx->rcv_desc[ring]; 301 recv_ctx->rcv_status_desc_phys_addr & 0xffffffffUL;
289 rcv_desc_crb = 302 adapter->ctx_desc->sts_ring_addr_hi =
290 &recv_crb_registers[ctx].rcv_desc_crb[ring]; 303 ((u64) recv_ctx->rcv_status_desc_phys_addr >> 32);
291 DPRINTK(INFO, "ring #%d crb global ring reg 0x%x\n", 304 adapter->ctx_desc->sts_ring_size = adapter->max_rx_desc_count;
292 ring, rcv_desc_crb->crb_globalrcv_ring);
293 /* Window = 1 */
294 writel(lower32(rcv_desc->phys_addr),
295 NETXEN_CRB_NORMALIZE(adapter,
296 rcv_desc_crb->
297 crb_globalrcv_ring));
298 DPRINTK(INFO, "GLOBAL_RCV_RING ctx %d, addr 0x%x"
299 " val 0x%llx,"
300 " virt %p\n", ctx,
301 rcv_desc_crb->crb_globalrcv_ring,
302 (unsigned long long)rcv_desc->phys_addr,
303 +rcv_desc->desc_head);
304 }
305 305
306 /* Window = 1 */
307 writel(lower32(recv_ctx->rcv_status_desc_phys_addr),
308 NETXEN_CRB_NORMALIZE(adapter,
309 recv_crb_registers[ctx].
310 crb_rcvstatus_ring));
311 DPRINTK(INFO, "RCVSTATUS_RING, ctx %d, addr 0x%x,"
312 " val 0x%x,virt%p\n",
313 ctx,
314 recv_crb_registers[ctx].crb_rcvstatus_ring,
315 (unsigned long long)recv_ctx->rcv_status_desc_phys_addr,
316 recv_ctx->rcv_status_desc_head);
317 } 306 }
318 /* Window = 1 */ 307 /* Window = 1 */
319 writel(lower32(hw->pause_physaddr), 308
320 NETXEN_CRB_NORMALIZE(adapter, CRB_PAUSE_ADDR_LO)); 309 writel(lower32(adapter->ctx_desc_phys_addr),
321 writel(upper32(hw->pause_physaddr), 310 NETXEN_CRB_NORMALIZE(adapter, CRB_CTX_ADDR_REG_LO));
322 NETXEN_CRB_NORMALIZE(adapter, CRB_PAUSE_ADDR_HI)); 311 writel(upper32(adapter->ctx_desc_phys_addr),
323 312 NETXEN_CRB_NORMALIZE(adapter, CRB_CTX_ADDR_REG_HI));
324 writel(lower32(hw->cmd_desc_phys_addr), 313 writel(NETXEN_CTX_SIGNATURE,
325 NETXEN_CRB_NORMALIZE(adapter, CRB_HOST_CMD_ADDR_LO)); 314 NETXEN_CRB_NORMALIZE(adapter, CRB_CTX_SIGNATURE_REG));
326 writel(upper32(hw->cmd_desc_phys_addr),
327 NETXEN_CRB_NORMALIZE(adapter, CRB_HOST_CMD_ADDR_HI));
328 return err; 315 return err;
329} 316}
330 317
@@ -334,6 +321,15 @@ void netxen_free_hw_resources(struct netxen_adapter *adapter)
334 struct netxen_rcv_desc_ctx *rcv_desc; 321 struct netxen_rcv_desc_ctx *rcv_desc;
335 int ctx, ring; 322 int ctx, ring;
336 323
324 if (adapter->ctx_desc != NULL) {
325 pci_free_consistent(adapter->ctx_desc_pdev,
326 sizeof(struct netxen_ring_ctx) +
327 sizeof(uint32_t),
328 adapter->ctx_desc,
329 adapter->ctx_desc_phys_addr);
330 adapter->ctx_desc = NULL;
331 }
332
337 if (adapter->ahw.cmd_desc_head != NULL) { 333 if (adapter->ahw.cmd_desc_head != NULL) {
338 pci_free_consistent(adapter->ahw.cmd_desc_pdev, 334 pci_free_consistent(adapter->ahw.cmd_desc_pdev,
339 sizeof(struct cmd_desc_type0) * 335 sizeof(struct cmd_desc_type0) *
@@ -342,11 +338,9 @@ void netxen_free_hw_resources(struct netxen_adapter *adapter)
342 adapter->ahw.cmd_desc_phys_addr); 338 adapter->ahw.cmd_desc_phys_addr);
343 adapter->ahw.cmd_desc_head = NULL; 339 adapter->ahw.cmd_desc_head = NULL;
344 } 340 }
345 if (adapter->ahw.pauseaddr != NULL) { 341 /* Special handling: there are 2 ports on this board */
346 pci_free_consistent(adapter->ahw.pause_pdev, 512, 342 if (adapter->ahw.boardcfg.board_type == NETXEN_BRDTYPE_P2_SB31_10G_IMEZ) {
347 adapter->ahw.pauseaddr, 343 adapter->ahw.max_ports = 2;
348 adapter->ahw.pause_physaddr);
349 adapter->ahw.pauseaddr = NULL;
350 } 344 }
351 345
352 for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) { 346 for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
@@ -381,19 +375,22 @@ void netxen_tso_check(struct netxen_adapter *adapter,
381 desc->total_hdr_length = sizeof(struct ethhdr) + 375 desc->total_hdr_length = sizeof(struct ethhdr) +
382 ((skb->nh.iph)->ihl * sizeof(u32)) + 376 ((skb->nh.iph)->ihl * sizeof(u32)) +
383 ((skb->h.th)->doff * sizeof(u32)); 377 ((skb->h.th)->doff * sizeof(u32));
384 desc->opcode = TX_TCP_LSO; 378 netxen_set_cmd_desc_opcode(desc, TX_TCP_LSO);
385 } else if (skb->ip_summed == CHECKSUM_COMPLETE) { 379 } else if (skb->ip_summed == CHECKSUM_COMPLETE) {
386 if (skb->nh.iph->protocol == IPPROTO_TCP) { 380 if (skb->nh.iph->protocol == IPPROTO_TCP) {
387 desc->opcode = TX_TCP_PKT; 381 netxen_set_cmd_desc_opcode(desc, TX_TCP_PKT);
388 } else if (skb->nh.iph->protocol == IPPROTO_UDP) { 382 } else if (skb->nh.iph->protocol == IPPROTO_UDP) {
389 desc->opcode = TX_UDP_PKT; 383 netxen_set_cmd_desc_opcode(desc, TX_UDP_PKT);
390 } else { 384 } else {
391 return; 385 return;
392 } 386 }
393 } 387 }
394 adapter->stats.xmitcsummed++; 388 adapter->stats.xmitcsummed++;
395 CMD_DESC_TCP_HDR_OFFSET_WRT(desc, skb->h.raw - skb->data); 389 desc->tcp_hdr_offset = skb->h.raw - skb->data;
396 desc->length_tcp_hdr = cpu_to_le32(desc->length_tcp_hdr); 390 netxen_set_cmd_desc_totallength(desc,
391 cpu_to_le32
392 (netxen_get_cmd_desc_totallength
393 (desc)));
397 desc->ip_hdr_offset = skb->nh.raw - skb->data; 394 desc->ip_hdr_offset = skb->nh.raw - skb->data;
398} 395}
399 396
@@ -871,7 +868,7 @@ void netxen_nic_set_link_parameters(struct netxen_port *port)
871{ 868{
872 struct netxen_adapter *adapter = port->adapter; 869 struct netxen_adapter *adapter = port->adapter;
873 __le32 status; 870 __le32 status;
874 u16 autoneg; 871 __le32 autoneg;
875 __le32 mode; 872 __le32 mode;
876 873
877 netxen_nic_read_w0(adapter, NETXEN_NIU_MODE, &mode); 874 netxen_nic_read_w0(adapter, NETXEN_NIU_MODE, &mode);
@@ -911,7 +908,7 @@ void netxen_nic_set_link_parameters(struct netxen_port *port)
911 && adapter-> 908 && adapter->
912 phy_read(adapter, port->portnum, 909 phy_read(adapter, port->portnum,
913 NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG, 910 NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG,
914 (__le32 *) & autoneg) != 0) 911 &autoneg) != 0)
915 port->link_autoneg = autoneg; 912 port->link_autoneg = autoneg;
916 } else 913 } else
917 goto link_down; 914 goto link_down;
@@ -1006,3 +1003,291 @@ int netxen_crb_read_val(struct netxen_adapter *adapter, unsigned long off)
1006 netxen_nic_hw_read_wx(adapter, off, &data, 4); 1003 netxen_nic_hw_read_wx(adapter, off, &data, 4);
1007 return data; 1004 return data;
1008} 1005}
1006
1007int netxen_nic_hw_write_ioctl(struct netxen_adapter *adapter, u64 off,
1008 void *data, int len)
1009{
1010 void *addr;
1011 u64 offset = off;
1012 u8 *mem_ptr = NULL;
1013 unsigned long mem_base;
1014 unsigned long mem_page;
1015
1016 if (ADDR_IN_WINDOW1(off)) {
1017 addr = NETXEN_CRB_NORMALIZE(adapter, off);
1018 if (!addr) {
1019 mem_base = pci_resource_start(adapter->ahw.pdev, 0);
1020 offset = NETXEN_CRB_NORMAL(off);
1021 mem_page = offset & PAGE_MASK;
1022 if (mem_page != ((offset + len - 1) & PAGE_MASK))
1023 mem_ptr =
1024 ioremap(mem_base + mem_page, PAGE_SIZE * 2);
1025 else
1026 mem_ptr =
1027 ioremap(mem_base + mem_page, PAGE_SIZE);
1028 if (mem_ptr == 0UL) {
1029 return 1;
1030 }
1031 addr = mem_ptr;
1032 addr += offset & (PAGE_SIZE - 1);
1033 }
1034 } else {
1035 addr = pci_base_offset(adapter, off);
1036 if (!addr) {
1037 mem_base = pci_resource_start(adapter->ahw.pdev, 0);
1038 mem_page = off & PAGE_MASK;
1039 if (mem_page != ((off + len - 1) & PAGE_MASK))
1040 mem_ptr =
1041 ioremap(mem_base + mem_page, PAGE_SIZE * 2);
1042 else
1043 mem_ptr =
1044 ioremap(mem_base + mem_page, PAGE_SIZE);
1045 if (mem_ptr == 0UL) {
1046 return 1;
1047 }
1048 addr = mem_ptr;
1049 addr += off & (PAGE_SIZE - 1);
1050 }
1051 netxen_nic_pci_change_crbwindow(adapter, 0);
1052 }
1053 switch (len) {
1054 case 1:
1055 writeb(*(u8 *) data, addr);
1056 break;
1057 case 2:
1058 writew(*(u16 *) data, addr);
1059 break;
1060 case 4:
1061 writel(*(u32 *) data, addr);
1062 break;
1063 case 8:
1064 writeq(*(u64 *) data, addr);
1065 break;
1066 default:
1067 DPRINTK(INFO,
1068 "writing data %lx to offset %llx, num words=%d\n",
1069 *(unsigned long *)data, off, (len >> 3));
1070
1071 netxen_nic_hw_block_write64((u64 __iomem *) data, addr,
1072 (len >> 3));
1073 break;
1074 }
1075
1076 if (!ADDR_IN_WINDOW1(off))
1077 netxen_nic_pci_change_crbwindow(adapter, 1);
1078 if (mem_ptr)
1079 iounmap(mem_ptr);
1080 return 0;
1081}
1082
1083int netxen_nic_hw_read_ioctl(struct netxen_adapter *adapter, u64 off,
1084 void *data, int len)
1085{
1086 void *addr;
1087 u64 offset;
1088 u8 *mem_ptr = NULL;
1089 unsigned long mem_base;
1090 unsigned long mem_page;
1091
1092 if (ADDR_IN_WINDOW1(off)) {
1093 addr = NETXEN_CRB_NORMALIZE(adapter, off);
1094 if (!addr) {
1095 mem_base = pci_resource_start(adapter->ahw.pdev, 0);
1096 offset = NETXEN_CRB_NORMAL(off);
1097 mem_page = offset & PAGE_MASK;
1098 if (mem_page != ((offset + len - 1) & PAGE_MASK))
1099 mem_ptr =
1100 ioremap(mem_base + mem_page, PAGE_SIZE * 2);
1101 else
1102 mem_ptr =
1103 ioremap(mem_base + mem_page, PAGE_SIZE);
1104 if (mem_ptr == 0UL) {
1105 *(u8 *) data = 0;
1106 return 1;
1107 }
1108 addr = mem_ptr;
1109 addr += offset & (PAGE_SIZE - 1);
1110 }
1111 } else {
1112 addr = pci_base_offset(adapter, off);
1113 if (!addr) {
1114 mem_base = pci_resource_start(adapter->ahw.pdev, 0);
1115 mem_page = off & PAGE_MASK;
1116 if (mem_page != ((off + len - 1) & PAGE_MASK))
1117 mem_ptr =
1118 ioremap(mem_base + mem_page, PAGE_SIZE * 2);
1119 else
1120 mem_ptr =
1121 ioremap(mem_base + mem_page, PAGE_SIZE);
1122 if (mem_ptr == 0UL)
1123 return 1;
1124 addr = mem_ptr;
1125 addr += off & (PAGE_SIZE - 1);
1126 }
1127 netxen_nic_pci_change_crbwindow(adapter, 0);
1128 }
1129 switch (len) {
1130 case 1:
1131 *(u8 *) data = readb(addr);
1132 break;
1133 case 2:
1134 *(u16 *) data = readw(addr);
1135 break;
1136 case 4:
1137 *(u32 *) data = readl(addr);
1138 break;
1139 case 8:
1140 *(u64 *) data = readq(addr);
1141 break;
1142 default:
1143 netxen_nic_hw_block_read64((u64 __iomem *) data, addr,
1144 (len >> 3));
1145 break;
1146 }
1147 if (!ADDR_IN_WINDOW1(off))
1148 netxen_nic_pci_change_crbwindow(adapter, 1);
1149 if (mem_ptr)
1150 iounmap(mem_ptr);
1151 return 0;
1152}
1153
1154int netxen_nic_pci_mem_write_ioctl(struct netxen_adapter *adapter, u64 off,
1155 void *data, int size)
1156{
1157 void *addr;
1158 int ret = 0;
1159 u8 *mem_ptr = NULL;
1160 unsigned long mem_base;
1161 unsigned long mem_page;
1162
1163 if (data == NULL || off > (128 * 1024 * 1024)) {
1164 printk(KERN_ERR "%s: data: %p off:%llx\n",
1165 netxen_nic_driver_name, data, off);
1166 return 1;
1167 }
1168 off = netxen_nic_pci_set_window(adapter, off);
1169 /* Corner case : Malicious user tried to break the driver by reading
1170 last few bytes in ranges and tries to read further addresses.
1171 */
1172 if (!pci_base(adapter, off + size - 1) && pci_base(adapter, off)) {
1173 printk(KERN_ERR "%s: Invalid access to memory address range"
1174 " 0x%llx - 0x%llx\n", netxen_nic_driver_name, off,
1175 off + size);
1176 return 1;
1177 }
1178 addr = pci_base_offset(adapter, off);
1179 DPRINTK(INFO, "writing data %llx to offset %llx\n",
1180 *(unsigned long long *)data, off);
1181 if (!addr) {
1182 mem_base = pci_resource_start(adapter->ahw.pdev, 0);
1183 mem_page = off & PAGE_MASK;
1184 /* Map two pages whenever user tries to access addresses in two
1185 consecutive pages.
1186 */
1187 if (mem_page != ((off + size - 1) & PAGE_MASK))
1188 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE * 2);
1189 else
1190 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
1191 if (mem_ptr == 0UL) {
1192 return 1;
1193 }
1194 addr = mem_ptr;
1195 addr += off & (PAGE_SIZE - 1);
1196 }
1197 switch (size) {
1198 case 1:
1199 writeb(*(u8 *) data, addr);
1200 break;
1201 case 2:
1202 writew(*(u16 *) data, addr);
1203 break;
1204 case 4:
1205 writel(*(u32 *) data, addr);
1206 break;
1207 case 8:
1208 writeq(*(u64 *) data, addr);
1209 break;
1210 default:
1211 DPRINTK(INFO,
1212 "writing data %lx to offset %llx, num words=%d\n",
1213 *(unsigned long *)data, off, (size >> 3));
1214
1215 netxen_nic_hw_block_write64((u64 __iomem *) data, addr,
1216 (size >> 3));
1217 break;
1218 }
1219
1220 if (mem_ptr)
1221 iounmap(mem_ptr);
1222 DPRINTK(INFO, "wrote %llx\n", *(unsigned long long *)data);
1223
1224 return ret;
1225}
1226
1227int netxen_nic_pci_mem_read_ioctl(struct netxen_adapter *adapter,
1228 u64 off, void *data, int size)
1229{
1230 void *addr;
1231 int ret = 0;
1232 u8 *mem_ptr = NULL;
1233 unsigned long mem_base;
1234 unsigned long mem_page;
1235
1236 if (data == NULL || off > (128 * 1024 * 1024)) {
1237 printk(KERN_ERR "%s: data: %p off:%llx\n",
1238 netxen_nic_driver_name, data, off);
1239 return 1;
1240 }
1241 off = netxen_nic_pci_set_window(adapter, off);
1242 /* Corner case : Malicious user tried to break the driver by reading
1243 last few bytes in ranges and tries to read further addresses.
1244 */
1245 if (!pci_base(adapter, off + size - 1) && pci_base(adapter, off)) {
1246 printk(KERN_ERR "%s: Invalid access to memory address range"
1247 " 0x%llx - 0x%llx\n", netxen_nic_driver_name, off,
1248 off + size);
1249 return 1;
1250 }
1251 addr = pci_base_offset(adapter, off);
1252 if (!addr) {
1253 mem_base = pci_resource_start(adapter->ahw.pdev, 0);
1254 mem_page = off & PAGE_MASK;
1255 /* Map two pages whenever user tries to access addresses in two
1256 consecutive pages.
1257 */
1258 if (mem_page != ((off + size - 1) & PAGE_MASK))
1259 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE * 2);
1260 else
1261 mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
1262 if (mem_ptr == 0UL) {
1263 *(u8 *) data = 0;
1264 return 1;
1265 }
1266 addr = mem_ptr;
1267 addr += off & (PAGE_SIZE - 1);
1268 }
1269 switch (size) {
1270 case 1:
1271 *(u8 *) data = readb(addr);
1272 break;
1273 case 2:
1274 *(u16 *) data = readw(addr);
1275 break;
1276 case 4:
1277 *(u32 *) data = readl(addr);
1278 break;
1279 case 8:
1280 *(u64 *) data = readq(addr);
1281 break;
1282 default:
1283 netxen_nic_hw_block_read64((u64 __iomem *) data, addr,
1284 (size >> 3));
1285 break;
1286 }
1287
1288 if (mem_ptr)
1289 iounmap(mem_ptr);
1290 DPRINTK(INFO, "read %llx\n", *(unsigned long long *)data);
1291
1292 return ret;
1293}
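All four new ioctl helpers above share the same fallback: when the requested offset has no pre-mapped PCI BAR address, they ioremap() the containing page, and two pages if the access crosses a page boundary. A user-space sketch of that boundary check, assuming a 4 KB page size:

```c
#include <stdio.h>
#include <stdint.h>

#define EX_PAGE_SIZE 4096ULL
#define EX_PAGE_MASK (~(EX_PAGE_SIZE - 1))

/* Pages that must be mapped for a [off, off+len) access,
 * mirroring the mem_page comparison in the ioctl helpers above. */
static unsigned long pages_needed(uint64_t off, int len)
{
	uint64_t first = off & EX_PAGE_MASK;
	uint64_t last  = (off + len - 1) & EX_PAGE_MASK;

	return (first == last) ? 1 : 2;
}

int main(void)
{
	printf("%lu\n", pages_needed(0x1ff0, 8));	/* stays in one page -> 1 */
	printf("%lu\n", pages_needed(0x1ffc, 8));	/* crosses a page    -> 2 */
	return 0;
}
```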
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index deac1a3ae275..f78668030ec6 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -137,6 +137,8 @@ int netxen_init_firmware(struct netxen_adapter *adapter)
137 return err; 137 return err;
138 } 138 }
139 /* Window 1 call */ 139 /* Window 1 call */
140 writel(MPORT_SINGLE_FUNCTION_MODE,
141 NETXEN_CRB_NORMALIZE(adapter, CRB_MPORT_MODE));
140 writel(PHAN_INITIALIZE_ACK, 142 writel(PHAN_INITIALIZE_ACK,
141 NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE)); 143 NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE));
142 144
@@ -184,15 +186,12 @@ void netxen_initialize_adapter_sw(struct netxen_adapter *adapter)
184 for (i = 0; i < num_rx_bufs; i++) { 186 for (i = 0; i < num_rx_bufs; i++) {
185 rx_buf->ref_handle = i; 187 rx_buf->ref_handle = i;
186 rx_buf->state = NETXEN_BUFFER_FREE; 188 rx_buf->state = NETXEN_BUFFER_FREE;
187
188 DPRINTK(INFO, "Rx buf:ctx%d i(%d) rx_buf:" 189 DPRINTK(INFO, "Rx buf:ctx%d i(%d) rx_buf:"
189 "%p\n", ctxid, i, rx_buf); 190 "%p\n", ctxid, i, rx_buf);
190 rx_buf++; 191 rx_buf++;
191 } 192 }
192 } 193 }
193 } 194 }
194 DPRINTK(INFO, "initialized buffers for %s and %s\n",
195 "adapter->free_cmd_buf_list", "adapter->free_rxbuf");
196} 195}
197 196
198void netxen_initialize_adapter_hw(struct netxen_adapter *adapter) 197void netxen_initialize_adapter_hw(struct netxen_adapter *adapter)
@@ -621,6 +620,43 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
621 return 0; 620 return 0;
622} 621}
623 622
623int netxen_initialize_adapter_offload(struct netxen_adapter *adapter)
624{
625 uint64_t addr;
626 uint32_t hi;
627 uint32_t lo;
628
629 adapter->dummy_dma.addr =
630 pci_alloc_consistent(adapter->ahw.pdev,
631 NETXEN_HOST_DUMMY_DMA_SIZE,
632 &adapter->dummy_dma.phys_addr);
633 if (adapter->dummy_dma.addr == NULL) {
634 printk("%s: ERROR: Could not allocate dummy DMA memory\n",
635 __FUNCTION__);
636 return -ENOMEM;
637 }
638
639 addr = (uint64_t) adapter->dummy_dma.phys_addr;
640 hi = (addr >> 32) & 0xffffffff;
641 lo = addr & 0xffffffff;
642
643 writel(hi, NETXEN_CRB_NORMALIZE(adapter, CRB_HOST_DUMMY_BUF_ADDR_HI));
644 writel(lo, NETXEN_CRB_NORMALIZE(adapter, CRB_HOST_DUMMY_BUF_ADDR_LO));
645
646 return 0;
647}
648
649void netxen_free_adapter_offload(struct netxen_adapter *adapter)
650{
651 if (adapter->dummy_dma.addr) {
652 pci_free_consistent(adapter->ahw.pdev,
653 NETXEN_HOST_DUMMY_DMA_SIZE,
654 adapter->dummy_dma.addr,
655 adapter->dummy_dma.phys_addr);
656 adapter->dummy_dma.addr = NULL;
657 }
658}
659
624void netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val) 660void netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val)
625{ 661{
626 u32 val = 0; 662 u32 val = 0;
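netxen_initialize_adapter_offload() above allocates a NETXEN_HOST_DUMMY_DMA_SIZE coherent buffer and hands its bus address to the firmware by writing the upper and lower 32-bit halves into two CRB registers. A trivial sketch of that split, using a made-up address:

```c
#include <stdio.h>
#include <stdint.h>

/* Splitting a 64-bit DMA address into the hi/lo halves written to
 * CRB_HOST_DUMMY_BUF_ADDR_{HI,LO}; illustration only. */
int main(void)
{
	uint64_t dma_addr = 0x000000012345f000ULL;	/* hypothetical bus address */
	uint32_t hi = (uint32_t)(dma_addr >> 32);
	uint32_t lo = (uint32_t)(dma_addr & 0xffffffff);

	printf("HI register <- 0x%08x\n", hi);
	printf("LO register <- 0x%08x\n", lo);
	return 0;
}
```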
@@ -655,7 +691,8 @@ int netxen_nic_rx_has_work(struct netxen_adapter *adapter)
655 desc_head = recv_ctx->rcv_status_desc_head; 691 desc_head = recv_ctx->rcv_status_desc_head;
656 desc = &desc_head[consumer]; 692 desc = &desc_head[consumer];
657 693
658 if (((le16_to_cpu(desc->owner)) & STATUS_OWNER_HOST)) 694 if (((le16_to_cpu(netxen_get_sts_owner(desc)))
695 & STATUS_OWNER_HOST))
659 return 1; 696 return 1;
660 } 697 }
661 698
@@ -747,19 +784,19 @@ void
747netxen_process_rcv(struct netxen_adapter *adapter, int ctxid, 784netxen_process_rcv(struct netxen_adapter *adapter, int ctxid,
748 struct status_desc *desc) 785 struct status_desc *desc)
749{ 786{
750 struct netxen_port *port = adapter->port[STATUS_DESC_PORT(desc)]; 787 struct netxen_port *port = adapter->port[netxen_get_sts_port(desc)];
751 struct pci_dev *pdev = port->pdev; 788 struct pci_dev *pdev = port->pdev;
752 struct net_device *netdev = port->netdev; 789 struct net_device *netdev = port->netdev;
753 int index = le16_to_cpu(desc->reference_handle); 790 int index = le16_to_cpu(netxen_get_sts_refhandle(desc));
754 struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctxid]); 791 struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctxid]);
755 struct netxen_rx_buffer *buffer; 792 struct netxen_rx_buffer *buffer;
756 struct sk_buff *skb; 793 struct sk_buff *skb;
757 u32 length = le16_to_cpu(desc->total_length); 794 u32 length = le16_to_cpu(netxen_get_sts_totallength(desc));
758 u32 desc_ctx; 795 u32 desc_ctx;
759 struct netxen_rcv_desc_ctx *rcv_desc; 796 struct netxen_rcv_desc_ctx *rcv_desc;
760 int ret; 797 int ret;
761 798
762 desc_ctx = STATUS_DESC_TYPE(desc); 799 desc_ctx = netxen_get_sts_type(desc);
763 if (unlikely(desc_ctx >= NUM_RCV_DESC_RINGS)) { 800 if (unlikely(desc_ctx >= NUM_RCV_DESC_RINGS)) {
764 printk("%s: %s Bad Rcv descriptor ring\n", 801 printk("%s: %s Bad Rcv descriptor ring\n",
765 netxen_nic_driver_name, netdev->name); 802 netxen_nic_driver_name, netdev->name);
@@ -767,20 +804,49 @@ netxen_process_rcv(struct netxen_adapter *adapter, int ctxid,
767 } 804 }
768 805
769 rcv_desc = &recv_ctx->rcv_desc[desc_ctx]; 806 rcv_desc = &recv_ctx->rcv_desc[desc_ctx];
807 if (unlikely(index > rcv_desc->max_rx_desc_count)) {
808 DPRINTK(ERR, "Got a buffer index:%x Max is %x\n",
809 index, rcv_desc->max_rx_desc_count);
810 return;
811 }
770 buffer = &rcv_desc->rx_buf_arr[index]; 812 buffer = &rcv_desc->rx_buf_arr[index];
813 if (desc_ctx == RCV_DESC_LRO_CTXID) {
814 buffer->lro_current_frags++;
815 if (netxen_get_sts_desc_lro_last_frag(desc)) {
816 buffer->lro_expected_frags =
817 netxen_get_sts_desc_lro_cnt(desc);
818 buffer->lro_length = length;
819 }
820 if (buffer->lro_current_frags != buffer->lro_expected_frags) {
821 if (buffer->lro_expected_frags != 0) {
822 printk("LRO: (refhandle:%x) recv frag."
823 "wait for last. flags: %x expected:%d"
824 "have:%d\n", index,
825 netxen_get_sts_desc_lro_last_frag(desc),
826 buffer->lro_expected_frags,
827 buffer->lro_current_frags);
828 }
829 return;
830 }
831 }
771 832
772 pci_unmap_single(pdev, buffer->dma, rcv_desc->dma_size, 833 pci_unmap_single(pdev, buffer->dma, rcv_desc->dma_size,
773 PCI_DMA_FROMDEVICE); 834 PCI_DMA_FROMDEVICE);
774 835
775 skb = (struct sk_buff *)buffer->skb; 836 skb = (struct sk_buff *)buffer->skb;
776 837
777 if (likely(STATUS_DESC_STATUS(desc) == STATUS_CKSUM_OK)) { 838 if (likely(netxen_get_sts_status(desc) == STATUS_CKSUM_OK)) {
778 port->stats.csummed++; 839 port->stats.csummed++;
779 skb->ip_summed = CHECKSUM_UNNECESSARY; 840 skb->ip_summed = CHECKSUM_UNNECESSARY;
780 } else 841 }
781 skb->ip_summed = CHECKSUM_NONE;
782 skb->dev = netdev; 842 skb->dev = netdev;
783 skb_put(skb, length); 843 if (desc_ctx == RCV_DESC_LRO_CTXID) {
844 /* True length was only available on the last pkt */
845 skb_put(skb, buffer->lro_length);
846 } else {
847 skb_put(skb, length);
848 }
849
784 skb->protocol = eth_type_trans(skb, netdev); 850 skb->protocol = eth_type_trans(skb, netdev);
785 851
786 ret = netif_receive_skb(skb); 852 ret = netif_receive_skb(skb);
@@ -826,6 +892,8 @@ netxen_process_rcv(struct netxen_adapter *adapter, int ctxid,
826 adapter->stats.post_called++; 892 adapter->stats.post_called++;
827 buffer->skb = NULL; 893 buffer->skb = NULL;
828 buffer->state = NETXEN_BUFFER_FREE; 894 buffer->state = NETXEN_BUFFER_FREE;
895 buffer->lro_current_frags = 0;
896 buffer->lro_expected_frags = 0;
829 897
830 port->stats.no_rcv++; 898 port->stats.no_rcv++;
831 port->stats.rxbytes += length; 899 port->stats.rxbytes += length;
@@ -838,6 +906,7 @@ u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctxid, int max)
838 struct status_desc *desc_head = recv_ctx->rcv_status_desc_head; 906 struct status_desc *desc_head = recv_ctx->rcv_status_desc_head;
839 struct status_desc *desc; /* used to read status desc here */ 907 struct status_desc *desc; /* used to read status desc here */
840 u32 consumer = recv_ctx->status_rx_consumer; 908 u32 consumer = recv_ctx->status_rx_consumer;
909 u32 producer = 0;
841 int count = 0, ring; 910 int count = 0, ring;
842 911
843 DPRINTK(INFO, "procesing receive\n"); 912 DPRINTK(INFO, "procesing receive\n");
@@ -849,18 +918,22 @@ u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctxid, int max)
849 */ 918 */
850 while (count < max) { 919 while (count < max) {
851 desc = &desc_head[consumer]; 920 desc = &desc_head[consumer];
852 if (!((le16_to_cpu(desc->owner)) & STATUS_OWNER_HOST)) { 921 if (!
853 DPRINTK(ERR, "desc %p ownedby %x\n", desc, desc->owner); 922 (le16_to_cpu(netxen_get_sts_owner(desc)) &
923 STATUS_OWNER_HOST)) {
924 DPRINTK(ERR, "desc %p ownedby %x\n", desc,
925 netxen_get_sts_owner(desc));
854 break; 926 break;
855 } 927 }
856 netxen_process_rcv(adapter, ctxid, desc); 928 netxen_process_rcv(adapter, ctxid, desc);
857 desc->owner = STATUS_OWNER_PHANTOM; 929 netxen_clear_sts_owner(desc);
930 netxen_set_sts_owner(desc, STATUS_OWNER_PHANTOM);
858 consumer = (consumer + 1) & (adapter->max_rx_desc_count - 1); 931 consumer = (consumer + 1) & (adapter->max_rx_desc_count - 1);
859 count++; 932 count++;
860 } 933 }
861 if (count) { 934 if (count) {
862 for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) { 935 for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) {
863 netxen_post_rx_buffers(adapter, ctxid, ring); 936 netxen_post_rx_buffers_nodb(adapter, ctxid, ring);
864 } 937 }
865 } 938 }
866 939
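The status-ring consumer index above advances with "(consumer + 1) & (max_rx_desc_count - 1)", which only wraps correctly because the ring sizes are powers of two. A quick sketch of that wrap behaviour:

```c
#include <stdio.h>
#include <stdint.h>

/* Consumer-index advance as used on the status ring: the size is a power
 * of two, so "& (size - 1)" wraps without a divide. Illustration only. */
int main(void)
{
	const uint32_t ring_size = 32768;	/* MAX_RCVSTATUS_DESCRIPTORS */
	uint32_t consumer = ring_size - 2;
	int i;

	for (i = 0; i < 4; i++) {
		printf("consumer = %u\n", consumer);
		consumer = (consumer + 1) & (ring_size - 1);
	}
	return 0;	/* prints 32766, 32767, 0, 1 */
}
```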
@@ -868,6 +941,7 @@ u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctxid, int max)
868 if (count) { 941 if (count) {
869 adapter->stats.process_rcv++; 942 adapter->stats.process_rcv++;
870 recv_ctx->status_rx_consumer = consumer; 943 recv_ctx->status_rx_consumer = consumer;
944 recv_ctx->status_rx_producer = producer;
871 945
872 /* Window = 1 */ 946 /* Window = 1 */
873 writel(consumer, 947 writel(consumer,
@@ -880,12 +954,13 @@ u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctxid, int max)
880} 954}
881 955
882/* Process Command status ring */ 956/* Process Command status ring */
883void netxen_process_cmd_ring(unsigned long data) 957int netxen_process_cmd_ring(unsigned long data)
884{ 958{
885 u32 last_consumer; 959 u32 last_consumer;
886 u32 consumer; 960 u32 consumer;
887 struct netxen_adapter *adapter = (struct netxen_adapter *)data; 961 struct netxen_adapter *adapter = (struct netxen_adapter *)data;
888 int count = 0; 962 int count1 = 0;
963 int count2 = 0;
889 struct netxen_cmd_buffer *buffer; 964 struct netxen_cmd_buffer *buffer;
890 struct netxen_port *port; /* port #1 */ 965 struct netxen_port *port; /* port #1 */
891 struct netxen_port *nport; 966 struct netxen_port *nport;
@@ -894,6 +969,7 @@ void netxen_process_cmd_ring(unsigned long data)
894 u32 i; 969 u32 i;
895 struct sk_buff *skb = NULL; 970 struct sk_buff *skb = NULL;
896 int p; 971 int p;
972 int done;
897 973
898 spin_lock(&adapter->tx_lock); 974 spin_lock(&adapter->tx_lock);
899 last_consumer = adapter->last_cmd_consumer; 975 last_consumer = adapter->last_cmd_consumer;
@@ -903,14 +979,13 @@ void netxen_process_cmd_ring(unsigned long data)
903 * number as part of the descriptor. This way we will be able to get 979 * number as part of the descriptor. This way we will be able to get
904 * the netdev which is associated with that device. 980 * the netdev which is associated with that device.
905 */ 981 */
906 consumer =
907 readl(NETXEN_CRB_NORMALIZE(adapter, CRB_CMD_CONSUMER_OFFSET));
908 982
983 consumer = *(adapter->cmd_consumer);
909 if (last_consumer == consumer) { /* Ring is empty */ 984 if (last_consumer == consumer) { /* Ring is empty */
910 DPRINTK(INFO, "last_consumer %d == consumer %d\n", 985 DPRINTK(INFO, "last_consumer %d == consumer %d\n",
911 last_consumer, consumer); 986 last_consumer, consumer);
912 spin_unlock(&adapter->tx_lock); 987 spin_unlock(&adapter->tx_lock);
913 return; 988 return 1;
914 } 989 }
915 990
916 adapter->proc_cmd_buf_counter++; 991 adapter->proc_cmd_buf_counter++;
@@ -921,7 +996,7 @@ void netxen_process_cmd_ring(unsigned long data)
921 */ 996 */
922 spin_unlock(&adapter->tx_lock); 997 spin_unlock(&adapter->tx_lock);
923 998
924 while ((last_consumer != consumer) && (count < MAX_STATUS_HANDLE)) { 999 while ((last_consumer != consumer) && (count1 < MAX_STATUS_HANDLE)) {
925 buffer = &adapter->cmd_buf_arr[last_consumer]; 1000 buffer = &adapter->cmd_buf_arr[last_consumer];
926 port = adapter->port[buffer->port]; 1001 port = adapter->port[buffer->port];
927 pdev = port->pdev; 1002 pdev = port->pdev;
@@ -947,24 +1022,25 @@ void netxen_process_cmd_ring(unsigned long data)
947 && netif_carrier_ok(port->netdev)) 1022 && netif_carrier_ok(port->netdev))
948 && ((jiffies - port->netdev->trans_start) > 1023 && ((jiffies - port->netdev->trans_start) >
949 port->netdev->watchdog_timeo)) { 1024 port->netdev->watchdog_timeo)) {
950 schedule_work(&port->adapter->tx_timeout_task); 1025 SCHEDULE_WORK(port->adapter->tx_timeout_task
1026 + port->portnum);
951 } 1027 }
952 1028
953 last_consumer = get_next_index(last_consumer, 1029 last_consumer = get_next_index(last_consumer,
954 adapter->max_tx_desc_count); 1030 adapter->max_tx_desc_count);
955 count++; 1031 count1++;
956 } 1032 }
957 adapter->stats.noxmitdone += count; 1033 adapter->stats.noxmitdone += count1;
958 1034
959 count = 0; 1035 count2 = 0;
960 spin_lock(&adapter->tx_lock); 1036 spin_lock(&adapter->tx_lock);
961 if ((--adapter->proc_cmd_buf_counter) == 0) { 1037 if ((--adapter->proc_cmd_buf_counter) == 0) {
962 adapter->last_cmd_consumer = last_consumer; 1038 adapter->last_cmd_consumer = last_consumer;
963 while ((adapter->last_cmd_consumer != consumer) 1039 while ((adapter->last_cmd_consumer != consumer)
964 && (count < MAX_STATUS_HANDLE)) { 1040 && (count2 < MAX_STATUS_HANDLE)) {
965 buffer = 1041 buffer =
966 &adapter->cmd_buf_arr[adapter->last_cmd_consumer]; 1042 &adapter->cmd_buf_arr[adapter->last_cmd_consumer];
967 count++; 1043 count2++;
968 if (buffer->skb) 1044 if (buffer->skb)
969 break; 1045 break;
970 else 1046 else
@@ -973,7 +1049,7 @@ void netxen_process_cmd_ring(unsigned long data)
973 adapter->max_tx_desc_count); 1049 adapter->max_tx_desc_count);
974 } 1050 }
975 } 1051 }
976 if (count) { 1052 if (count1 || count2) {
977 for (p = 0; p < adapter->ahw.max_ports; p++) { 1053 for (p = 0; p < adapter->ahw.max_ports; p++) {
978 nport = adapter->port[p]; 1054 nport = adapter->port[p];
979 if (netif_queue_stopped(nport->netdev) 1055 if (netif_queue_stopped(nport->netdev)
@@ -983,10 +1059,30 @@ void netxen_process_cmd_ring(unsigned long data)
983 } 1059 }
984 } 1060 }
985 } 1061 }
1062 /*
1063 * If everything is freed up to consumer then check if the ring is full
1064 * If the ring is full then check if more needs to be freed and
1065 * schedule the call back again.
1066 *
1067 * This happens when there are 2 CPUs. One could be freeing and the
1068 * other filling it. If the ring is full when we get out of here and
1069 * the card has already interrupted the host then the host can miss the
1070 * interrupt.
1071 *
1072 * There is still a possible race condition and the host could miss an
1073 * interrupt. The card has to take care of this.
1074 */
1075 if (adapter->last_cmd_consumer == consumer &&
1076 (((adapter->cmd_producer + 1) %
1077 adapter->max_tx_desc_count) == adapter->last_cmd_consumer)) {
1078 consumer = *(adapter->cmd_consumer);
1079 }
1080 done = (adapter->last_cmd_consumer == consumer);
986 1081
987 spin_unlock(&adapter->tx_lock); 1082 spin_unlock(&adapter->tx_lock);
988 DPRINTK(INFO, "last consumer is %d in %s\n", last_consumer, 1083 DPRINTK(INFO, "last consumer is %d in %s\n", last_consumer,
989 __FUNCTION__); 1084 __FUNCTION__);
1085 return (done);
990} 1086}
991 1087
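Reviewer note: the new return value and the long comment above it describe the race this hunk closes: if the host has cleaned up to the sampled consumer while the ring looks full, it samples the firmware-updated consumer once more before declaring itself done, so the poll loop (see the netxen_nic_poll() change later in this patch) keeps polling instead of waiting for an interrupt that may already have been missed. A small user-space model of the full-ring test and the "done" decision, with a plain struct standing in for the adapter fields:

#include <stdio.h>

/* "Full" means the producer is one slot behind the clean point
 * (one slot is always kept empty). */
struct tx_ring {
	unsigned int size;            /* max_tx_desc_count, power of two */
	unsigned int producer;        /* cmd_producer */
	unsigned int consumer;        /* *(adapter->cmd_consumer), firmware owned */
	unsigned int last_consumer;   /* last_cmd_consumer, host's clean point */
};

static int tx_ring_full(const struct tx_ring *r)
{
	return ((r->producer + 1) % r->size) == r->last_consumer;
}

/* Returns 1 when the host has truly caught up; if the ring looks full at
 * that moment the consumer is sampled once more, mirroring the hunk above. */
static int tx_clean_done(struct tx_ring *r, unsigned int fresh_consumer)
{
	if (r->last_consumer == r->consumer && tx_ring_full(r))
		r->consumer = fresh_consumer;   /* re-read *(adapter->cmd_consumer) */
	return r->last_consumer == r->consumer;
}

int main(void)
{
	struct tx_ring r = { 256, 9, 10, 10 };
	/* Ring full and apparently clean, but firmware moved on meanwhile: */
	printf("done=%d\n", tx_clean_done(&r, 42));   /* 0 -> poll again */
	r.consumer = r.last_consumer = 42;
	r.producer = 100;
	printf("done=%d\n", tx_clean_done(&r, 42));   /* 1 -> safe to stop */
	return 0;
}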
992/* 1088/*
@@ -998,17 +1094,16 @@ void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid)
998 struct sk_buff *skb; 1094 struct sk_buff *skb;
999 struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctx]); 1095 struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctx]);
1000 struct netxen_rcv_desc_ctx *rcv_desc = NULL; 1096 struct netxen_rcv_desc_ctx *rcv_desc = NULL;
1001 struct netxen_recv_crb *crbarea = &recv_crb_registers[ctx]; 1097 uint producer;
1002 struct netxen_rcv_desc_crb *rcv_desc_crb = NULL;
1003 u32 producer;
1004 struct rcv_desc *pdesc; 1098 struct rcv_desc *pdesc;
1005 struct netxen_rx_buffer *buffer; 1099 struct netxen_rx_buffer *buffer;
1006 int count = 0; 1100 int count = 0;
1007 int index = 0; 1101 int index = 0;
1102 netxen_ctx_msg msg = 0;
1103 dma_addr_t dma;
1008 1104
1009 adapter->stats.post_called++; 1105 adapter->stats.post_called++;
1010 rcv_desc = &recv_ctx->rcv_desc[ringid]; 1106 rcv_desc = &recv_ctx->rcv_desc[ringid];
1011 rcv_desc_crb = &crbarea->rcv_desc_crb[ringid];
1012 1107
1013 producer = rcv_desc->producer; 1108 producer = rcv_desc->producer;
1014 index = rcv_desc->begin_alloc; 1109 index = rcv_desc->begin_alloc;
@@ -1018,6 +1113,7 @@ void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid)
1018 skb = dev_alloc_skb(rcv_desc->skb_size); 1113 skb = dev_alloc_skb(rcv_desc->skb_size);
1019 if (unlikely(!skb)) { 1114 if (unlikely(!skb)) {
1020 /* 1115 /*
1116 * TODO
1021 * We need to schedule the posting of buffers to the pegs. 1117 * We need to schedule the posting of buffers to the pegs.
1022 */ 1118 */
1023 rcv_desc->begin_alloc = index; 1119 rcv_desc->begin_alloc = index;
@@ -1025,9 +1121,105 @@ void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid)
1025 " allocated only %d buffers\n", count); 1121 " allocated only %d buffers\n", count);
1026 break; 1122 break;
1027 } 1123 }
1124
1125 count++; /* now there should be no failure */
1126 pdesc = &rcv_desc->desc_head[producer];
1127
1128#if defined(XGB_DEBUG)
1129 *(unsigned long *)(skb->head) = 0xc0debabe;
1130 if (skb_is_nonlinear(skb)) {
 1131                 printk("Allocated SKB @%p is nonlinear\n", skb);
1132 }
1133#endif
1134 skb_reserve(skb, 2);
1135 /* This will be setup when we receive the
1136 * buffer after it has been filled FSL TBD TBD
1137 * skb->dev = netdev;
1138 */
1139 dma = pci_map_single(pdev, skb->data, rcv_desc->dma_size,
1140 PCI_DMA_FROMDEVICE);
1141 pdesc->addr_buffer = dma;
1142 buffer->skb = skb;
1143 buffer->state = NETXEN_BUFFER_BUSY;
1144 buffer->dma = dma;
1145 /* make a rcv descriptor */
1146 pdesc->reference_handle = buffer->ref_handle;
1147 pdesc->buffer_length = rcv_desc->dma_size;
 1148                 DPRINTK(INFO, "done writing descriptor\n");
1149 producer =
1150 get_next_index(producer, rcv_desc->max_rx_desc_count);
1151 index = get_next_index(index, rcv_desc->max_rx_desc_count);
1152 buffer = &rcv_desc->rx_buf_arr[index];
1153 }
1154 /* if we did allocate buffers, then write the count to Phantom */
1155 if (count) {
1156 rcv_desc->begin_alloc = index;
1157 rcv_desc->rcv_pending += count;
1158 adapter->stats.lastposted = count;
1159 adapter->stats.posted += count;
1160 rcv_desc->producer = producer;
1161 if (rcv_desc->rcv_free >= 32) {
1162 rcv_desc->rcv_free = 0;
1163 /* Window = 1 */
1164 writel((producer - 1) &
1165 (rcv_desc->max_rx_desc_count - 1),
1166 NETXEN_CRB_NORMALIZE(adapter,
1167 recv_crb_registers[0].
1168 rcv_desc_crb[ringid].
1169 crb_rcv_producer_offset));
1170 /*
1171 * Write a doorbell msg to tell phanmon of change in
1172 * receive ring producer
1173 */
1174 netxen_set_msg_peg_id(msg, NETXEN_RCV_PEG_DB_ID);
1175 netxen_set_msg_privid(msg);
1176 netxen_set_msg_count(msg,
1177 ((producer -
1178 1) & (rcv_desc->
1179 max_rx_desc_count - 1)));
1180 netxen_set_msg_ctxid(msg, 0);
1181 netxen_set_msg_opcode(msg, NETXEN_RCV_PRODUCER(ringid));
1182 writel(msg,
1183 DB_NORMALIZE(adapter,
1184 NETXEN_RCV_PRODUCER_OFFSET));
1185 }
1186 }
1187}
1188
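Reviewer note: netxen_post_rx_buffers() above both writes the new producer index to the per-ring CRB register and rings a doorbell: the netxen_set_msg_* helpers pack a peg id, a privilege bit, the producer count, a context id and an opcode into one 32-bit word that is written to the doorbell page with a single writel(). The helpers and the real bit layout live in netxen_nic.h; the standalone sketch below uses made-up field positions purely to illustrate the shift-and-mask packing style.

#include <stdio.h>
#include <stdint.h>

/* Illustrative only: these widths/offsets are placeholders, not the real
 * doorbell layout. */
typedef uint32_t netxen_ctx_msg;

#define MSG_SET(msg, shift, mask, val) \
	((msg) = ((msg) & ~((uint32_t)(mask) << (shift))) | \
		 (((uint32_t)(val) & (mask)) << (shift)))

static void set_peg_id(netxen_ctx_msg *m, unsigned v) { MSG_SET(*m,  0, 0x7,    v); }
static void set_privid(netxen_ctx_msg *m)             { MSG_SET(*m,  3, 0x1,    1); }
static void set_count(netxen_ctx_msg *m, unsigned v)  { MSG_SET(*m,  4, 0x3fff, v); }
static void set_ctxid(netxen_ctx_msg *m, unsigned v)  { MSG_SET(*m, 18, 0x3ff,  v); }
static void set_opcode(netxen_ctx_msg *m, unsigned v) { MSG_SET(*m, 28, 0xf,    v); }

int main(void)
{
	netxen_ctx_msg msg = 0;
	unsigned producer = 513, ring_size = 1024;

	set_peg_id(&msg, 2);                               /* receive peg */
	set_privid(&msg);
	set_count(&msg, (producer - 1) & (ring_size - 1)); /* last valid index */
	set_ctxid(&msg, 0);
	set_opcode(&msg, 1);                               /* "rcv producer" op */

	printf("doorbell word: 0x%08x\n", msg);            /* would be writel()'d */
	return 0;
}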
1189void netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter, uint32_t ctx,
1190 uint32_t ringid)
1191{
1192 struct pci_dev *pdev = adapter->ahw.pdev;
1193 struct sk_buff *skb;
1194 struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctx]);
1195 struct netxen_rcv_desc_ctx *rcv_desc = NULL;
1196 u32 producer;
1197 struct rcv_desc *pdesc;
1198 struct netxen_rx_buffer *buffer;
1199 int count = 0;
1200 int index = 0;
1201
1202 adapter->stats.post_called++;
1203 rcv_desc = &recv_ctx->rcv_desc[ringid];
1204
1205 producer = rcv_desc->producer;
1206 index = rcv_desc->begin_alloc;
1207 buffer = &rcv_desc->rx_buf_arr[index];
1208 /* We can start writing rx descriptors into the phantom memory. */
1209 while (buffer->state == NETXEN_BUFFER_FREE) {
1210 skb = dev_alloc_skb(rcv_desc->skb_size);
1211 if (unlikely(!skb)) {
1212 /*
1213 * We need to schedule the posting of buffers to the pegs.
1214 */
1215 rcv_desc->begin_alloc = index;
1216 DPRINTK(ERR, "netxen_post_rx_buffers_nodb: "
1217 " allocated only %d buffers\n", count);
1218 break;
1219 }
1028 count++; /* now there should be no failure */ 1220 count++; /* now there should be no failure */
1029 pdesc = &rcv_desc->desc_head[producer]; 1221 pdesc = &rcv_desc->desc_head[producer];
1030 skb_reserve(skb, NET_IP_ALIGN); 1222 skb_reserve(skb, 2);
1031 /* 1223 /*
1032 * This will be setup when we receive the 1224 * This will be setup when we receive the
1033 * buffer after it has been filled 1225 * buffer after it has been filled
@@ -1038,6 +1230,7 @@ void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid)
1038 buffer->dma = pci_map_single(pdev, skb->data, 1230 buffer->dma = pci_map_single(pdev, skb->data,
1039 rcv_desc->dma_size, 1231 rcv_desc->dma_size,
1040 PCI_DMA_FROMDEVICE); 1232 PCI_DMA_FROMDEVICE);
1233
1041 /* make a rcv descriptor */ 1234 /* make a rcv descriptor */
1042 pdesc->reference_handle = le16_to_cpu(buffer->ref_handle); 1235 pdesc->reference_handle = le16_to_cpu(buffer->ref_handle);
1043 pdesc->buffer_length = le16_to_cpu(rcv_desc->dma_size); 1236 pdesc->buffer_length = le16_to_cpu(rcv_desc->dma_size);
@@ -1062,7 +1255,8 @@ void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid)
1062 writel((producer - 1) & 1255 writel((producer - 1) &
1063 (rcv_desc->max_rx_desc_count - 1), 1256 (rcv_desc->max_rx_desc_count - 1),
1064 NETXEN_CRB_NORMALIZE(adapter, 1257 NETXEN_CRB_NORMALIZE(adapter,
1065 rcv_desc_crb-> 1258 recv_crb_registers[0].
1259 rcv_desc_crb[ringid].
1066 crb_rcv_producer_offset)); 1260 crb_rcv_producer_offset));
1067 wmb(); 1261 wmb();
1068 } 1262 }
@@ -1195,8 +1389,8 @@ netxen_nic_do_ioctl(struct netxen_adapter *adapter, void *u_data,
1195 1389
1196 switch (data.cmd) { 1390 switch (data.cmd) {
1197 case netxen_nic_cmd_pci_read: 1391 case netxen_nic_cmd_pci_read:
1198 if ((retval = netxen_nic_hw_read_wx(adapter, data.off, 1392 if ((retval = netxen_nic_hw_read_ioctl(adapter, data.off,
1199 &(data.u), data.size))) 1393 &(data.u), data.size)))
1200 goto error_out; 1394 goto error_out;
1201 if (copy_to_user 1395 if (copy_to_user
1202 ((void __user *)&(up_data->u), &(data.u), data.size)) { 1396 ((void __user *)&(up_data->u), &(data.u), data.size)) {
@@ -1209,8 +1403,35 @@ netxen_nic_do_ioctl(struct netxen_adapter *adapter, void *u_data,
1209 break; 1403 break;
1210 1404
1211 case netxen_nic_cmd_pci_write: 1405 case netxen_nic_cmd_pci_write:
1212 data.rv = netxen_nic_hw_write_wx(adapter, data.off, &(data.u), 1406 if ((retval = netxen_nic_hw_write_ioctl(adapter, data.off,
1213 data.size); 1407 &(data.u), data.size)))
1408 goto error_out;
1409 data.rv = 0;
1410 break;
1411
1412 case netxen_nic_cmd_pci_mem_read:
1413 if (netxen_nic_pci_mem_read_ioctl(adapter, data.off, &(data.u),
1414 data.size)) {
1415 DPRINTK(ERR, "Failed to read the data.\n");
1416 retval = -EFAULT;
1417 goto error_out;
1418 }
1419 if (copy_to_user
1420 ((void __user *)&(up_data->u), &(data.u), data.size)) {
1421 DPRINTK(ERR, "bad copy to userland: %d\n",
1422 (int)sizeof(data));
1423 retval = -EFAULT;
1424 goto error_out;
1425 }
1426 data.rv = 0;
1427 break;
1428
1429 case netxen_nic_cmd_pci_mem_write:
1430 if ((retval = netxen_nic_pci_mem_write_ioctl(adapter, data.off,
1431 &(data.u),
1432 data.size)))
1433 goto error_out;
1434 data.rv = 0;
1214 break; 1435 break;
1215 1436
1216 case netxen_nic_cmd_pci_config_read: 1437 case netxen_nic_cmd_pci_config_read:
@@ -1295,7 +1516,7 @@ netxen_nic_do_ioctl(struct netxen_adapter *adapter, void *u_data,
1295 retval = -EOPNOTSUPP; 1516 retval = -EOPNOTSUPP;
1296 goto error_out; 1517 goto error_out;
1297 } 1518 }
1298 put_user(data.rv, (u16 __user *) (&(up_data->rv))); 1519 put_user(data.rv, (&(up_data->rv)));
1299 DPRINTK(INFO, "done ioctl for %p well.\n", adapter); 1520 DPRINTK(INFO, "done ioctl for %p well.\n", adapter);
1300 1521
1301 error_out: 1522 error_out:
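Reviewer note: the new pci_mem_read/pci_mem_write cases follow the same shape as the register cases above them: the request has already been copied in, a helper fills the union in the kernel copy, and only the union plus the rv status travels back to the caller's buffer. Below is a user-space model of the read direction; memcpy() stands in for copy_to_user()/put_user(), and the struct only approximates the real netxen_nic_ioctl_data from netxen_nic_ioctl.h.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct ioctl_data {
	uint32_t cmd;
	uint64_t off;
	uint64_t u;      /* value read from / written to the adapter */
	uint32_t size;
	uint16_t rv;     /* status reported back to the tool */
};

/* Stand-in for netxen_nic_pci_mem_read_ioctl(). */
static int hw_mem_read(uint64_t off, uint64_t *val, uint32_t size)
{
	*val = 0xdeadbeef00ULL + off;   /* pretend adapter memory content */
	return (size <= 8) ? 0 : -1;
}

static int do_mem_read(struct ioctl_data *k, struct ioctl_data *u)
{
	if (hw_mem_read(k->off, &k->u, k->size))
		return -1;                          /* -EFAULT in the driver */
	memcpy(&u->u, &k->u, k->size);              /* copy_to_user(&up_data->u, ...) */
	k->rv = 0;
	memcpy(&u->rv, &k->rv, sizeof(u->rv));      /* put_user(data.rv, &up_data->rv) */
	return 0;
}

int main(void)
{
	struct ioctl_data k = { 1, 0x40, 0, 8, 0 }, u = { 0, 0, 0, 0, 0 };
	if (do_mem_read(&k, &u) == 0)
		printf("u.u = 0x%llx rv = %u\n", (unsigned long long)u.u, u.rv);
	return 0;
}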
diff --git a/drivers/net/netxen/netxen_nic_ioctl.h b/drivers/net/netxen/netxen_nic_ioctl.h
index 8eef139f250b..1221fa527552 100644
--- a/drivers/net/netxen/netxen_nic_ioctl.h
+++ b/drivers/net/netxen/netxen_nic_ioctl.h
@@ -36,7 +36,7 @@
36#define NETXEN_NIC_CMD (NETXEN_CMD_START + 1) 36#define NETXEN_NIC_CMD (NETXEN_CMD_START + 1)
37#define NETXEN_NIC_NAME (NETXEN_CMD_START + 2) 37#define NETXEN_NIC_NAME (NETXEN_CMD_START + 2)
38#define NETXEN_NIC_NAME_LEN 16 38#define NETXEN_NIC_NAME_LEN 16
39#define NETXEN_NIC_NAME_RSP "NETXEN" 39#define NETXEN_NIC_NAME_RSP "NETXEN-UNM"
40 40
41typedef enum { 41typedef enum {
42 netxen_nic_cmd_none = 0, 42 netxen_nic_cmd_none = 0,
diff --git a/drivers/net/netxen/netxen_nic_isr.c b/drivers/net/netxen/netxen_nic_isr.c
index 0f6e7b8b65db..1b45f50fa6aa 100644
--- a/drivers/net/netxen/netxen_nic_isr.c
+++ b/drivers/net/netxen/netxen_nic_isr.c
@@ -68,8 +68,7 @@ struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev)
68void netxen_indicate_link_status(struct netxen_adapter *adapter, u32 portno, 68void netxen_indicate_link_status(struct netxen_adapter *adapter, u32 portno,
69 u32 link) 69 u32 link)
70{ 70{
71 struct netxen_port *pport = adapter->port[portno]; 71 struct net_device *netdev = (adapter->port[portno])->netdev;
72 struct net_device *netdev = pport->netdev;
73 72
74 if (link) 73 if (link)
75 netif_carrier_on(netdev); 74 netif_carrier_on(netdev);
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 6dbdc8be3949..06c4778f5200 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -32,6 +32,7 @@
32 */ 32 */
33 33
34#include <linux/vmalloc.h> 34#include <linux/vmalloc.h>
35#include <linux/highmem.h>
35#include "netxen_nic_hw.h" 36#include "netxen_nic_hw.h"
36 37
37#include "netxen_nic.h" 38#include "netxen_nic.h"
@@ -48,14 +49,21 @@ MODULE_DESCRIPTION("NetXen Multi port (1/10) Gigabit Network Driver");
48MODULE_LICENSE("GPL"); 49MODULE_LICENSE("GPL");
49MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID); 50MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
50 51
51char netxen_nic_driver_name[] = "netxen"; 52char netxen_nic_driver_name[] = "netxen-nic";
52static char netxen_nic_driver_string[] = "NetXen Network Driver version " 53static char netxen_nic_driver_string[] = "NetXen Network Driver version "
53 NETXEN_NIC_LINUX_VERSIONID; 54 NETXEN_NIC_LINUX_VERSIONID;
54 55
56struct netxen_adapter *g_adapter = NULL;
57
55#define NETXEN_NETDEV_WEIGHT 120 58#define NETXEN_NETDEV_WEIGHT 120
56#define NETXEN_ADAPTER_UP_MAGIC 777 59#define NETXEN_ADAPTER_UP_MAGIC 777
57#define NETXEN_NIC_PEG_TUNE 0 60#define NETXEN_NIC_PEG_TUNE 0
58 61
62u8 nx_p2_id = NX_P2_C0;
63
64#define DMA_32BIT_MASK 0x00000000ffffffffULL
65#define DMA_35BIT_MASK 0x00000007ffffffffULL
66
59/* Local functions to NetXen NIC driver */ 67/* Local functions to NetXen NIC driver */
60static int __devinit netxen_nic_probe(struct pci_dev *pdev, 68static int __devinit netxen_nic_probe(struct pci_dev *pdev,
61 const struct pci_device_id *ent); 69 const struct pci_device_id *ent);
@@ -87,6 +95,9 @@ static struct pci_device_id netxen_pci_tbl[] __devinitdata = {
87 95
88MODULE_DEVICE_TABLE(pci, netxen_pci_tbl); 96MODULE_DEVICE_TABLE(pci, netxen_pci_tbl);
89 97
98struct workqueue_struct *netxen_workq;
99static void netxen_watchdog(unsigned long);
100
90/* 101/*
91 * netxen_nic_probe() 102 * netxen_nic_probe()
92 * 103 *
@@ -105,20 +116,28 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
105 struct net_device *netdev = NULL; 116 struct net_device *netdev = NULL;
106 struct netxen_adapter *adapter = NULL; 117 struct netxen_adapter *adapter = NULL;
107 struct netxen_port *port = NULL; 118 struct netxen_port *port = NULL;
108 u8 *mem_ptr0 = NULL; 119 void __iomem *mem_ptr0 = NULL;
109 u8 *mem_ptr1 = NULL; 120 void __iomem *mem_ptr1 = NULL;
110 u8 *mem_ptr2 = NULL; 121 void __iomem *mem_ptr2 = NULL;
111 122
112 unsigned long mem_base, mem_len; 123 u8 *db_ptr = NULL;
124 unsigned long mem_base, mem_len, db_base, db_len;
113 int pci_using_dac, i, err; 125 int pci_using_dac, i, err;
114 int ring; 126 int ring;
115 struct netxen_recv_context *recv_ctx = NULL; 127 struct netxen_recv_context *recv_ctx = NULL;
116 struct netxen_rcv_desc_ctx *rcv_desc = NULL; 128 struct netxen_rcv_desc_ctx *rcv_desc = NULL;
117 struct netxen_cmd_buffer *cmd_buf_arr = NULL; 129 struct netxen_cmd_buffer *cmd_buf_arr = NULL;
118 u64 mac_addr[FLASH_NUM_PORTS + 1]; 130 u64 mac_addr[FLASH_NUM_PORTS + 1];
119 int valid_mac; 131 int valid_mac = 0;
132 static int netxen_cards_found = 0;
120 133
121 printk(KERN_INFO "%s \n", netxen_nic_driver_string); 134 printk(KERN_INFO "%s \n", netxen_nic_driver_string);
135 /* In current scheme, we use only PCI function 0 */
136 if (PCI_FUNC(pdev->devfn) != 0) {
137 DPRINTK(ERR, "NetXen function %d will not be enabled.\n",
138 PCI_FUNC(pdev->devfn));
139 return -ENODEV;
140 }
122 if ((err = pci_enable_device(pdev))) 141 if ((err = pci_enable_device(pdev)))
123 return err; 142 return err;
124 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 143 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
@@ -130,10 +149,12 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
130 goto err_out_disable_pdev; 149 goto err_out_disable_pdev;
131 150
132 pci_set_master(pdev); 151 pci_set_master(pdev);
133 if ((pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) && 152 pci_read_config_byte(pdev, PCI_REVISION_ID, &nx_p2_id);
134 (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) == 0)) 153 if (nx_p2_id == NX_P2_C1 &&
154 (pci_set_dma_mask(pdev, DMA_35BIT_MASK) == 0) &&
155 (pci_set_consistent_dma_mask(pdev, DMA_35BIT_MASK) == 0)) {
135 pci_using_dac = 1; 156 pci_using_dac = 1;
136 else { 157 } else {
137 if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) || 158 if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) ||
138 (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) 159 (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)))
139 goto err_out_free_res; 160 goto err_out_free_res;
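Reviewer note: the probe path now keys the DMA mask off the PCI revision byte: only the C1 part is given the 35-bit mask, everything else falls back to 32-bit addressing with pci_using_dac cleared. A standalone model of that decision, with a stub standing in for pci_set_dma_mask()/pci_set_consistent_dma_mask() and with illustrative revision values (the real NX_P2_* ids come from the hardware headers):

#include <stdio.h>
#include <stdint.h>

#define DMA_32BIT_MASK 0x00000000ffffffffULL
#define DMA_35BIT_MASK 0x00000007ffffffffULL

#define NX_P2_C0 0x24   /* placeholder revision ids for the example */
#define NX_P2_C1 0x25

/* Stub for the kernel mask setters; returns 0 on success as they do. */
static int set_mask(uint64_t mask)
{
	(void)mask;
	return 0;
}

/* Mirrors the hunk above: wide mask only for the C1 revision, else 32-bit. */
static uint64_t pick_dma_mask(uint8_t revision, int *using_dac)
{
	if (revision == NX_P2_C1 && set_mask(DMA_35BIT_MASK) == 0) {
		*using_dac = 1;
		return DMA_35BIT_MASK;
	}
	*using_dac = 0;
	set_mask(DMA_32BIT_MASK);
	return DMA_32BIT_MASK;
}

int main(void)
{
	int dac;
	printf("C1: mask %#llx dac=%d\n",
	       (unsigned long long)pick_dma_mask(NX_P2_C1, &dac), dac);
	printf("C0: mask %#llx dac=%d\n",
	       (unsigned long long)pick_dma_mask(NX_P2_C0, &dac), dac);
	return 0;
}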
@@ -153,21 +174,34 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
153 ioremap(mem_base + THIRD_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE); 174 ioremap(mem_base + THIRD_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE);
154 175
155 if ((mem_ptr0 == 0UL) || (mem_ptr1 == 0UL) || (mem_ptr2 == 0UL)) { 176 if ((mem_ptr0 == 0UL) || (mem_ptr1 == 0UL) || (mem_ptr2 == 0UL)) {
156 DPRINTK(1, ERR, 177 DPRINTK(ERR,
157 "Cannot remap adapter memory aborting.:" 178 "Cannot remap adapter memory aborting.:"
158 "0 -> %p, 1 -> %p, 2 -> %p\n", 179 "0 -> %p, 1 -> %p, 2 -> %p\n",
159 mem_ptr0, mem_ptr1, mem_ptr2); 180 mem_ptr0, mem_ptr1, mem_ptr2);
160 181
161 err = -EIO; 182 err = -EIO;
162 if (mem_ptr0) 183 goto err_out_iounmap;
163 iounmap(mem_ptr0); 184 }
164 if (mem_ptr1) 185 db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */
165 iounmap(mem_ptr1); 186 db_len = pci_resource_len(pdev, 4);
166 if (mem_ptr2) 187
167 iounmap(mem_ptr2); 188 if (db_len == 0) {
168 189 printk(KERN_ERR "%s: doorbell is disabled\n",
169 goto err_out_free_res; 190 netxen_nic_driver_name);
191 err = -EIO;
192 goto err_out_iounmap;
193 }
194 DPRINTK(INFO, "doorbell ioremap from %lx a size of %lx\n", db_base,
195 db_len);
196
197 db_ptr = ioremap(db_base, NETXEN_DB_MAPSIZE_BYTES);
198 if (db_ptr == 0UL) {
199 printk(KERN_ERR "%s: Failed to allocate doorbell map.",
200 netxen_nic_driver_name);
201 err = -EIO;
202 goto err_out_iounmap;
170 } 203 }
204 DPRINTK(INFO, "doorbell ioremaped at %p\n", db_ptr);
171 205
172/* 206/*
173 * Allocate a adapter structure which will manage all the initialization 207 * Allocate a adapter structure which will manage all the initialization
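Reviewer note: besides the three memory windows, probe now maps the doorbell region from BAR 4; the ioremap() length is the driver's NETXEN_DB_MAPSIZE_BYTES constant rather than the full BAR size, and a zero-length BAR is treated as "doorbell disabled" and fails the probe. The condensed sequence below is a kernel-style fragment of that path, not a standalone program; error labels and logging are trimmed and the surrounding locals (pdev, adapter, db_*) are assumed from the probe function.

	/* Sketch of the doorbell mapping added above. */
	db_base = pci_resource_start(pdev, 4);      /* doorbell lives in BAR 4 */
	db_len  = pci_resource_len(pdev, 4);
	if (db_len == 0)
		goto err_out_iounmap;               /* doorbell disabled: give up */

	db_ptr = ioremap(db_base, NETXEN_DB_MAPSIZE_BYTES);
	if (!db_ptr)
		goto err_out_iounmap;               /* mem_ptr0..2 are unmapped there */

	adapter->ahw.db_base = db_ptr;              /* saved for DB_NORMALIZE() users */
	adapter->ahw.db_len  = db_len;
	/* teardown side: iounmap(adapter->ahw.db_base); */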
@@ -183,17 +217,24 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
183 netxen_nic_driver_name, 217 netxen_nic_driver_name,
184 (int)sizeof(struct netxen_adapter)); 218 (int)sizeof(struct netxen_adapter));
185 err = -ENOMEM; 219 err = -ENOMEM;
186 goto err_out_iounmap; 220 goto err_out_dbunmap;
187 } 221 }
188 222
223 if (netxen_cards_found == 0) {
224 g_adapter = adapter;
225 }
189 adapter->max_tx_desc_count = MAX_CMD_DESCRIPTORS; 226 adapter->max_tx_desc_count = MAX_CMD_DESCRIPTORS;
190 adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS; 227 adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS;
191 adapter->max_jumbo_rx_desc_count = MAX_JUMBO_RCV_DESCRIPTORS; 228 adapter->max_jumbo_rx_desc_count = MAX_JUMBO_RCV_DESCRIPTORS;
229 adapter->max_lro_rx_desc_count = MAX_LRO_RCV_DESCRIPTORS;
192 230
193 pci_set_drvdata(pdev, adapter); 231 pci_set_drvdata(pdev, adapter);
194 232
195 cmd_buf_arr = (struct netxen_cmd_buffer *)vmalloc(TX_RINGSIZE); 233 cmd_buf_arr = (struct netxen_cmd_buffer *)vmalloc(TX_RINGSIZE);
196 if (cmd_buf_arr == NULL) { 234 if (cmd_buf_arr == NULL) {
235 printk(KERN_ERR
236 "%s: Could not allocate cmd_buf_arr memory:%d\n",
237 netxen_nic_driver_name, (int)TX_RINGSIZE);
197 err = -ENOMEM; 238 err = -ENOMEM;
198 goto err_out_free_adapter; 239 goto err_out_free_adapter;
199 } 240 }
@@ -220,11 +261,23 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
220 rcv_desc->skb_size = MAX_RX_JUMBO_BUFFER_LENGTH; 261 rcv_desc->skb_size = MAX_RX_JUMBO_BUFFER_LENGTH;
221 break; 262 break;
222 263
264 case RCV_RING_LRO:
265 rcv_desc->max_rx_desc_count =
266 adapter->max_lro_rx_desc_count;
267 rcv_desc->flags = RCV_DESC_LRO;
268 rcv_desc->dma_size = RX_LRO_DMA_MAP_LEN;
269 rcv_desc->skb_size = MAX_RX_LRO_BUFFER_LENGTH;
270 break;
271
223 } 272 }
224 rcv_desc->rx_buf_arr = (struct netxen_rx_buffer *) 273 rcv_desc->rx_buf_arr = (struct netxen_rx_buffer *)
225 vmalloc(RCV_BUFFSIZE); 274 vmalloc(RCV_BUFFSIZE);
226 275
227 if (rcv_desc->rx_buf_arr == NULL) { 276 if (rcv_desc->rx_buf_arr == NULL) {
277 printk(KERN_ERR "%s: Could not allocate"
278 "rcv_desc->rx_buf_arr memory:%d\n",
279 netxen_nic_driver_name,
280 (int)RCV_BUFFSIZE);
228 err = -ENOMEM; 281 err = -ENOMEM;
229 goto err_out_free_rx_buffer; 282 goto err_out_free_rx_buffer;
230 } 283 }
@@ -237,16 +290,17 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
237 adapter->ahw.pci_base0 = mem_ptr0; 290 adapter->ahw.pci_base0 = mem_ptr0;
238 adapter->ahw.pci_base1 = mem_ptr1; 291 adapter->ahw.pci_base1 = mem_ptr1;
239 adapter->ahw.pci_base2 = mem_ptr2; 292 adapter->ahw.pci_base2 = mem_ptr2;
293 adapter->ahw.db_base = db_ptr;
294 adapter->ahw.db_len = db_len;
240 spin_lock_init(&adapter->tx_lock); 295 spin_lock_init(&adapter->tx_lock);
241 spin_lock_init(&adapter->lock); 296 spin_lock_init(&adapter->lock);
297 netxen_initialize_adapter_sw(adapter); /* initialize the buffers in adapter */
242#ifdef CONFIG_IA64 298#ifdef CONFIG_IA64
243 netxen_pinit_from_rom(adapter, 0); 299 netxen_pinit_from_rom(adapter, 0);
244 udelay(500); 300 udelay(500);
245 netxen_load_firmware(adapter); 301 netxen_load_firmware(adapter);
246#endif 302#endif
247 303
248 /* initialize the buffers in adapter */
249 netxen_initialize_adapter_sw(adapter);
250 /* 304 /*
251 * Set the CRB window to invalid. If any register in window 0 is 305 * Set the CRB window to invalid. If any register in window 0 is
252 * accessed it should set the window to 0 and then reset it to 1. 306 * accessed it should set the window to 0 and then reset it to 1.
@@ -268,7 +322,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
268 (void (*)(void *))netxen_watchdog_task, adapter); 322 (void (*)(void *))netxen_watchdog_task, adapter);
269 adapter->ahw.pdev = pdev; 323 adapter->ahw.pdev = pdev;
270 adapter->proc_cmd_buf_counter = 0; 324 adapter->proc_cmd_buf_counter = 0;
271 pci_read_config_byte(pdev, PCI_REVISION_ID, &adapter->ahw.revision_id); 325 adapter->ahw.revision_id = nx_p2_id;
272 326
273 if (pci_enable_msi(pdev)) { 327 if (pci_enable_msi(pdev)) {
274 adapter->flags &= ~NETXEN_NIC_MSI_ENABLED; 328 adapter->flags &= ~NETXEN_NIC_MSI_ENABLED;
@@ -290,6 +344,12 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
290 writel(0, NETXEN_CRB_NORMALIZE(adapter, CRB_CMD_CONSUMER_OFFSET)); 344 writel(0, NETXEN_CRB_NORMALIZE(adapter, CRB_CMD_CONSUMER_OFFSET));
291 writel(0, NETXEN_CRB_NORMALIZE(adapter, CRB_HOST_CMD_ADDR_LO)); 345 writel(0, NETXEN_CRB_NORMALIZE(adapter, CRB_HOST_CMD_ADDR_LO));
292 346
347 /* do this before waking up pegs so that we have valid dummy dma addr */
348 err = netxen_initialize_adapter_offload(adapter);
349 if (err) {
350 goto err_out_free_dev;
351 }
352
293 /* Unlock the HW, prompting the boot sequence */ 353 /* Unlock the HW, prompting the boot sequence */
294 writel(1, 354 writel(1,
295 NETXEN_CRB_NORMALIZE(adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE)); 355 NETXEN_CRB_NORMALIZE(adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE));
@@ -298,6 +358,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
298 netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE); 358 netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE);
299 359
300 /* initialize the all the ports */ 360 /* initialize the all the ports */
361 adapter->active_ports = 0;
301 362
302 for (i = 0; i < adapter->ahw.max_ports; i++) { 363 for (i = 0; i < adapter->ahw.max_ports; i++) {
303 netdev = alloc_etherdev(sizeof(struct netxen_port)); 364 netdev = alloc_etherdev(sizeof(struct netxen_port));
@@ -368,7 +429,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
368 netdev->dev_addr); 429 netdev->dev_addr);
369 } 430 }
370 } 431 }
371 INIT_WORK(&adapter->tx_timeout_task, 432 INIT_WORK(adapter->tx_timeout_task + i,
372 (void (*)(void *))netxen_tx_timeout_task, netdev); 433 (void (*)(void *))netxen_tx_timeout_task, netdev);
373 netif_carrier_off(netdev); 434 netif_carrier_off(netdev);
374 netif_stop_queue(netdev); 435 netif_stop_queue(netdev);
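Reviewer note: tx_timeout_task is now an array of work items, one per port, initialized here with the port's netdev as the callback argument and later scheduled by index, so a timeout on one port no longer kicks the others. The fragment below restates the pattern; it assumes the per-port array is declared in struct netxen_adapter and that SCHEDULE_WORK() is the driver's own wrapper (not shown in this hunk) that queues onto the dedicated netxen_workq, using the pre-2.6.20 three-argument INIT_WORK as elsewhere in this patch.

	/* one work item per port, handler gets that port's netdev */
	INIT_WORK(adapter->tx_timeout_task + i,
		  (void (*)(void *))netxen_tx_timeout_task, netdev);

	/* ...and when port i times out, only that port's task is scheduled: */
	SCHEDULE_WORK(port->adapter->tx_timeout_task + port->portnum);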
@@ -381,7 +442,6 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
381 goto err_out_free_dev; 442 goto err_out_free_dev;
382 } 443 }
383 adapter->port_count++; 444 adapter->port_count++;
384 adapter->active_ports = 0;
385 adapter->port[i] = port; 445 adapter->port[i] = port;
386 } 446 }
387 447
@@ -402,6 +462,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
402 break; 462 break;
403 } 463 }
404 464
465 adapter->number = netxen_cards_found;
405 adapter->driver_mismatch = 0; 466 adapter->driver_mismatch = 0;
406 467
407 return 0; 468 return 0;
@@ -417,6 +478,8 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
417 } 478 }
418 } 479 }
419 480
481 netxen_free_adapter_offload(adapter);
482
420 err_out_free_rx_buffer: 483 err_out_free_rx_buffer:
421 for (i = 0; i < MAX_RCV_CTX; ++i) { 484 for (i = 0; i < MAX_RCV_CTX; ++i) {
422 recv_ctx = &adapter->recv_ctx[i]; 485 recv_ctx = &adapter->recv_ctx[i];
@@ -428,19 +491,23 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
428 } 491 }
429 } 492 }
430 } 493 }
431
432 vfree(cmd_buf_arr); 494 vfree(cmd_buf_arr);
433 495
434 kfree(adapter->port);
435
436 err_out_free_adapter: 496 err_out_free_adapter:
437 pci_set_drvdata(pdev, NULL); 497 pci_set_drvdata(pdev, NULL);
438 kfree(adapter); 498 kfree(adapter);
439 499
500 err_out_dbunmap:
501 if (db_ptr)
502 iounmap(db_ptr);
503
440 err_out_iounmap: 504 err_out_iounmap:
441 iounmap(mem_ptr0); 505 if (mem_ptr0)
442 iounmap(mem_ptr1); 506 iounmap(mem_ptr0);
443 iounmap(mem_ptr2); 507 if (mem_ptr1)
508 iounmap(mem_ptr1);
509 if (mem_ptr2)
510 iounmap(mem_ptr2);
444 511
445 err_out_free_res: 512 err_out_free_res:
446 pci_release_regions(pdev); 513 pci_release_regions(pdev);
@@ -465,12 +532,8 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
465 532
466 netxen_nic_stop_all_ports(adapter); 533 netxen_nic_stop_all_ports(adapter);
467 /* leave the hw in the same state as reboot */ 534 /* leave the hw in the same state as reboot */
468 netxen_pinit_from_rom(adapter, 0);
469 udelay(500);
470 netxen_load_firmware(adapter); 535 netxen_load_firmware(adapter);
471 536 netxen_free_adapter_offload(adapter);
472 if ((adapter->flags & NETXEN_NIC_MSI_ENABLED))
473 netxen_nic_disable_int(adapter);
474 537
475 udelay(500); /* Delay for a while to drain the DMA engines */ 538 udelay(500); /* Delay for a while to drain the DMA engines */
476 for (i = 0; i < adapter->port_count; i++) { 539 for (i = 0; i < adapter->port_count; i++) {
@@ -487,6 +550,7 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
487 if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) 550 if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC)
488 netxen_free_hw_resources(adapter); 551 netxen_free_hw_resources(adapter);
489 552
553 iounmap(adapter->ahw.db_base);
490 iounmap(adapter->ahw.pci_base0); 554 iounmap(adapter->ahw.pci_base0);
491 iounmap(adapter->ahw.pci_base1); 555 iounmap(adapter->ahw.pci_base1);
492 iounmap(adapter->ahw.pci_base2); 556 iounmap(adapter->ahw.pci_base2);
@@ -534,6 +598,8 @@ static int netxen_nic_open(struct net_device *netdev)
534 return -EIO; 598 return -EIO;
535 } 599 }
536 netxen_nic_flash_print(adapter); 600 netxen_nic_flash_print(adapter);
601 if (adapter->init_niu)
602 adapter->init_niu(adapter);
537 603
538 /* setup all the resources for the Phantom... */ 604 /* setup all the resources for the Phantom... */
539 /* this include the descriptors for rcv, tx, and status */ 605 /* this include the descriptors for rcv, tx, and status */
@@ -551,25 +617,24 @@ static int netxen_nic_open(struct net_device *netdev)
551 netxen_free_hw_resources(adapter); 617 netxen_free_hw_resources(adapter);
552 return -EIO; 618 return -EIO;
553 } 619 }
554 if (adapter->init_niu)
555 adapter->init_niu(adapter);
556 for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) { 620 for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
557 for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) 621 for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++)
558 netxen_post_rx_buffers(adapter, ctx, ring); 622 netxen_post_rx_buffers(adapter, ctx, ring);
559 } 623 }
560 adapter->is_up = NETXEN_ADAPTER_UP_MAGIC; 624 adapter->irq = adapter->ahw.pdev->irq;
561 }
562 adapter->active_ports++;
563 if (adapter->active_ports == 1) {
564 err = request_irq(adapter->ahw.pdev->irq, &netxen_intr, 625 err = request_irq(adapter->ahw.pdev->irq, &netxen_intr,
565 SA_SHIRQ | SA_SAMPLE_RANDOM, netdev->name, 626 SA_SHIRQ | SA_SAMPLE_RANDOM, netdev->name,
566 adapter); 627 adapter);
567 if (err) { 628 if (err) {
568 printk(KERN_ERR "request_irq failed with: %d\n", err); 629 printk(KERN_ERR "request_irq failed with: %d\n", err);
569 adapter->active_ports--; 630 netxen_free_hw_resources(adapter);
570 return err; 631 return err;
571 } 632 }
572 adapter->irq = adapter->ahw.pdev->irq; 633
634 adapter->is_up = NETXEN_ADAPTER_UP_MAGIC;
635 }
636 adapter->active_ports++;
637 if (adapter->active_ports == 1) {
573 if (!adapter->driver_mismatch) 638 if (!adapter->driver_mismatch)
574 mod_timer(&adapter->watchdog_timer, jiffies); 639 mod_timer(&adapter->watchdog_timer, jiffies);
575 640
@@ -583,6 +648,9 @@ static int netxen_nic_open(struct net_device *netdev)
583 netxen_nic_set_link_parameters(port); 648 netxen_nic_set_link_parameters(port);
584 649
585 netxen_nic_set_multi(netdev); 650 netxen_nic_set_multi(netdev);
651 if (adapter->set_mtu)
652 adapter->set_mtu(port, netdev->mtu);
653
586 if (!adapter->driver_mismatch) 654 if (!adapter->driver_mismatch)
587 netif_start_queue(netdev); 655 netif_start_queue(netdev);
588 656
@@ -635,6 +703,7 @@ static int netxen_nic_close(struct net_device *netdev)
635 } 703 }
636 cmd_buff++; 704 cmd_buff++;
637 } 705 }
706 FLUSH_SCHEDULED_WORK();
638 del_timer_sync(&adapter->watchdog_timer); 707 del_timer_sync(&adapter->watchdog_timer);
639 } 708 }
640 709
@@ -655,7 +724,6 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
655 struct cmd_desc_type0 *hwdesc; 724 struct cmd_desc_type0 *hwdesc;
656 int k; 725 int k;
657 struct netxen_cmd_buffer *pbuf = NULL; 726 struct netxen_cmd_buffer *pbuf = NULL;
658 unsigned int tries = 0;
659 static int dropped_packet = 0; 727 static int dropped_packet = 0;
660 int frag_count; 728 int frag_count;
661 u32 local_producer = 0; 729 u32 local_producer = 0;
@@ -717,7 +785,7 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
717 if (((skb->nh.iph)->ihl * sizeof(u32)) + 785 if (((skb->nh.iph)->ihl * sizeof(u32)) +
718 ((skb->h.th)->doff * sizeof(u32)) + 786 ((skb->h.th)->doff * sizeof(u32)) +
719 sizeof(struct ethhdr) > 787 sizeof(struct ethhdr) >
720 (sizeof(struct cmd_desc_type0) - NET_IP_ALIGN)) { 788 (sizeof(struct cmd_desc_type0) - 2)) {
721 no_of_desc++; 789 no_of_desc++;
722 } 790 }
723 } 791 }
@@ -728,27 +796,17 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
728 if ((k + no_of_desc) >= 796 if ((k + no_of_desc) >=
729 ((last_cmd_consumer <= k) ? last_cmd_consumer + max_tx_desc_count : 797 ((last_cmd_consumer <= k) ? last_cmd_consumer + max_tx_desc_count :
730 last_cmd_consumer)) { 798 last_cmd_consumer)) {
799 port->stats.nocmddescriptor++;
800 DPRINTK(ERR, "No command descriptors available,"
801 " producer = %d, consumer = %d count=%llu,"
802 " dropping packet\n", producer,
803 adapter->last_cmd_consumer,
804 port->stats.nocmddescriptor);
805
806 netif_stop_queue(netdev);
807 port->flags |= NETXEN_NETDEV_STATUS;
731 spin_unlock_bh(&adapter->tx_lock); 808 spin_unlock_bh(&adapter->tx_lock);
732 if (tries == 0) { 809 return NETDEV_TX_BUSY;
733 local_bh_disable();
734 netxen_process_cmd_ring((unsigned long)adapter);
735 local_bh_enable();
736 ++tries;
737 goto retry_getting_window;
738 } else {
739 port->stats.nocmddescriptor++;
740 DPRINTK(ERR, "No command descriptors available,"
741 " producer = %d, consumer = %d count=%llu,"
742 " dropping packet\n", producer,
743 adapter->last_cmd_consumer,
744 port->stats.nocmddescriptor);
745
746 spin_lock_bh(&adapter->tx_lock);
747 netif_stop_queue(netdev);
748 port->flags |= NETXEN_NETDEV_STATUS;
749 spin_unlock_bh(&adapter->tx_lock);
750 return NETDEV_TX_BUSY;
751 }
752 } 810 }
753 k = get_index_range(k, max_tx_desc_count, no_of_desc); 811 k = get_index_range(k, max_tx_desc_count, no_of_desc);
754 adapter->cmd_producer = k; 812 adapter->cmd_producer = k;
@@ -770,7 +828,6 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
770 pbuf->mss = 0; 828 pbuf->mss = 0;
771 hwdesc->mss = 0; 829 hwdesc->mss = 0;
772 } 830 }
773 pbuf->no_of_descriptors = no_of_desc;
774 pbuf->total_length = skb->len; 831 pbuf->total_length = skb->len;
775 pbuf->skb = skb; 832 pbuf->skb = skb;
776 pbuf->cmd = TX_ETHER_PKT; 833 pbuf->cmd = TX_ETHER_PKT;
@@ -780,11 +837,11 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
780 buffrag->dma = pci_map_single(port->pdev, skb->data, first_seg_len, 837 buffrag->dma = pci_map_single(port->pdev, skb->data, first_seg_len,
781 PCI_DMA_TODEVICE); 838 PCI_DMA_TODEVICE);
782 buffrag->length = first_seg_len; 839 buffrag->length = first_seg_len;
783 CMD_DESC_TOTAL_LENGTH_WRT(hwdesc, skb->len); 840 netxen_set_cmd_desc_totallength(hwdesc, skb->len);
784 hwdesc->num_of_buffers = frag_count; 841 netxen_set_cmd_desc_num_of_buff(hwdesc, frag_count);
785 hwdesc->opcode = TX_ETHER_PKT; 842 netxen_set_cmd_desc_opcode(hwdesc, TX_ETHER_PKT);
786 843
787 CMD_DESC_PORT_WRT(hwdesc, port->portnum); 844 netxen_set_cmd_desc_port(hwdesc, port->portnum);
788 hwdesc->buffer1_length = cpu_to_le16(first_seg_len); 845 hwdesc->buffer1_length = cpu_to_le16(first_seg_len);
789 hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma); 846 hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
790 847
@@ -843,12 +900,12 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
843 /* For LSO, we need to copy the MAC/IP/TCP headers into 900 /* For LSO, we need to copy the MAC/IP/TCP headers into
844 * the descriptor ring 901 * the descriptor ring
845 */ 902 */
846 if (hw->cmd_desc_head[saved_producer].opcode == TX_TCP_LSO) { 903 if (netxen_get_cmd_desc_opcode(&hw->cmd_desc_head[saved_producer])
904 == TX_TCP_LSO) {
847 int hdr_len, first_hdr_len, more_hdr; 905 int hdr_len, first_hdr_len, more_hdr;
848 hdr_len = hw->cmd_desc_head[saved_producer].total_hdr_length; 906 hdr_len = hw->cmd_desc_head[saved_producer].total_hdr_length;
849 if (hdr_len > (sizeof(struct cmd_desc_type0) - NET_IP_ALIGN)) { 907 if (hdr_len > (sizeof(struct cmd_desc_type0) - 2)) {
850 first_hdr_len = 908 first_hdr_len = sizeof(struct cmd_desc_type0) - 2;
851 sizeof(struct cmd_desc_type0) - NET_IP_ALIGN;
852 more_hdr = 1; 909 more_hdr = 1;
853 } else { 910 } else {
854 first_hdr_len = hdr_len; 911 first_hdr_len = hdr_len;
@@ -858,7 +915,7 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
858 hwdesc = &hw->cmd_desc_head[producer]; 915 hwdesc = &hw->cmd_desc_head[producer];
859 916
860 /* copy the first 64 bytes */ 917 /* copy the first 64 bytes */
861 memcpy(((void *)hwdesc) + NET_IP_ALIGN, 918 memcpy(((void *)hwdesc) + 2,
862 (void *)(skb->data), first_hdr_len); 919 (void *)(skb->data), first_hdr_len);
863 producer = get_next_index(producer, max_tx_desc_count); 920 producer = get_next_index(producer, max_tx_desc_count);
864 921
@@ -874,7 +931,7 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
874 } 931 }
875 spin_lock_bh(&adapter->tx_lock); 932 spin_lock_bh(&adapter->tx_lock);
876 port->stats.txbytes += 933 port->stats.txbytes +=
877 CMD_DESC_TOTAL_LENGTH(&hw->cmd_desc_head[saved_producer]); 934 netxen_get_cmd_desc_totallength(&hw->cmd_desc_head[saved_producer]);
878 /* Code to update the adapter considering how many producer threads 935 /* Code to update the adapter considering how many producer threads
879 are currently working */ 936 are currently working */
880 if ((--adapter->num_threads) == 0) { 937 if ((--adapter->num_threads) == 0) {
@@ -884,20 +941,6 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
884 NETXEN_CRB_NORMALIZE(adapter, CRB_CMD_PRODUCER_OFFSET)); 941 NETXEN_CRB_NORMALIZE(adapter, CRB_CMD_PRODUCER_OFFSET));
885 wmb(); 942 wmb();
886 adapter->total_threads = 0; 943 adapter->total_threads = 0;
887 } else {
888 u32 crb_producer = 0;
889 crb_producer =
890 readl(NETXEN_CRB_NORMALIZE
891 (adapter, CRB_CMD_PRODUCER_OFFSET));
892 if (crb_producer == local_producer) {
893 crb_producer = get_index_range(crb_producer,
894 max_tx_desc_count,
895 no_of_desc);
896 writel(crb_producer,
897 NETXEN_CRB_NORMALIZE(adapter,
898 CRB_CMD_PRODUCER_OFFSET));
899 wmb();
900 }
901 } 944 }
902 945
903 port->stats.xmitfinished++; 946 port->stats.xmitfinished++;
@@ -914,15 +957,20 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
914static void netxen_watchdog(unsigned long v) 957static void netxen_watchdog(unsigned long v)
915{ 958{
916 struct netxen_adapter *adapter = (struct netxen_adapter *)v; 959 struct netxen_adapter *adapter = (struct netxen_adapter *)v;
917 schedule_work(&adapter->watchdog_task); 960 if (adapter != g_adapter) {
961 printk("%s: ***BUG*** adapter[%p] != g_adapter[%p]\n",
962 __FUNCTION__, adapter, g_adapter);
963 return;
964 }
965
966 SCHEDULE_WORK(&adapter->watchdog_task);
918} 967}
919 968
920static void netxen_tx_timeout(struct net_device *netdev) 969static void netxen_tx_timeout(struct net_device *netdev)
921{ 970{
922 struct netxen_port *port = (struct netxen_port *)netdev_priv(netdev); 971 struct netxen_port *port = (struct netxen_port *)netdev_priv(netdev);
923 struct netxen_adapter *adapter = port->adapter;
924 972
925 schedule_work(&adapter->tx_timeout_task); 973 SCHEDULE_WORK(port->adapter->tx_timeout_task + port->portnum);
926} 974}
927 975
928static void netxen_tx_timeout_task(struct net_device *netdev) 976static void netxen_tx_timeout_task(struct net_device *netdev)
@@ -953,6 +1001,11 @@ netxen_handle_int(struct netxen_adapter *adapter, struct net_device *netdev)
953 if (!(adapter->flags & NETXEN_NIC_MSI_ENABLED)) { 1001 if (!(adapter->flags & NETXEN_NIC_MSI_ENABLED)) {
954 int count = 0; 1002 int count = 0;
955 u32 mask; 1003 u32 mask;
1004 mask = readl(pci_base_offset(adapter, ISR_INT_VECTOR));
1005 if ((mask & 0x80) == 0) {
1006 /* not our interrupt */
1007 return ret;
1008 }
956 netxen_nic_disable_int(adapter); 1009 netxen_nic_disable_int(adapter);
957 /* Window = 0 or 1 */ 1010 /* Window = 0 or 1 */
958 do { 1011 do {
@@ -1012,7 +1065,10 @@ irqreturn_t netxen_intr(int irq, void *data)
1012 netdev = port->netdev; 1065 netdev = port->netdev;
1013 1066
1014 /* process our status queue (for all 4 ports) */ 1067 /* process our status queue (for all 4 ports) */
1015 netxen_handle_int(adapter, netdev); 1068 if (netif_running(netdev)) {
1069 netxen_handle_int(adapter, netdev);
1070 break;
1071 }
1016 } 1072 }
1017 1073
1018 return IRQ_HANDLED; 1074 return IRQ_HANDLED;
@@ -1054,11 +1110,11 @@ static int netxen_nic_poll(struct net_device *netdev, int *budget)
1054 netdev->quota -= work_done; 1110 netdev->quota -= work_done;
1055 *budget -= work_done; 1111 *budget -= work_done;
1056 1112
1057 if (work_done >= work_to_do 1113 if (work_done >= work_to_do && netxen_nic_rx_has_work(adapter) != 0)
1058 && netxen_nic_rx_has_work(adapter) != 0)
1059 done = 0; 1114 done = 0;
1060 1115
1061 netxen_process_cmd_ring((unsigned long)adapter); 1116 if (netxen_process_cmd_ring((unsigned long)adapter) == 0)
1117 done = 0;
1062 1118
1063 DPRINTK(INFO, "new work_done: %d work_to_do: %d\n", 1119 DPRINTK(INFO, "new work_done: %d work_to_do: %d\n",
1064 work_done, work_to_do); 1120 work_done, work_to_do);
@@ -1104,8 +1160,9 @@ netxen_nic_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1104 if (ifr->ifr_data) { 1160 if (ifr->ifr_data) {
1105 sprintf(dev_name, "%s-%d", NETXEN_NIC_NAME_RSP, 1161 sprintf(dev_name, "%s-%d", NETXEN_NIC_NAME_RSP,
1106 port->portnum); 1162 port->portnum);
1107 nr_bytes = copy_to_user((char *)ifr->ifr_data, dev_name, 1163 nr_bytes =
1108 NETXEN_NIC_NAME_LEN); 1164 copy_to_user((char __user *)ifr->ifr_data, dev_name,
1165 NETXEN_NIC_NAME_LEN);
1109 if (nr_bytes) 1166 if (nr_bytes)
1110 err = -EIO; 1167 err = -EIO;
1111 1168
@@ -1132,6 +1189,9 @@ static struct pci_driver netxen_driver = {
1132 1189
1133static int __init netxen_init_module(void) 1190static int __init netxen_init_module(void)
1134{ 1191{
1192 if ((netxen_workq = create_singlethread_workqueue("netxen")) == 0)
1193 return -ENOMEM;
1194
1135 return pci_module_init(&netxen_driver); 1195 return pci_module_init(&netxen_driver);
1136} 1196}
1137 1197
@@ -1142,7 +1202,7 @@ static void __exit netxen_exit_module(void)
1142 /* 1202 /*
1143 * Wait for some time to allow the dma to drain, if any. 1203 * Wait for some time to allow the dma to drain, if any.
1144 */ 1204 */
1145 mdelay(5); 1205 destroy_workqueue(netxen_workq);
1146 pci_unregister_driver(&netxen_driver); 1206 pci_unregister_driver(&netxen_driver);
1147} 1207}
1148 1208
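Reviewer note: module init now creates the single-threaded "netxen" workqueue that the watchdog and the per-port timeout tasks run on, and module exit destroys it in place of the old mdelay(); destroy_workqueue() flushes anything still queued, which together with the FLUSH_SCHEDULED_WORK() call in the close path keeps work items from outliving the driver. A condensed sketch of the pairing (kernel-style fragment, mirroring the hunks above):

/* Workqueue lifecycle introduced by this patch. */
struct workqueue_struct *netxen_workq;

static int __init netxen_init_module(void)
{
	netxen_workq = create_singlethread_workqueue("netxen");
	if (!netxen_workq)
		return -ENOMEM;
	return pci_module_init(&netxen_driver);
}

static void __exit netxen_exit_module(void)
{
	destroy_workqueue(netxen_workq);       /* flushes anything still queued */
	pci_unregister_driver(&netxen_driver);
}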
diff --git a/drivers/net/netxen/netxen_nic_niu.c b/drivers/net/netxen/netxen_nic_niu.c
index ff74f1e413d4..4987dc765d99 100644
--- a/drivers/net/netxen/netxen_nic_niu.c
+++ b/drivers/net/netxen/netxen_nic_niu.c
@@ -40,13 +40,15 @@
40 40
41static long phy_lock_timeout = 100000000; 41static long phy_lock_timeout = 100000000;
42 42
43static inline int phy_lock(void) 43static inline int phy_lock(struct netxen_adapter *adapter)
44{ 44{
45 int i; 45 int i;
46 int done = 0, timeout = 0; 46 int done = 0, timeout = 0;
47 47
48 while (!done) { 48 while (!done) {
49 done = readl((void __iomem *)NETXEN_PCIE_REG(PCIE_SEM3_LOCK)); 49 done =
50 readl(pci_base_offset
51 (adapter, NETXEN_PCIE_REG(PCIE_SEM3_LOCK)));
50 if (done == 1) 52 if (done == 1)
51 break; 53 break;
52 if (timeout >= phy_lock_timeout) { 54 if (timeout >= phy_lock_timeout) {
@@ -61,13 +63,15 @@ static inline int phy_lock(void)
61 } 63 }
62 } 64 }
63 65
64 writel(NETXEN_PHY_LOCK_ID, (void __iomem *)PHY_LOCK_DRIVER); 66 writel(PHY_LOCK_DRIVER,
67 NETXEN_CRB_NORMALIZE(adapter, NETXEN_PHY_LOCK_ID));
65 return 0; 68 return 0;
66} 69}
67 70
68static inline int phy_unlock(void) 71static inline int phy_unlock(struct netxen_adapter *adapter)
69{ 72{
70 readl((void __iomem *)NETXEN_PCIE_REG(PCIE_SEM3_UNLOCK)); 73 readl(pci_base_offset(adapter, NETXEN_PCIE_REG(PCIE_SEM3_UNLOCK)));
74
71 return 0; 75 return 0;
72} 76}
73 77
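Reviewer note: PCIE_SEM3 behaves as a read-to-acquire hardware semaphore: reading the LOCK register returns 1 to exactly one reader, which then owns the PHY, and reading the UNLOCK register releases it. The change in this hunk only routes those accesses through pci_base_offset(adapter, ...) instead of a bare pointer and records the owner via a CRB write. A standalone toy model of the acquire/release discipline with a fake register:

#include <stdio.h>

/* First read of "LOCK" returns 1 and latches the lock; later reads return 0
 * until "UNLOCK" is read. */
static int sem_taken;

static int read_sem3_lock(void)    { return sem_taken ? 0 : (sem_taken = 1); }
static void read_sem3_unlock(void) { sem_taken = 0; }

static int phy_lock_model(long timeout)
{
	long spins = 0;
	while (read_sem3_lock() != 1) {
		if (++spins >= timeout)
			return -1;             /* give up, as phy_lock() does */
	}
	return 0;
}

int main(void)
{
	printf("first lock:   %d\n", phy_lock_model(1000));  /* 0: acquired */
	printf("second lock:  %d\n", phy_lock_model(1000));  /* -1: times out */
	read_sem3_unlock();
	printf("after unlock: %d\n", phy_lock_model(1000));  /* 0 again */
	return 0;
}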
@@ -95,7 +99,7 @@ int netxen_niu_gbe_phy_read(struct netxen_adapter *adapter, long phy,
95 __le32 status; 99 __le32 status;
96 __le32 mac_cfg0; 100 __le32 mac_cfg0;
97 101
98 if (phy_lock() != 0) { 102 if (phy_lock(adapter) != 0) {
99 return -1; 103 return -1;
100 } 104 }
101 105
@@ -162,7 +166,7 @@ int netxen_niu_gbe_phy_read(struct netxen_adapter *adapter, long phy,
162 NETXEN_NIU_GB_MAC_CONFIG_0(0), 166 NETXEN_NIU_GB_MAC_CONFIG_0(0),
163 &mac_cfg0, 4)) 167 &mac_cfg0, 4))
164 return -EIO; 168 return -EIO;
165 phy_unlock(); 169 phy_unlock(adapter);
166 return result; 170 return result;
167} 171}
168 172
@@ -612,7 +616,7 @@ int netxen_niu_macaddr_set(struct netxen_port *port,
612 __le32 temp = 0; 616 __le32 temp = 0;
613 struct netxen_adapter *adapter = port->adapter; 617 struct netxen_adapter *adapter = port->adapter;
614 int phy = port->portnum; 618 int phy = port->portnum;
615 unsigned char mac_addr[MAX_ADDR_LEN]; 619 unsigned char mac_addr[6];
616 int i; 620 int i;
617 621
618 for (i = 0; i < 10; i++) { 622 for (i = 0; i < 10; i++) {
@@ -631,7 +635,7 @@ int netxen_niu_macaddr_set(struct netxen_port *port,
631 635
632 netxen_niu_macaddr_get(adapter, phy, 636 netxen_niu_macaddr_get(adapter, phy,
633 (netxen_ethernet_macaddr_t *) mac_addr); 637 (netxen_ethernet_macaddr_t *) mac_addr);
634 if (memcmp(mac_addr, addr, MAX_ADDR_LEN == 0)) 638 if (memcmp(mac_addr, addr, 6) == 0)
635 break; 639 break;
636 } 640 }
637 641
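Reviewer note: the netxen_niu_macaddr_set() change above is worth calling out. The old verification read memcmp(mac_addr, addr, MAX_ADDR_LEN == 0): the misplaced parenthesis makes the length argument the expression MAX_ADDR_LEN == 0, i.e. 0, so memcmp() compared zero bytes, always returned 0, and the "did the write stick?" retry loop never actually checked anything. A standalone demonstration:

#include <stdio.h>
#include <string.h>

#define MAX_ADDR_LEN 32

int main(void)
{
	unsigned char written[6] = { 0x00, 0x0e, 0x1e, 0x00, 0x00, 0x01 };
	unsigned char wanted[6]  = { 0x00, 0x0e, 0x1e, 0xaa, 0xbb, 0xcc };

	/* Old form: length is (MAX_ADDR_LEN == 0) == 0, so this compares zero
	 * bytes and reports "equal" even though the arrays differ. */
	printf("buggy compare:   %d\n", memcmp(written, wanted, MAX_ADDR_LEN == 0));

	/* Fixed form from this patch: compare the 6 MAC bytes, then test == 0. */
	printf("correct compare: %d\n", memcmp(written, wanted, 6) == 0);
	return 0;
}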
diff --git a/drivers/net/netxen/netxen_nic_phan_reg.h b/drivers/net/netxen/netxen_nic_phan_reg.h
index 8181d436783f..7879f855af0b 100644
--- a/drivers/net/netxen/netxen_nic_phan_reg.h
+++ b/drivers/net/netxen/netxen_nic_phan_reg.h
@@ -33,15 +33,74 @@
33/* 33/*
34 * CRB Registers or queue message done only at initialization time. 34 * CRB Registers or queue message done only at initialization time.
35 */ 35 */
36#define NIC_CRB_BASE NETXEN_CAM_RAM(0x200)
37#define NETXEN_NIC_REG(X) (NIC_CRB_BASE+(X))
36 38
37/* 39#define CRB_PHAN_CNTRL_LO_OFFSET NETXEN_NIC_REG(0x00)
38 * The following 2 are the base adresses for the CRB registers and their 40#define CRB_PHAN_CNTRL_HI_OFFSET NETXEN_NIC_REG(0x04)
39 * offsets will be added to get addresses for the index addresses. 41#define CRB_CMD_PRODUCER_OFFSET NETXEN_NIC_REG(0x08)
40 */ 42#define CRB_CMD_CONSUMER_OFFSET NETXEN_NIC_REG(0x0c)
41#define NIC_CRB_BASE_PORT1 NETXEN_CAM_RAM(0x200) 43#define CRB_PAUSE_ADDR_LO NETXEN_NIC_REG(0x10) /* C0 EPG BUG */
42#define NIC_CRB_BASE_PORT2 NETXEN_CAM_RAM(0x250) 44#define CRB_PAUSE_ADDR_HI NETXEN_NIC_REG(0x14)
45#define CRB_HOST_CMD_ADDR_HI NETXEN_NIC_REG(0x18) /* host add:cmd ring */
46#define CRB_HOST_CMD_ADDR_LO NETXEN_NIC_REG(0x1c)
47#define CRB_CMD_INTR_LOOP NETXEN_NIC_REG(0x20) /* 4 regs for perf */
48#define CRB_CMD_DMA_LOOP NETXEN_NIC_REG(0x24)
49#define CRB_RCV_INTR_LOOP NETXEN_NIC_REG(0x28)
50#define CRB_RCV_DMA_LOOP NETXEN_NIC_REG(0x2c)
51#define CRB_ENABLE_TX_INTR NETXEN_NIC_REG(0x30) /* phantom init status */
52#define CRB_MMAP_ADDR_3 NETXEN_NIC_REG(0x34)
53#define CRB_CMDPEG_CMDRING NETXEN_NIC_REG(0x38)
54#define CRB_HOST_DUMMY_BUF_ADDR_HI NETXEN_NIC_REG(0x3c)
55#define CRB_HOST_DUMMY_BUF_ADDR_LO NETXEN_NIC_REG(0x40)
56#define CRB_MMAP_ADDR_0 NETXEN_NIC_REG(0x44)
57#define CRB_MMAP_ADDR_1 NETXEN_NIC_REG(0x48)
58#define CRB_MMAP_ADDR_2 NETXEN_NIC_REG(0x4c)
59#define CRB_CMDPEG_STATE NETXEN_NIC_REG(0x50)
60#define CRB_MMAP_SIZE_0 NETXEN_NIC_REG(0x54)
61#define CRB_MMAP_SIZE_1 NETXEN_NIC_REG(0x58)
62#define CRB_MMAP_SIZE_2 NETXEN_NIC_REG(0x5c)
63#define CRB_MMAP_SIZE_3 NETXEN_NIC_REG(0x60)
64#define CRB_GLOBAL_INT_COAL NETXEN_NIC_REG(0x64) /* interrupt coalescing */
65#define CRB_INT_COAL_MODE NETXEN_NIC_REG(0x68)
66#define CRB_MAX_RCV_BUFS NETXEN_NIC_REG(0x6c)
67#define CRB_TX_INT_THRESHOLD NETXEN_NIC_REG(0x70)
68#define CRB_RX_PKT_TIMER NETXEN_NIC_REG(0x74)
69#define CRB_TX_PKT_TIMER NETXEN_NIC_REG(0x78)
70#define CRB_RX_PKT_CNT NETXEN_NIC_REG(0x7c)
71#define CRB_RX_TMR_CNT NETXEN_NIC_REG(0x80)
72#define CRB_RX_LRO_TIMER NETXEN_NIC_REG(0x84)
73#define CRB_RX_LRO_MID_TIMER NETXEN_NIC_REG(0x88)
74#define CRB_DMA_MAX_RCV_BUFS NETXEN_NIC_REG(0x8c)
75#define CRB_MAX_DMA_ENTRIES NETXEN_NIC_REG(0x90)
76#define CRB_XG_STATE NETXEN_NIC_REG(0x94) /* XG Link status */
77#define CRB_AGENT_GO NETXEN_NIC_REG(0x98) /* NIC pkt gen agent */
78#define CRB_AGENT_TX_SIZE NETXEN_NIC_REG(0x9c)
79#define CRB_AGENT_TX_TYPE NETXEN_NIC_REG(0xa0)
80#define CRB_AGENT_TX_ADDR NETXEN_NIC_REG(0xa4)
81#define CRB_AGENT_TX_MSS NETXEN_NIC_REG(0xa8)
82#define CRB_TX_STATE NETXEN_NIC_REG(0xac) /* Debug -performance */
83#define CRB_TX_COUNT NETXEN_NIC_REG(0xb0)
84#define CRB_RX_STATE NETXEN_NIC_REG(0xb4)
85#define CRB_RX_PERF_DEBUG_1 NETXEN_NIC_REG(0xb8)
86#define CRB_RX_LRO_CONTROL NETXEN_NIC_REG(0xbc) /* LRO On/OFF */
87#define CRB_RX_LRO_START_NUM NETXEN_NIC_REG(0xc0)
88#define CRB_MPORT_MODE NETXEN_NIC_REG(0xc4) /* Multiport Mode */
89#define CRB_CMD_RING_SIZE NETXEN_NIC_REG(0xc8)
90#define CRB_INT_VECTOR NETXEN_NIC_REG(0xd4)
91#define CRB_CTX_RESET NETXEN_NIC_REG(0xd8)
92#define CRB_HOST_STS_PROD NETXEN_NIC_REG(0xdc)
93#define CRB_HOST_STS_CONS NETXEN_NIC_REG(0xe0)
94#define CRB_PEG_CMD_PROD NETXEN_NIC_REG(0xe4)
95#define CRB_PEG_CMD_CONS NETXEN_NIC_REG(0xe8)
96#define CRB_HOST_BUFFER_PROD NETXEN_NIC_REG(0xec)
97#define CRB_HOST_BUFFER_CONS NETXEN_NIC_REG(0xf0)
98#define CRB_JUMBO_BUFFER_PROD NETXEN_NIC_REG(0xf4)
99#define CRB_JUMBO_BUFFER_CONS NETXEN_NIC_REG(0xf8)
43 100
44#define NETXEN_NIC_REG(X) (NIC_CRB_BASE_PORT1+(X)) 101#define CRB_CMD_PRODUCER_OFFSET_1 NETXEN_NIC_REG(0x1ac)
102#define CRB_CMD_CONSUMER_OFFSET_1 NETXEN_NIC_REG(0x1b0)
103#define CRB_TEMP_STATE NETXEN_NIC_REG(0x1b4)
45 104
46/* 105/*
47 * CrbPortPhanCntrHi/Lo is used to pass the address of HostPhantomIndex address 106 * CrbPortPhanCntrHi/Lo is used to pass the address of HostPhantomIndex address
@@ -51,74 +110,20 @@
51 * on the Phantom. 110 * on the Phantom.
52 */ 111 */
53 112
54#define CRB_PHAN_CNTRL_LO_OFFSET NETXEN_NIC_REG(0x00) 113#define nx_get_temp_val(x) ((x) >> 16)
55#define CRB_PHAN_CNTRL_HI_OFFSET NETXEN_NIC_REG(0x04) 114#define nx_get_temp_state(x) ((x) & 0xffff)
56 115#define nx_encode_temp(val, state) (((val) << 16) | (state))
57/* point to the indexes */
58#define CRB_CMD_PRODUCER_OFFSET NETXEN_NIC_REG(0x08)
59#define CRB_CMD_CONSUMER_OFFSET NETXEN_NIC_REG(0x0c)
60
61#define CRB_PAUSE_ADDR_LO NETXEN_NIC_REG(0x10)
62#define CRB_PAUSE_ADDR_HI NETXEN_NIC_REG(0x14)
63
64/* address of command descriptors in the host memory */
65#define CRB_HOST_CMD_ADDR_HI NETXEN_NIC_REG(0x30)
66#define CRB_HOST_CMD_ADDR_LO NETXEN_NIC_REG(0x34)
67
68/* The following 4 CRB registers are for doing performance coal */
69#define CRB_CMD_INTR_LOOP NETXEN_NIC_REG(0x38)
70#define CRB_CMD_DMA_LOOP NETXEN_NIC_REG(0x3c)
71#define CRB_RCV_INTR_LOOP NETXEN_NIC_REG(0x40)
72#define CRB_RCV_DMA_LOOP NETXEN_NIC_REG(0x44)
73
74/* Needed by the host to find out the state of Phantom's initialization */
75#define CRB_ENABLE_TX_INTR NETXEN_NIC_REG(0x4c)
76#define CRB_CMDPEG_STATE NETXEN_NIC_REG(0x50)
77#define CRB_CMDPEG_CMDRING NETXEN_NIC_REG(0x54)
78
79/* Interrupt coalescing parameters */
80#define CRB_GLOBAL_INT_COAL NETXEN_NIC_REG(0x80)
81#define CRB_INT_COAL_MODE NETXEN_NIC_REG(0x84)
82#define CRB_MAX_RCV_BUFS NETXEN_NIC_REG(0x88)
83#define CRB_TX_INT_THRESHOLD NETXEN_NIC_REG(0x8c)
84#define CRB_RX_PKT_TIMER NETXEN_NIC_REG(0x90)
85#define CRB_TX_PKT_TIMER NETXEN_NIC_REG(0x94)
86#define CRB_RX_PKT_CNT NETXEN_NIC_REG(0x98)
87#define CRB_RX_TMR_CNT NETXEN_NIC_REG(0x9c)
88#define CRB_INT_THRESH NETXEN_NIC_REG(0xa4)
89
90/* Register for communicating XG link status */
91#define CRB_XG_STATE NETXEN_NIC_REG(0xa0)
92
93/* Register for communicating card temperature */
94/* Upper 16 bits are temperature value. Lower 16 bits are the state */
95#define CRB_TEMP_STATE NETXEN_NIC_REG(0xa8)
96#define nx_get_temp_val(x) ((x) >> 16)
97#define nx_get_temp_state(x) ((x) & 0xffff)
98#define nx_encode_temp(val, state) (((val) << 16) | (state))
99
100/* Debug registers for controlling NIC pkt gen agent */
101#define CRB_AGENT_GO NETXEN_NIC_REG(0xb0)
102#define CRB_AGENT_TX_SIZE NETXEN_NIC_REG(0xb4)
103#define CRB_AGENT_TX_TYPE NETXEN_NIC_REG(0xb8)
104#define CRB_AGENT_TX_ADDR NETXEN_NIC_REG(0xbc)
105#define CRB_AGENT_TX_MSS NETXEN_NIC_REG(0xc0)
106
107/* Debug registers for observing NIC performance */
108#define CRB_TX_STATE NETXEN_NIC_REG(0xd0)
109#define CRB_TX_COUNT NETXEN_NIC_REG(0xd4)
110#define CRB_RX_STATE NETXEN_NIC_REG(0xd8)
111 116
112/* CRB registers per Rcv Descriptor ring */ 117/* CRB registers per Rcv Descriptor ring */
113struct netxen_rcv_desc_crb { 118struct netxen_rcv_desc_crb {
114 u32 crb_rcv_producer_offset __attribute__ ((aligned(512))); 119 u32 crb_rcv_producer_offset __attribute__ ((aligned(512)));
115 u32 crb_rcv_consumer_offset; 120 u32 crb_rcv_consumer_offset;
116 u32 crb_globalrcv_ring; 121 u32 crb_globalrcv_ring;
122 u32 crb_rcv_ring_size;
117}; 123};
118 124
119/* 125/*
120 * CRB registers used by the receive peg logic. One instance of these 126 * CRB registers used by the receive peg logic.
121 * needs to be instantiated per instance of the receive peg.
122 */ 127 */
123 128
124struct netxen_recv_crb { 129struct netxen_recv_crb {
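Reviewer note: CRB_TEMP_STATE keeps both readings in one 32-bit register, the temperature value in the upper 16 bits and the state code in the lower 16, which is exactly what the nx_encode_temp()/nx_get_temp_val()/nx_get_temp_state() macros (kept, but relocated earlier in this hunk) pack and unpack. A standalone round-trip check, with example values only (the real state codes are defined elsewhere in the driver):

#include <assert.h>
#include <stdio.h>
#include <stdint.h>

/* Same packing as the macros above: value in the high half, state in the low. */
#define nx_get_temp_val(x)         ((x) >> 16)
#define nx_get_temp_state(x)       ((x) & 0xffff)
#define nx_encode_temp(val, state) (((val) << 16) | (state))

int main(void)
{
	uint32_t reg = nx_encode_temp(67, 1);   /* e.g. 67 C, example state code 1 */

	assert(nx_get_temp_val(reg) == 67);
	assert(nx_get_temp_state(reg) == 1);
	printf("reg=0x%08x temp=%u state=%u\n",
	       reg, nx_get_temp_val(reg), nx_get_temp_state(reg));
	return 0;
}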
@@ -127,6 +132,7 @@ struct netxen_recv_crb {
127 u32 crb_rcv_status_producer; 132 u32 crb_rcv_status_producer;
128 u32 crb_rcv_status_consumer; 133 u32 crb_rcv_status_consumer;
129 u32 crb_rcvpeg_state; 134 u32 crb_rcvpeg_state;
135 u32 crb_status_ring_size;
130}; 136};
131 137
132#if defined(DEFINE_GLOBAL_RECV_CRB) 138#if defined(DEFINE_GLOBAL_RECV_CRB)
@@ -139,30 +145,48 @@ struct netxen_recv_crb recv_crb_registers[] = {
139 { 145 {
140 { 146 {
141 /* crb_rcv_producer_offset: */ 147 /* crb_rcv_producer_offset: */
142 NETXEN_NIC_REG(0x18), 148 NETXEN_NIC_REG(0x100),
143 /* crb_rcv_consumer_offset: */ 149 /* crb_rcv_consumer_offset: */
144 NETXEN_NIC_REG(0x1c), 150 NETXEN_NIC_REG(0x104),
145 /* crb_gloablrcv_ring: */ 151 /* crb_gloablrcv_ring: */
146 NETXEN_NIC_REG(0x20), 152 NETXEN_NIC_REG(0x108),
153 /* crb_rcv_ring_size */
154 NETXEN_NIC_REG(0x10c),
155
147 }, 156 },
148 /* Jumbo frames */ 157 /* Jumbo frames */
149 { 158 {
150 /* crb_rcv_producer_offset: */ 159 /* crb_rcv_producer_offset: */
151 NETXEN_NIC_REG(0x100), 160 NETXEN_NIC_REG(0x110),
152 /* crb_rcv_consumer_offset: */ 161 /* crb_rcv_consumer_offset: */
153 NETXEN_NIC_REG(0x104), 162 NETXEN_NIC_REG(0x114),
154 /* crb_globalrcv_ring: */ 163 /* crb_globalrcv_ring: */
155 NETXEN_NIC_REG(0x108), 164 NETXEN_NIC_REG(0x118),
165 /* crb_rcv_ring_size */
166 NETXEN_NIC_REG(0x11c),
167 },
168 /* LRO */
169 {
170 /* crb_rcv_producer_offset: */
171 NETXEN_NIC_REG(0x120),
172 /* crb_rcv_consumer_offset: */
173 NETXEN_NIC_REG(0x124),
174 /* crb_globalrcv_ring: */
175 NETXEN_NIC_REG(0x128),
176 /* crb_rcv_ring_size */
177 NETXEN_NIC_REG(0x12c),
156 } 178 }
157 }, 179 },
158 /* crb_rcvstatus_ring: */ 180 /* crb_rcvstatus_ring: */
159 NETXEN_NIC_REG(0x24), 181 NETXEN_NIC_REG(0x130),
160 /* crb_rcv_status_producer: */ 182 /* crb_rcv_status_producer: */
161 NETXEN_NIC_REG(0x28), 183 NETXEN_NIC_REG(0x134),
162 /* crb_rcv_status_consumer: */ 184 /* crb_rcv_status_consumer: */
163 NETXEN_NIC_REG(0x2c), 185 NETXEN_NIC_REG(0x138),
164 /* crb_rcvpeg_state: */ 186 /* crb_rcvpeg_state: */
165 NETXEN_NIC_REG(0x48), 187 NETXEN_NIC_REG(0x13c),
188 /* crb_status_ring_size */
189 NETXEN_NIC_REG(0x140),
166 190
167 }, 191 },
168 /* 192 /*
@@ -173,34 +197,66 @@ struct netxen_recv_crb recv_crb_registers[] = {
173 { 197 {
174 { 198 {
175 /* crb_rcv_producer_offset: */ 199 /* crb_rcv_producer_offset: */
176 NETXEN_NIC_REG(0x80), 200 NETXEN_NIC_REG(0x144),
177 /* crb_rcv_consumer_offset: */ 201 /* crb_rcv_consumer_offset: */
178 NETXEN_NIC_REG(0x84), 202 NETXEN_NIC_REG(0x148),
179 /* crb_globalrcv_ring: */ 203 /* crb_globalrcv_ring: */
180 NETXEN_NIC_REG(0x88), 204 NETXEN_NIC_REG(0x14c),
205 /* crb_rcv_ring_size */
206 NETXEN_NIC_REG(0x150),
207
181 }, 208 },
182 /* Jumbo frames */ 209 /* Jumbo frames */
183 { 210 {
184 /* crb_rcv_producer_offset: */ 211 /* crb_rcv_producer_offset: */
185 NETXEN_NIC_REG(0x10C), 212 NETXEN_NIC_REG(0x154),
186 /* crb_rcv_consumer_offset: */ 213 /* crb_rcv_consumer_offset: */
187 NETXEN_NIC_REG(0x110), 214 NETXEN_NIC_REG(0x158),
188 /* crb_globalrcv_ring: */ 215 /* crb_globalrcv_ring: */
189 NETXEN_NIC_REG(0x114), 216 NETXEN_NIC_REG(0x15c),
217 /* crb_rcv_ring_size */
218 NETXEN_NIC_REG(0x160),
219 },
220 /* LRO */
221 {
222 /* crb_rcv_producer_offset: */
223 NETXEN_NIC_REG(0x164),
224 /* crb_rcv_consumer_offset: */
225 NETXEN_NIC_REG(0x168),
226 /* crb_globalrcv_ring: */
227 NETXEN_NIC_REG(0x16c),
228 /* crb_rcv_ring_size */
229 NETXEN_NIC_REG(0x170),
190 } 230 }
231
191 }, 232 },
192 /* crb_rcvstatus_ring: */ 233 /* crb_rcvstatus_ring: */
193 NETXEN_NIC_REG(0x8c), 234 NETXEN_NIC_REG(0x174),
194 /* crb_rcv_status_producer: */ 235 /* crb_rcv_status_producer: */
195 NETXEN_NIC_REG(0x90), 236 NETXEN_NIC_REG(0x178),
196 /* crb_rcv_status_consumer: */ 237 /* crb_rcv_status_consumer: */
197 NETXEN_NIC_REG(0x94), 238 NETXEN_NIC_REG(0x17c),
198 /* crb_rcvpeg_state: */ 239 /* crb_rcvpeg_state: */
199 NETXEN_NIC_REG(0x98), 240 NETXEN_NIC_REG(0x180),
241 /* crb_status_ring_size */
242 NETXEN_NIC_REG(0x184),
243
200 }, 244 },
201}; 245};
246
247u64 ctx_addr_sig_regs[][3] = {
248 {NETXEN_NIC_REG(0x188), NETXEN_NIC_REG(0x18c), NETXEN_NIC_REG(0x1c0)},
249 {NETXEN_NIC_REG(0x190), NETXEN_NIC_REG(0x194), NETXEN_NIC_REG(0x1c4)},
250 {NETXEN_NIC_REG(0x198), NETXEN_NIC_REG(0x19c), NETXEN_NIC_REG(0x1c8)},
251 {NETXEN_NIC_REG(0x1a0), NETXEN_NIC_REG(0x1a4), NETXEN_NIC_REG(0x1cc)}
252};
253
202#else 254#else
203extern struct netxen_recv_crb recv_crb_registers[]; 255extern struct netxen_recv_crb recv_crb_registers[];
256extern u64 ctx_addr_sig_regs[][3];
257#define CRB_CTX_ADDR_REG_LO (ctx_addr_sig_regs[0][0])
258#define CRB_CTX_ADDR_REG_HI (ctx_addr_sig_regs[0][2])
259#define CRB_CTX_SIGNATURE_REG (ctx_addr_sig_regs[0][1])
204#endif /* DEFINE_GLOBAL_RECV_CRB */ 260#endif /* DEFINE_GLOBAL_RECV_CRB */
205 261
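ctx_addr_sig_regs[] gives each of the four entries (one per port) a low/high register pair for a context address plus a signature register, and the CRB_CTX_* macros simply alias entry 0. A sketch of handing a 64-bit DMA address to the card is shown below; the nx_crb_write32() accessor and the signature-written-last ordering are assumptions, and only the register table comes from this header.

/* Sketch only: the accessor and the write ordering are assumptions. */
extern void nx_crb_write32(struct netxen_adapter *adapter, u64 reg, u32 val);

static void nx_set_ctx_addr(struct netxen_adapter *adapter, int port,
			    u64 ctx_dma, u32 signature)
{
	nx_crb_write32(adapter, ctx_addr_sig_regs[port][0], (u32)(ctx_dma & 0xffffffffULL));	/* address, low 32 bits */
	nx_crb_write32(adapter, ctx_addr_sig_regs[port][2], (u32)(ctx_dma >> 32));		/* address, high 32 bits */
	nx_crb_write32(adapter, ctx_addr_sig_regs[port][1], signature);			/* per-port signature */
}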
206/* 262/*