Diffstat (limited to 'drivers')
 drivers/net/netxen/netxen_nic.h      |  38 +-
 drivers/net/netxen/netxen_nic_ctx.c  | 710 +++++++++++++++++++++++++++++++++++
 drivers/net/netxen/netxen_nic_init.c | 191 ++++++++----
 drivers/net/netxen/netxen_nic_main.c |  15 +-
 4 files changed, 876 insertions(+), 78 deletions(-)
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index e41f62352b15..705fdf10f95d 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -140,6 +140,7 @@
 #define NX_RX_NORMAL_BUF_MAX_LEN	(NX_MAX_ETHERHDR + NX_ETHERMTU)
 #define NX_P2_RX_JUMBO_BUF_MAX_LEN	(NX_MAX_ETHERHDR + P2_MAX_MTU)
 #define NX_P3_RX_JUMBO_BUF_MAX_LEN	(NX_MAX_ETHERHDR + P3_MAX_MTU)
+#define NX_CT_DEFAULT_RX_BUF_LEN	2048
 
 #define MAX_RX_BUFFER_LENGTH		1760
 #define MAX_RX_JUMBO_BUFFER_LENGTH	8062
@@ -391,8 +392,8 @@ struct rcv_desc {
 };
 
 /* opcode field in status_desc */
-#define RCV_NIC_PKT	(0xA)
-#define STATUS_NIC_PKT	((RCV_NIC_PKT) << 12)
+#define NETXEN_NIC_RXPKT_DESC	0x04
+#define NETXEN_OLD_RXPKT_DESC	0x3f
 
 /* for status field in status_desc */
 #define STATUS_NEED_CKSUM	(1)
@@ -424,6 +425,8 @@ struct rcv_desc {
 	(((sts_data) >> 28) & 0xFFFF)
 #define netxen_get_sts_prot(sts_data)	\
 	(((sts_data) >> 44) & 0x0F)
+#define netxen_get_sts_pkt_offset(sts_data)	\
+	(((sts_data) >> 48) & 0x1F)
 #define netxen_get_sts_opcode(sts_data)	\
 	(((sts_data) >> 58) & 0x03F)
 
@@ -438,17 +441,30 @@ struct rcv_desc {
 
 struct status_desc {
 	/* Bit pattern: 0-3 port, 4-7 status, 8-11 type, 12-27 total_length
-	   28-43 reference_handle, 44-47 protocol, 48-52 unused
+	   28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
 	   53-55 desc_cnt, 56-57 owner, 58-63 opcode
 	 */
 	__le64 status_desc_data;
-	__le32 hash_value;
-	u8 hash_type;
-	u8 msg_type;
-	u8 unused;
-	/* Bit pattern: 0-6 lro_count indicates frag sequence,
-	   7 last_frag indicates last frag */
-	u8 lro;
+	union {
+		struct {
+			__le32 hash_value;
+			u8 hash_type;
+			u8 msg_type;
+			u8 unused;
+			union {
+				/* Bit pattern: 0-6 lro_count indicates frag
+				 * sequence, 7 last_frag indicates last frag
+				 */
+				u8 lro;
+
+				/* chained buffers */
+				u8 nr_frags;
+			};
+		};
+		struct {
+			__le16 frag_handles[4];
+		};
+	};
 } __attribute__ ((aligned(16)));
 
 enum {
@@ -774,6 +790,7 @@ struct netxen_cmd_buffer {
 
 /* In rx_buffer, we do not need multiple fragments as it is a single buffer */
 struct netxen_rx_buffer {
+	struct list_head list;
 	struct sk_buff *skb;
 	u64 dma;
 	u16 ref_handle;
@@ -854,6 +871,7 @@ struct nx_host_rds_ring {
 	u32 dma_size;
 	u32 skb_size;
 	struct netxen_rx_buffer *rx_buf_arr;	/* rx buffers for receive */
+	struct list_head free_list;
 	int begin_alloc;
 };
 
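The getter macros above are plain shift-and-mask decoders over the 64-bit status_desc_data word; the new netxen_get_sts_pkt_offset() follows the same pattern for bits 48-52. A minimal standalone sketch of the decode (hypothetical userspace C, not driver code):

#include <stdint.h>
#include <stdio.h>

/* Same bit layout as status_desc_data: opcode in bits 58-63,
 * pkt_offset in bits 48-52, total_length in bits 12-27. */
#define STS_FIELD(data, shift, mask)	(((data) >> (shift)) & (mask))

int main(void)
{
	/* Example word: opcode 0x04 (NETXEN_NIC_RXPKT_DESC),
	 * pkt_offset 14, total_length 1514. */
	uint64_t sts = ((uint64_t)0x04 << 58) | ((uint64_t)14 << 48) |
		       ((uint64_t)1514 << 12);

	printf("opcode=0x%llx pkt_offset=%llu len=%llu\n",
	       (unsigned long long)STS_FIELD(sts, 58, 0x3F),
	       (unsigned long long)STS_FIELD(sts, 48, 0x1F),
	       (unsigned long long)STS_FIELD(sts, 12, 0xFFFF));
	return 0;
}
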
diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c
new file mode 100644
index 000000000000..64babc59e699
--- /dev/null
+++ b/drivers/net/netxen/netxen_nic_ctx.c
@@ -0,0 +1,710 @@
+/*
+ * Copyright (C) 2003 - 2008 NetXen, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA  02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.
+ *
+ * Contact Information:
+ *    info@netxen.com
+ * NetXen,
+ * 3965 Freedom Circle, Fourth floor,
+ * Santa Clara, CA 95054
+ *
+ */
+
+#include "netxen_nic_hw.h"
+#include "netxen_nic.h"
+#include "netxen_nic_phan_reg.h"
+
+#define NXHAL_VERSION	1
+
+static int
+netxen_api_lock(struct netxen_adapter *adapter)
+{
+	u32 done = 0, timeout = 0;
+
+	for (;;) {
+		/* Acquire PCIE HW semaphore5 */
+		netxen_nic_read_w0(adapter,
+			NETXEN_PCIE_REG(PCIE_SEM5_LOCK), &done);
+
+		if (done == 1)
+			break;
+
+		if (++timeout >= NX_OS_CRB_RETRY_COUNT) {
+			printk(KERN_ERR "%s: lock timeout.\n", __func__);
+			return -1;
+		}
+
+		msleep(1);
+	}
+
+#if 0
+	netxen_nic_write_w1(adapter,
+		NETXEN_API_LOCK_ID, NX_OS_API_LOCK_DRIVER);
+#endif
+	return 0;
+}
+
+static int
+netxen_api_unlock(struct netxen_adapter *adapter)
+{
+	u32 val;
+
+	/* Release PCIE HW semaphore5 */
+	netxen_nic_read_w0(adapter,
+		NETXEN_PCIE_REG(PCIE_SEM5_UNLOCK), &val);
+	return 0;
+}
+
+static u32
+netxen_poll_rsp(struct netxen_adapter *adapter)
+{
+	u32 raw_rsp, rsp = NX_CDRP_RSP_OK;
+	int timeout = 0;
+
+	do {
+		/* give at least 1ms for firmware to respond */
+		msleep(1);
+
+		if (++timeout > NX_OS_CRB_RETRY_COUNT)
+			return NX_CDRP_RSP_TIMEOUT;
+
+		netxen_nic_read_w1(adapter, NX_CDRP_CRB_OFFSET,
+			&raw_rsp);
+
+		rsp = le32_to_cpu(raw_rsp);
+	} while (!NX_CDRP_IS_RSP(rsp));
+
+	return rsp;
+}
+
+static u32
+netxen_issue_cmd(struct netxen_adapter *adapter,
+	u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd)
+{
+	u32 rsp;
+	u32 signature = 0;
+	u32 rcode = NX_RCODE_SUCCESS;
+
+	signature = NX_CDRP_SIGNATURE_MAKE(pci_fn, version);
+
+	/* Acquire semaphore before accessing CRB */
+	if (netxen_api_lock(adapter))
+		return NX_RCODE_TIMEOUT;
+
+	netxen_nic_write_w1(adapter, NX_SIGN_CRB_OFFSET,
+		cpu_to_le32(signature));
+
+	netxen_nic_write_w1(adapter, NX_ARG1_CRB_OFFSET,
+		cpu_to_le32(arg1));
+
+	netxen_nic_write_w1(adapter, NX_ARG2_CRB_OFFSET,
+		cpu_to_le32(arg2));
+
+	netxen_nic_write_w1(adapter, NX_ARG3_CRB_OFFSET,
+		cpu_to_le32(arg3));
+
+	netxen_nic_write_w1(adapter, NX_CDRP_CRB_OFFSET,
+		cpu_to_le32(NX_CDRP_FORM_CMD(cmd)));
+
+	rsp = netxen_poll_rsp(adapter);
+
+	if (rsp == NX_CDRP_RSP_TIMEOUT) {
+		printk(KERN_ERR "%s: card response timeout.\n",
+				netxen_nic_driver_name);
+
+		rcode = NX_RCODE_TIMEOUT;
+	} else if (rsp == NX_CDRP_RSP_FAIL) {
+		netxen_nic_read_w1(adapter, NX_ARG1_CRB_OFFSET, &rcode);
+		rcode = le32_to_cpu(rcode);
+
+		printk(KERN_ERR "%s: failed card response code:0x%x\n",
+				netxen_nic_driver_name, rcode);
+	}
+
+	/* Release semaphore */
+	netxen_api_unlock(adapter);
+
+	return rcode;
+}
+
+u32
+nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, u32 mtu)
+{
+	u32 rcode = NX_RCODE_SUCCESS;
+	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx[0];
+
+	if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE)
+		rcode = netxen_issue_cmd(adapter,
+				adapter->ahw.pci_func,
+				NXHAL_VERSION,
+				recv_ctx->context_id,
+				mtu,
+				0,
+				NX_CDRP_CMD_SET_MTU);
+
+	return rcode;
+}
+
+static int
+nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
+{
+	void *addr;
+	nx_hostrq_rx_ctx_t *prq;
+	nx_cardrsp_rx_ctx_t *prsp;
+	nx_hostrq_rds_ring_t *prq_rds;
+	nx_hostrq_sds_ring_t *prq_sds;
+	nx_cardrsp_rds_ring_t *prsp_rds;
+	nx_cardrsp_sds_ring_t *prsp_sds;
+	struct nx_host_rds_ring *rds_ring;
+
+	dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
+	u64 phys_addr;
+
+	int i, nrds_rings, nsds_rings;
+	size_t rq_size, rsp_size;
+	u32 cap, reg;
+
+	int err;
+
+	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx[0];
+
+	/* only one sds ring for now */
+	nrds_rings = adapter->max_rds_rings;
+	nsds_rings = 1;
+
+	rq_size =
+		SIZEOF_HOSTRQ_RX(nx_hostrq_rx_ctx_t, nrds_rings, nsds_rings);
+	rsp_size =
+		SIZEOF_CARDRSP_RX(nx_cardrsp_rx_ctx_t, nrds_rings, nsds_rings);
+
+	addr = pci_alloc_consistent(adapter->pdev,
+			rq_size, &hostrq_phys_addr);
+	if (addr == NULL)
+		return -ENOMEM;
+	prq = (nx_hostrq_rx_ctx_t *)addr;
+
+	addr = pci_alloc_consistent(adapter->pdev,
+			rsp_size, &cardrsp_phys_addr);
+	if (addr == NULL) {
+		err = -ENOMEM;
+		goto out_free_rq;
+	}
+	prsp = (nx_cardrsp_rx_ctx_t *)addr;
+
+	prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);
+
+	cap = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN);
+	cap |= (NX_CAP0_JUMBO_CONTIGUOUS | NX_CAP0_LRO_CONTIGUOUS);
+
+	prq->capabilities[0] = cpu_to_le32(cap);
+	prq->host_int_crb_mode =
+		cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED);
+	prq->host_rds_crb_mode =
+		cpu_to_le32(NX_HOST_RDS_CRB_MODE_UNIQUE);
+
+	prq->num_rds_rings = cpu_to_le16(nrds_rings);
+	prq->num_sds_rings = cpu_to_le16(nsds_rings);
+	prq->rds_ring_offset = 0;
+	prq->sds_ring_offset = prq->rds_ring_offset +
+		(sizeof(nx_hostrq_rds_ring_t) * nrds_rings);
+
+	prq_rds = (nx_hostrq_rds_ring_t *)(prq->data + prq->rds_ring_offset);
+
+	for (i = 0; i < nrds_rings; i++) {
+
+		rds_ring = &recv_ctx->rds_rings[i];
+
+		prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
+		prq_rds[i].ring_size = cpu_to_le32(rds_ring->max_rx_desc_count);
+		prq_rds[i].ring_kind = cpu_to_le32(i);
+		prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
+	}
+
+	prq_sds = (nx_hostrq_sds_ring_t *)(prq->data + prq->sds_ring_offset);
+
+	prq_sds[0].host_phys_addr =
+		cpu_to_le64(recv_ctx->rcv_status_desc_phys_addr);
+	prq_sds[0].ring_size = cpu_to_le32(adapter->max_rx_desc_count);
+	/* only one msix vector for now */
+	prq_sds[0].msi_index = cpu_to_le32(0);
+
+	/* now byteswap offsets */
+	prq->rds_ring_offset = cpu_to_le32(prq->rds_ring_offset);
+	prq->sds_ring_offset = cpu_to_le32(prq->sds_ring_offset);
+
+	phys_addr = hostrq_phys_addr;
+	err = netxen_issue_cmd(adapter,
+			adapter->ahw.pci_func,
+			NXHAL_VERSION,
+			(u32)(phys_addr >> 32),
+			(u32)(phys_addr & 0xffffffff),
+			rq_size,
+			NX_CDRP_CMD_CREATE_RX_CTX);
+	if (err) {
+		printk(KERN_WARNING
263 "Failed to create rx ctx in firmware%d\n", err);
+		goto out_free_rsp;
+	}
+
+
+	prsp_rds = ((nx_cardrsp_rds_ring_t *)
+			&prsp->data[prsp->rds_ring_offset]);
+
+	for (i = 0; i < le32_to_cpu(prsp->num_rds_rings); i++) {
+		rds_ring = &recv_ctx->rds_rings[i];
+
+		reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
+		rds_ring->crb_rcv_producer = NETXEN_NIC_REG(reg - 0x200);
+	}
+
+	prsp_sds = ((nx_cardrsp_sds_ring_t *)
+			&prsp->data[prsp->sds_ring_offset]);
+	reg = le32_to_cpu(prsp_sds[0].host_consumer_crb);
+	recv_ctx->crb_sts_consumer = NETXEN_NIC_REG(reg - 0x200);
+
+	reg = le32_to_cpu(prsp_sds[0].interrupt_crb);
+	adapter->crb_intr_mask = NETXEN_NIC_REG(reg - 0x200);
+
+	recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
+	recv_ctx->context_id = le16_to_cpu(prsp->context_id);
+	recv_ctx->virt_port = le16_to_cpu(prsp->virt_port);
+
+out_free_rsp:
+	pci_free_consistent(adapter->pdev, rsp_size, prsp, cardrsp_phys_addr);
+out_free_rq:
+	pci_free_consistent(adapter->pdev, rq_size, prq, hostrq_phys_addr);
+	return err;
+}
+
+static void
+nx_fw_cmd_destroy_rx_ctx(struct netxen_adapter *adapter)
+{
+	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx[0];
+
+	if (netxen_issue_cmd(adapter,
+			adapter->ahw.pci_func,
+			NXHAL_VERSION,
+			recv_ctx->context_id,
+			NX_DESTROY_CTX_RESET,
+			0,
+			NX_CDRP_CMD_DESTROY_RX_CTX)) {
+
+		printk(KERN_WARNING
+			"%s: Failed to destroy rx ctx in firmware\n",
+			netxen_nic_driver_name);
+	}
+}
+
+static int
+nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter)
+{
+	nx_hostrq_tx_ctx_t *prq;
+	nx_hostrq_cds_ring_t *prq_cds;
+	nx_cardrsp_tx_ctx_t *prsp;
+	void *rq_addr, *rsp_addr;
+	size_t rq_size, rsp_size;
+	u32 temp;
+	int err = 0;
+	u64 offset, phys_addr;
+	dma_addr_t rq_phys_addr, rsp_phys_addr;
+
+	rq_size = SIZEOF_HOSTRQ_TX(nx_hostrq_tx_ctx_t);
+	rq_addr = pci_alloc_consistent(adapter->pdev,
+		rq_size, &rq_phys_addr);
+	if (!rq_addr)
+		return -ENOMEM;
+
+	rsp_size = SIZEOF_CARDRSP_TX(nx_cardrsp_tx_ctx_t);
+	rsp_addr = pci_alloc_consistent(adapter->pdev,
+		rsp_size, &rsp_phys_addr);
+	if (!rsp_addr) {
+		err = -ENOMEM;
+		goto out_free_rq;
+	}
+
+	memset(rq_addr, 0, rq_size);
+	prq = (nx_hostrq_tx_ctx_t *)rq_addr;
+
+	memset(rsp_addr, 0, rsp_size);
+	prsp = (nx_cardrsp_tx_ctx_t *)rsp_addr;
+
+	prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);
+
+	temp = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN | NX_CAP0_LSO);
+	prq->capabilities[0] = cpu_to_le32(temp);
+
+	prq->host_int_crb_mode =
+		cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED);
+
+	prq->interrupt_ctl = 0;
+	prq->msi_index = 0;
+
+	prq->dummy_dma_addr = cpu_to_le64(adapter->dummy_dma.phys_addr);
+
+	offset = adapter->ctx_desc_phys_addr + sizeof(struct netxen_ring_ctx);
+	prq->cmd_cons_dma_addr = cpu_to_le64(offset);
+
+	prq_cds = &prq->cds_ring;
+
+	prq_cds->host_phys_addr =
+		cpu_to_le64(adapter->ahw.cmd_desc_phys_addr);
+
+	prq_cds->ring_size = cpu_to_le32(adapter->max_tx_desc_count);
+
+	phys_addr = rq_phys_addr;
+	err = netxen_issue_cmd(adapter,
+			adapter->ahw.pci_func,
+			NXHAL_VERSION,
+			(u32)(phys_addr >> 32),
+			((u32)phys_addr & 0xffffffff),
+			rq_size,
+			NX_CDRP_CMD_CREATE_TX_CTX);
+
+	if (err == NX_RCODE_SUCCESS) {
+		temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
+		adapter->crb_addr_cmd_producer =
+			NETXEN_NIC_REG(temp - 0x200);
+#if 0
+		adapter->tx_state =
+			le32_to_cpu(prsp->host_ctx_state);
+#endif
+		adapter->tx_context_id =
+			le16_to_cpu(prsp->context_id);
+	} else {
+		printk(KERN_WARNING
393 "Failed to create tx ctx in firmware%d\n", err);
+		err = -EIO;
+	}
+
+	pci_free_consistent(adapter->pdev, rsp_size, rsp_addr, rsp_phys_addr);
+
+out_free_rq:
+	pci_free_consistent(adapter->pdev, rq_size, rq_addr, rq_phys_addr);
+
+	return err;
+}
+
+static void
+nx_fw_cmd_destroy_tx_ctx(struct netxen_adapter *adapter)
+{
+	if (netxen_issue_cmd(adapter,
+			adapter->ahw.pci_func,
+			NXHAL_VERSION,
+			adapter->tx_context_id,
+			NX_DESTROY_CTX_RESET,
+			0,
+			NX_CDRP_CMD_DESTROY_TX_CTX)) {
+
+		printk(KERN_WARNING
+			"%s: Failed to destroy tx ctx in firmware\n",
+			netxen_nic_driver_name);
+	}
+}
+
+static u64 ctx_addr_sig_regs[][3] = {
+	{NETXEN_NIC_REG(0x188), NETXEN_NIC_REG(0x18c), NETXEN_NIC_REG(0x1c0)},
+	{NETXEN_NIC_REG(0x190), NETXEN_NIC_REG(0x194), NETXEN_NIC_REG(0x1c4)},
+	{NETXEN_NIC_REG(0x198), NETXEN_NIC_REG(0x19c), NETXEN_NIC_REG(0x1c8)},
+	{NETXEN_NIC_REG(0x1a0), NETXEN_NIC_REG(0x1a4), NETXEN_NIC_REG(0x1cc)}
+};
+
+#define CRB_CTX_ADDR_REG_LO(FUNC_ID)	(ctx_addr_sig_regs[FUNC_ID][0])
+#define CRB_CTX_ADDR_REG_HI(FUNC_ID)	(ctx_addr_sig_regs[FUNC_ID][2])
+#define CRB_CTX_SIGNATURE_REG(FUNC_ID)	(ctx_addr_sig_regs[FUNC_ID][1])
+
+#define lower32(x)	((u32)((x) & 0xffffffff))
+#define upper32(x)	((u32)(((u64)(x) >> 32) & 0xffffffff))
+
+static struct netxen_recv_crb recv_crb_registers[] = {
+	/* Instance 0 */
+	{
+		/* crb_rcv_producer: */
+		{
+			NETXEN_NIC_REG(0x100),
+			/* Jumbo frames */
+			NETXEN_NIC_REG(0x110),
+			/* LRO */
+			NETXEN_NIC_REG(0x120)
+		},
+		/* crb_sts_consumer: */
+		NETXEN_NIC_REG(0x138),
+	},
+	/* Instance 1 */
+	{
+		/* crb_rcv_producer: */
+		{
+			NETXEN_NIC_REG(0x144),
+			/* Jumbo frames */
+			NETXEN_NIC_REG(0x154),
+			/* LRO */
+			NETXEN_NIC_REG(0x164)
+		},
+		/* crb_sts_consumer: */
+		NETXEN_NIC_REG(0x17c),
+	},
+	/* Instance 2 */
+	{
+		/* crb_rcv_producer: */
+		{
+			NETXEN_NIC_REG(0x1d8),
+			/* Jumbo frames */
+			NETXEN_NIC_REG(0x1f8),
+			/* LRO */
+			NETXEN_NIC_REG(0x208)
+		},
+		/* crb_sts_consumer: */
+		NETXEN_NIC_REG(0x220),
+	},
+	/* Instance 3 */
+	{
+		/* crb_rcv_producer: */
+		{
+			NETXEN_NIC_REG(0x22c),
+			/* Jumbo frames */
+			NETXEN_NIC_REG(0x23c),
+			/* LRO */
+			NETXEN_NIC_REG(0x24c)
+		},
+		/* crb_sts_consumer: */
+		NETXEN_NIC_REG(0x264),
+	},
+};
+
+static int
+netxen_init_old_ctx(struct netxen_adapter *adapter)
+{
+	struct netxen_recv_context *recv_ctx;
+	struct nx_host_rds_ring *rds_ring;
+	int ctx, ring;
+	int func_id = adapter->portnum;
+
+	adapter->ctx_desc->cmd_ring_addr =
+		cpu_to_le64(adapter->ahw.cmd_desc_phys_addr);
+	adapter->ctx_desc->cmd_ring_size =
+		cpu_to_le32(adapter->max_tx_desc_count);
+
+	for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
+		recv_ctx = &adapter->recv_ctx[ctx];
+
+		for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+			rds_ring = &recv_ctx->rds_rings[ring];
+
+			adapter->ctx_desc->rcv_ctx[ring].rcv_ring_addr =
+				cpu_to_le64(rds_ring->phys_addr);
+			adapter->ctx_desc->rcv_ctx[ring].rcv_ring_size =
+				cpu_to_le32(rds_ring->max_rx_desc_count);
+		}
+		adapter->ctx_desc->sts_ring_addr =
+			cpu_to_le64(recv_ctx->rcv_status_desc_phys_addr);
+		adapter->ctx_desc->sts_ring_size =
+			cpu_to_le32(adapter->max_rx_desc_count);
+	}
+
+	adapter->pci_write_normalize(adapter, CRB_CTX_ADDR_REG_LO(func_id),
+			lower32(adapter->ctx_desc_phys_addr));
+	adapter->pci_write_normalize(adapter, CRB_CTX_ADDR_REG_HI(func_id),
+			upper32(adapter->ctx_desc_phys_addr));
+	adapter->pci_write_normalize(adapter, CRB_CTX_SIGNATURE_REG(func_id),
+			NETXEN_CTX_SIGNATURE | func_id);
+	return 0;
+}
+
+static uint32_t sw_int_mask[4] = {
+	CRB_SW_INT_MASK_0, CRB_SW_INT_MASK_1,
+	CRB_SW_INT_MASK_2, CRB_SW_INT_MASK_3
+};
+
+int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
+{
+	struct netxen_hardware_context *hw = &adapter->ahw;
+	void *addr;
+	int err = 0;
+	int ctx, ring;
+	struct netxen_recv_context *recv_ctx;
+	struct nx_host_rds_ring *rds_ring;
+
+	err = netxen_receive_peg_ready(adapter);
+	if (err) {
+		printk(KERN_ERR "Rcv Peg initialization not complete, err=%d\n",
+				err);
+		return err;
+	}
+
+	addr = pci_alloc_consistent(adapter->pdev,
+			sizeof(struct netxen_ring_ctx) + sizeof(uint32_t),
+			&adapter->ctx_desc_phys_addr);
+
+	if (addr == NULL) {
+		DPRINTK(ERR, "failed to allocate hw context\n");
+		return -ENOMEM;
+	}
+	memset(addr, 0, sizeof(struct netxen_ring_ctx));
+	adapter->ctx_desc = (struct netxen_ring_ctx *)addr;
+	adapter->ctx_desc->ctx_id = cpu_to_le32(adapter->portnum);
+	adapter->ctx_desc->cmd_consumer_offset =
+		cpu_to_le64(adapter->ctx_desc_phys_addr +
+			sizeof(struct netxen_ring_ctx));
+	adapter->cmd_consumer =
+		(__le32 *)(((char *)addr) + sizeof(struct netxen_ring_ctx));
+
+	/* cmd desc ring */
+	addr = pci_alloc_consistent(adapter->pdev,
+			sizeof(struct cmd_desc_type0) *
+			adapter->max_tx_desc_count,
+			&hw->cmd_desc_phys_addr);
+
+	if (addr == NULL) {
+		printk(KERN_ERR "%s failed to allocate tx desc ring\n",
+				netxen_nic_driver_name);
+		return -ENOMEM;
+	}
+
+	hw->cmd_desc_head = (struct cmd_desc_type0 *)addr;
+
+	for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
+		recv_ctx = &adapter->recv_ctx[ctx];
+
+		for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+			/* rx desc ring */
+			rds_ring = &recv_ctx->rds_rings[ring];
+			addr = pci_alloc_consistent(adapter->pdev,
+					RCV_DESC_RINGSIZE,
+					&rds_ring->phys_addr);
+			if (addr == NULL) {
+				printk(KERN_ERR "%s failed to allocate rx "
+					"desc ring[%d]\n",
+					netxen_nic_driver_name, ring);
+				err = -ENOMEM;
+				goto err_out_free;
+			}
+			rds_ring->desc_head = (struct rcv_desc *)addr;
+
+			if (adapter->fw_major < 4)
+				rds_ring->crb_rcv_producer =
+					recv_crb_registers[adapter->portnum].
+					crb_rcv_producer[ring];
+		}
+
+		/* status desc ring */
+		addr = pci_alloc_consistent(adapter->pdev,
+				STATUS_DESC_RINGSIZE,
+				&recv_ctx->rcv_status_desc_phys_addr);
+		if (addr == NULL) {
+			printk(KERN_ERR "%s failed to allocate sts desc ring\n",
+					netxen_nic_driver_name);
+			err = -ENOMEM;
+			goto err_out_free;
+		}
+		recv_ctx->rcv_status_desc_head = (struct status_desc *)addr;
+
+		if (adapter->fw_major < 4)
+			recv_ctx->crb_sts_consumer =
+				recv_crb_registers[adapter->portnum].
+				crb_sts_consumer;
+	}
+
+	if (adapter->fw_major >= 4) {
+		adapter->intr_scheme = INTR_SCHEME_PERPORT;
+		adapter->msi_mode = MSI_MODE_MULTIFUNC;
+
+		err = nx_fw_cmd_create_rx_ctx(adapter);
+		if (err)
+			goto err_out_free;
+		err = nx_fw_cmd_create_tx_ctx(adapter);
+		if (err)
+			goto err_out_free;
+	} else {
+
+		adapter->intr_scheme = adapter->pci_read_normalize(adapter,
+				CRB_NIC_CAPABILITIES_FW);
+		adapter->msi_mode = adapter->pci_read_normalize(adapter,
+				CRB_NIC_MSI_MODE_FW);
+		adapter->crb_intr_mask = sw_int_mask[adapter->portnum];
+
+		err = netxen_init_old_ctx(adapter);
+		if (err) {
+			netxen_free_hw_resources(adapter);
+			return err;
+		}
+
+	}
+
+	return 0;
+
+err_out_free:
+	netxen_free_hw_resources(adapter);
+	return err;
+}
+
+void netxen_free_hw_resources(struct netxen_adapter *adapter)
+{
+	struct netxen_recv_context *recv_ctx;
+	struct nx_host_rds_ring *rds_ring;
+	int ctx, ring;
+
+	if (adapter->fw_major >= 4) {
+		nx_fw_cmd_destroy_tx_ctx(adapter);
+		nx_fw_cmd_destroy_rx_ctx(adapter);
+	}
+
+	if (adapter->ctx_desc != NULL) {
+		pci_free_consistent(adapter->pdev,
+				sizeof(struct netxen_ring_ctx) +
+				sizeof(uint32_t),
+				adapter->ctx_desc,
+				adapter->ctx_desc_phys_addr);
+		adapter->ctx_desc = NULL;
+	}
+
+	if (adapter->ahw.cmd_desc_head != NULL) {
+		pci_free_consistent(adapter->pdev,
+				sizeof(struct cmd_desc_type0) *
+				adapter->max_tx_desc_count,
+				adapter->ahw.cmd_desc_head,
+				adapter->ahw.cmd_desc_phys_addr);
+		adapter->ahw.cmd_desc_head = NULL;
+	}
+
+	for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
+		recv_ctx = &adapter->recv_ctx[ctx];
+		for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+			rds_ring = &recv_ctx->rds_rings[ring];
+
+			if (rds_ring->desc_head != NULL) {
+				pci_free_consistent(adapter->pdev,
+						RCV_DESC_RINGSIZE,
+						rds_ring->desc_head,
+						rds_ring->phys_addr);
+				rds_ring->desc_head = NULL;
+			}
+		}
+
+		if (recv_ctx->rcv_status_desc_head != NULL) {
+			pci_free_consistent(adapter->pdev,
+					STATUS_DESC_RINGSIZE,
+					recv_ctx->rcv_status_desc_head,
+					recv_ctx->rcv_status_desc_phys_addr);
+			recv_ctx->rcv_status_desc_head = NULL;
+		}
+	}
+}
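Both context-creation commands above hand a 64-bit request address to firmware as two 32-bit CRB arguments: the high dword in arg1, the low dword in arg2. A self-contained sketch of that split (illustrative standard C, not driver code):

#include <assert.h>
#include <stdint.h>

static uint32_t hi32(uint64_t x) { return (uint32_t)(x >> 32); }
static uint32_t lo32(uint64_t x) { return (uint32_t)(x & 0xffffffff); }

int main(void)
{
	uint64_t phys_addr = 0x0000000123456000ULL;	/* example ring base */

	/* arg1/arg2 as passed to NX_CDRP_CMD_CREATE_RX_CTX above */
	uint32_t arg1 = hi32(phys_addr);
	uint32_t arg2 = lo32(phys_addr);

	/* the firmware side reassembles the same 64-bit address */
	assert((((uint64_t)arg1 << 32) | arg2) == phys_addr);
	return 0;
}
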
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 7b5124057664..01ab31b34a85 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -262,17 +262,30 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
 			rds_ring->max_rx_desc_count =
 				adapter->max_rx_desc_count;
 			rds_ring->flags = RCV_DESC_NORMAL;
-			rds_ring->dma_size = RX_DMA_MAP_LEN;
-			rds_ring->skb_size = MAX_RX_BUFFER_LENGTH;
+			if (adapter->ahw.cut_through) {
+				rds_ring->dma_size =
+					NX_CT_DEFAULT_RX_BUF_LEN;
+				rds_ring->skb_size =
+					NX_CT_DEFAULT_RX_BUF_LEN;
+			} else {
+				rds_ring->dma_size = RX_DMA_MAP_LEN;
+				rds_ring->skb_size =
+					MAX_RX_BUFFER_LENGTH;
+			}
 			break;
 
 		case RCV_DESC_JUMBO:
 			rds_ring->max_rx_desc_count =
 				adapter->max_jumbo_rx_desc_count;
 			rds_ring->flags = RCV_DESC_JUMBO;
-			rds_ring->dma_size = RX_JUMBO_DMA_MAP_LEN;
+			if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+				rds_ring->dma_size =
+					NX_P3_RX_JUMBO_BUF_MAX_LEN;
+			else
+				rds_ring->dma_size =
+					NX_P2_RX_JUMBO_BUF_MAX_LEN;
 			rds_ring->skb_size =
-				MAX_RX_JUMBO_BUFFER_LENGTH;
+				rds_ring->dma_size + NET_IP_ALIGN;
 			break;
 
 		case RCV_RING_LRO:
@@ -294,6 +307,7 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
 			goto err_out;
 		}
 		memset(rds_ring->rx_buf_arr, 0, RCV_BUFFSIZE);
+		INIT_LIST_HEAD(&rds_ring->free_list);
 		rds_ring->begin_alloc = 0;
 		/*
 		 * Now go through all of them, set reference handles
@@ -302,6 +316,8 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
 		num_rx_bufs = rds_ring->max_rx_desc_count;
 		rx_buf = rds_ring->rx_buf_arr;
 		for (i = 0; i < num_rx_bufs; i++) {
+			list_add_tail(&rx_buf->list,
+					&rds_ring->free_list);
 			rx_buf->ref_handle = i;
 			rx_buf->state = NETXEN_BUFFER_FREE;
 			rx_buf++;
@@ -1137,15 +1153,47 @@ int netxen_receive_peg_ready(struct netxen_adapter *adapter)
 	return 0;
 }
 
+static struct sk_buff *netxen_process_rxbuf(struct netxen_adapter *adapter,
+		struct nx_host_rds_ring *rds_ring, u16 index, u16 cksum)
+{
+	struct netxen_rx_buffer *buffer;
+	struct sk_buff *skb;
+
+	buffer = &rds_ring->rx_buf_arr[index];
+
+	pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
+			PCI_DMA_FROMDEVICE);
+
+	skb = buffer->skb;
+	if (!skb)
+		goto no_skb;
+
+	if (likely(adapter->rx_csum && cksum == STATUS_CKSUM_OK)) {
+		adapter->stats.csummed++;
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	} else
+		skb->ip_summed = CHECKSUM_NONE;
+
+	skb->dev = adapter->netdev;
+
+	buffer->skb = NULL;
+
+no_skb:
+	buffer->state = NETXEN_BUFFER_FREE;
+	buffer->lro_current_frags = 0;
+	buffer->lro_expected_frags = 0;
+	list_add_tail(&buffer->list, &rds_ring->free_list);
+	return skb;
+}
+
 /*
  * netxen_process_rcv() sends the received packet to the protocol stack,
  * and if the number of receives exceeds RX_BUFFERS_REFILL, then we
  * invoke the routine to send more rx buffers to the Phantom...
  */
 static void netxen_process_rcv(struct netxen_adapter *adapter, int ctxid,
-		struct status_desc *desc)
+		struct status_desc *desc, struct status_desc *frag_desc)
 {
-	struct pci_dev *pdev = adapter->pdev;
 	struct net_device *netdev = adapter->netdev;
 	u64 sts_data = le64_to_cpu(desc->status_desc_data);
 	int index = netxen_get_sts_refhandle(sts_data);
@@ -1154,8 +1202,8 @@ static void netxen_process_rcv(struct netxen_adapter *adapter, int ctxid,
 	struct sk_buff *skb;
 	u32 length = netxen_get_sts_totallength(sts_data);
 	u32 desc_ctx;
+	u16 pkt_offset = 0, cksum;
 	struct nx_host_rds_ring *rds_ring;
-	int ret;
 
 	desc_ctx = netxen_get_sts_type(sts_data);
 	if (unlikely(desc_ctx >= NUM_RCV_DESC_RINGS)) {
@@ -1191,41 +1239,52 @@ static void netxen_process_rcv(struct netxen_adapter *adapter, int ctxid,
 		}
 	}
 
-	pci_unmap_single(pdev, buffer->dma, rds_ring->dma_size,
-			PCI_DMA_FROMDEVICE);
-
-	skb = (struct sk_buff *)buffer->skb;
+	cksum = netxen_get_sts_status(sts_data);
 
-	if (likely(adapter->rx_csum &&
-	    netxen_get_sts_status(sts_data) == STATUS_CKSUM_OK)) {
-		adapter->stats.csummed++;
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
-	} else
-		skb->ip_summed = CHECKSUM_NONE;
+	skb = netxen_process_rxbuf(adapter, rds_ring, index, cksum);
+	if (!skb)
+		return;
 
-	skb->dev = netdev;
 	if (desc_ctx == RCV_DESC_LRO_CTXID) {
 		/* True length was only available on the last pkt */
 		skb_put(skb, buffer->lro_length);
 	} else {
-		skb_put(skb, length);
+		if (length > rds_ring->skb_size)
+			skb_put(skb, rds_ring->skb_size);
+		else
+			skb_put(skb, length);
+
+		pkt_offset = netxen_get_sts_pkt_offset(sts_data);
+		if (pkt_offset)
+			skb_pull(skb, pkt_offset);
 	}
 
 	skb->protocol = eth_type_trans(skb, netdev);
 
-	ret = netif_receive_skb(skb);
-	netdev->last_rx = jiffies;
-
 	/*
-	 * We just consumed one buffer so post a buffer.
+	 * rx buffer chaining is disabled, walk and free
+	 * any spurious rx buffer chain.
 	 */
-	buffer->skb = NULL;
-	buffer->state = NETXEN_BUFFER_FREE;
-	buffer->lro_current_frags = 0;
-	buffer->lro_expected_frags = 0;
+	if (frag_desc) {
+		u16 i, nr_frags = desc->nr_frags;
 
-	adapter->stats.no_rcv++;
-	adapter->stats.rxbytes += length;
+		dev_kfree_skb_any(skb);
+		for (i = 0; i < nr_frags; i++) {
+			index = frag_desc->frag_handles[i];
+			skb = netxen_process_rxbuf(adapter,
+					rds_ring, index, cksum);
+			if (skb)
+				dev_kfree_skb_any(skb);
+		}
+		adapter->stats.rxdropped++;
+	} else {
+
+		netif_receive_skb(skb);
+		netdev->last_rx = jiffies;
+
+		adapter->stats.no_rcv++;
+		adapter->stats.rxbytes += length;
+	}
 }
 
 /* Process Receive status ring */
@@ -1233,9 +1292,11 @@ u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctxid, int max)
 {
 	struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctxid]);
 	struct status_desc *desc_head = recv_ctx->rcv_status_desc_head;
-	struct status_desc *desc;	/* used to read status desc here */
+	struct status_desc *desc, *frag_desc;
 	u32 consumer = recv_ctx->status_rx_consumer;
 	int count = 0, ring;
+	u64 sts_data;
+	u16 opcode;
 
 	while (count < max) {
 		desc = &desc_head[consumer];
@@ -1244,9 +1305,26 @@ u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctxid, int max)
 				netxen_get_sts_owner(desc));
 			break;
 		}
-		netxen_process_rcv(adapter, ctxid, desc);
+
+		sts_data = le64_to_cpu(desc->status_desc_data);
+		opcode = netxen_get_sts_opcode(sts_data);
+		frag_desc = NULL;
+		if (opcode == NETXEN_NIC_RXPKT_DESC) {
+			if (desc->nr_frags) {
+				consumer = get_next_index(consumer,
+						adapter->max_rx_desc_count);
+				frag_desc = &desc_head[consumer];
+				netxen_set_sts_owner(frag_desc,
+						STATUS_OWNER_PHANTOM);
+			}
+		}
+
+		netxen_process_rcv(adapter, ctxid, desc, frag_desc);
+
 		netxen_set_sts_owner(desc, STATUS_OWNER_PHANTOM);
-		consumer = (consumer + 1) & (adapter->max_rx_desc_count - 1);
+
+		consumer = get_next_index(consumer,
+				adapter->max_rx_desc_count);
 		count++;
 	}
 	for (ring = 0; ring < adapter->max_rds_rings; ring++)
@@ -1348,36 +1426,31 @@ void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid)
 	int index = 0;
 	netxen_ctx_msg msg = 0;
 	dma_addr_t dma;
+	struct list_head *head;
 
 	rds_ring = &recv_ctx->rds_rings[ringid];
 
 	producer = rds_ring->producer;
 	index = rds_ring->begin_alloc;
-	buffer = &rds_ring->rx_buf_arr[index];
+	head = &rds_ring->free_list;
+
 	/* We can start writing rx descriptors into the phantom memory. */
-	while (buffer->state == NETXEN_BUFFER_FREE) {
+	while (!list_empty(head)) {
+
 		skb = dev_alloc_skb(rds_ring->skb_size);
 		if (unlikely(!skb)) {
-			/*
-			 * TODO
-			 * We need to schedule the posting of buffers to the pegs.
-			 */
 			rds_ring->begin_alloc = index;
-			DPRINTK(ERR, "netxen_post_rx_buffers: "
-				" allocated only %d buffers\n", count);
 			break;
 		}
 
+		buffer = list_entry(head->next, struct netxen_rx_buffer, list);
+		list_del(&buffer->list);
+
 		count++;	/* now there should be no failure */
 		pdesc = &rds_ring->desc_head[producer];
 
-#if defined(XGB_DEBUG)
-		*(unsigned long *)(skb->head) = 0xc0debabe;
-		if (skb_is_nonlinear(skb)) {
-			printk("Allocated SKB @%p is nonlinear\n");
-		}
-#endif
-		skb_reserve(skb, 2);
+		if (!adapter->ahw.cut_through)
+			skb_reserve(skb, 2);
 		/* This will be setup when we receive the
 		 * buffer after it has been filled  FSL  TBD TBD
 		 * skb->dev = netdev;
@@ -1395,7 +1468,6 @@ void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid)
 		producer =
 			get_next_index(producer, rds_ring->max_rx_desc_count);
 		index = get_next_index(index, rds_ring->max_rx_desc_count);
-		buffer = &rds_ring->rx_buf_arr[index];
 	}
 	/* if we did allocate buffers, then write the count to Phantom */
 	if (count) {
@@ -1439,32 +1511,29 @@ static void netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
 	struct netxen_rx_buffer *buffer;
 	int count = 0;
 	int index = 0;
+	struct list_head *head;
 
 	rds_ring = &recv_ctx->rds_rings[ringid];
 
 	producer = rds_ring->producer;
 	index = rds_ring->begin_alloc;
-	buffer = &rds_ring->rx_buf_arr[index];
+	head = &rds_ring->free_list;
 	/* We can start writing rx descriptors into the phantom memory. */
-	while (buffer->state == NETXEN_BUFFER_FREE) {
+	while (!list_empty(head)) {
+
 		skb = dev_alloc_skb(rds_ring->skb_size);
 		if (unlikely(!skb)) {
-			/*
-			 * We need to schedule the posting of buffers to the pegs.
-			 */
 			rds_ring->begin_alloc = index;
-			DPRINTK(ERR, "netxen_post_rx_buffers_nodb: "
-				" allocated only %d buffers\n", count);
 			break;
 		}
+
+		buffer = list_entry(head->next, struct netxen_rx_buffer, list);
+		list_del(&buffer->list);
+
 		count++;	/* now there should be no failure */
 		pdesc = &rds_ring->desc_head[producer];
-		skb_reserve(skb, 2);
-		/*
-		 * This will be setup when we receive the
-		 * buffer after it has been filled
-		 * skb->dev = netdev;
-		 */
+		if (!adapter->ahw.cut_through)
+			skb_reserve(skb, 2);
 		buffer->skb = skb;
 		buffer->state = NETXEN_BUFFER_BUSY;
 		buffer->dma = pci_map_single(pdev, skb->data,
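The conversion above replaces the old linear scan for NETXEN_BUFFER_FREE entries in rx_buf_arr with O(1) pops from the new rds_ring->free_list; netxen_process_rxbuf() appends consumed buffers back to the tail. A condensed sketch of the recycle/post pattern (kernel-style C with illustrative demo types, not the driver's actual structures):

#include <linux/list.h>
#include <linux/skbuff.h>

struct demo_rx_buffer {
	struct list_head list;
	struct sk_buff *skb;
};

/* consume path: detach the skb and return the buffer to the pool */
static void demo_recycle(struct demo_rx_buffer *buf,
			 struct list_head *free_list)
{
	buf->skb = NULL;
	list_add_tail(&buf->list, free_list);
}

/* post path: pop buffers until the pool is empty or allocation fails */
static int demo_post(struct list_head *free_list, int skb_size)
{
	struct demo_rx_buffer *buf;
	struct sk_buff *skb;
	int count = 0;

	while (!list_empty(free_list)) {
		skb = dev_alloc_skb(skb_size);
		if (unlikely(!skb))
			break;

		buf = list_entry(free_list->next, struct demo_rx_buffer, list);
		list_del(&buf->list);
		buf->skb = skb;
		count++;
	}
	return count;
}
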
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index df48f89afff1..b3c084f0c751 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -844,17 +844,18 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		/* Handshake with the card before we register the devices. */
 		netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE);
 
-		if (NX_IS_REVISION_P3(revision_id)) {
-			adapter->hw_read_wx(adapter,
-					NETXEN_MIU_MN_CONTROL, &val, 4);
-			adapter->ahw.cut_through = (val & 0x4) ? 1 : 0;
-			dev_info(&pdev->dev, "firmware running in %s mode\n",
-			adapter->ahw.cut_through ? "cut through" : "legacy");
-		}
 	} /* first_driver */
 
 	netxen_nic_flash_print(adapter);
 
+	if (NX_IS_REVISION_P3(revision_id)) {
+		adapter->hw_read_wx(adapter,
+				NETXEN_MIU_MN_CONTROL, &val, 4);
+		adapter->ahw.cut_through = (val & 0x4) ? 1 : 0;
+		dev_info(&pdev->dev, "firmware running in %s mode\n",
+		adapter->ahw.cut_through ? "cut through" : "legacy");
+	}
+
 	/*
 	 * See if the firmware gave us a virtual-physical port mapping.
 	 */
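Moving the probe out of the first_driver block means every PCI function now reads the mode for itself; the mode is simply bit 2 of NETXEN_MIU_MN_CONTROL. A trivial decode sketch (hypothetical register value, standalone C):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t val = 0x4;	/* example NETXEN_MIU_MN_CONTROL readback */
	int cut_through = (val & 0x4) ? 1 : 0;

	printf("firmware running in %s mode\n",
	       cut_through ? "cut through" : "legacy");
	return 0;
}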