Diffstat (limited to 'drivers/net/cxgb3')
-rw-r--r--  drivers/net/cxgb3/adapter.h     |  38
-rw-r--r--  drivers/net/cxgb3/common.h      |  28
-rw-r--r--  drivers/net/cxgb3/cxgb3_main.c  |  43
-rw-r--r--  drivers/net/cxgb3/regs.h        |  11
-rw-r--r--  drivers/net/cxgb3/sge.c         | 423
-rw-r--r--  drivers/net/cxgb3/t3_hw.c       | 128
-rw-r--r--  drivers/net/cxgb3/version.h     |   2
7 files changed, 424 insertions, 249 deletions
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index 80c3d8f268a7..ab72563b81ee 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -71,27 +71,29 @@ enum { /* adapter flags */
     QUEUES_BOUND = (1 << 3),
 };
 
+struct fl_pg_chunk {
+    struct page *page;
+    void *va;
+    unsigned int offset;
+};
+
 struct rx_desc;
 struct rx_sw_desc;
 
-struct sge_fl_page {
-    struct skb_frag_struct frag;
-    unsigned char *va;
-};
-
-struct sge_fl {                     /* SGE per free-buffer list state */
-    unsigned int buf_size;          /* size of each Rx buffer */
-    unsigned int credits;           /* # of available Rx buffers */
-    unsigned int size;              /* capacity of free list */
-    unsigned int cidx;              /* consumer index */
-    unsigned int pidx;              /* producer index */
-    unsigned int gen;               /* free list generation */
-    unsigned int cntxt_id;          /* SGE context id for the free list */
-    struct sge_fl_page page;
-    struct rx_desc *desc;           /* address of HW Rx descriptor ring */
-    struct rx_sw_desc *sdesc;       /* address of SW Rx descriptor ring */
-    dma_addr_t phys_addr;           /* physical address of HW ring start */
-    unsigned long empty;            /* # of times queue ran out of buffers */
+struct sge_fl {                     /* SGE per free-buffer list state */
+    unsigned int buf_size;          /* size of each Rx buffer */
+    unsigned int credits;           /* # of available Rx buffers */
+    unsigned int size;              /* capacity of free list */
+    unsigned int cidx;              /* consumer index */
+    unsigned int pidx;              /* producer index */
+    unsigned int gen;               /* free list generation */
+    struct fl_pg_chunk pg_chunk;    /* page chunk cache */
+    unsigned int use_pages;         /* whether FL uses pages or sk_buffs */
+    struct rx_desc *desc;           /* address of HW Rx descriptor ring */
+    struct rx_sw_desc *sdesc;       /* address of SW Rx descriptor ring */
+    dma_addr_t phys_addr;           /* physical address of HW ring start */
+    unsigned int cntxt_id;          /* SGE context id for the free list */
+    unsigned long empty;            /* # of times queue ran out of buffers */
     unsigned long alloc_failed;     /* # of times buffer allocation failed */
 };
 
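Reviewer note (not part of the patch): the new fl_pg_chunk/use_pages fields let FL0 hand out fixed-size slices of one page instead of a full sk_buff per Rx descriptor; the last slice consumes the allocator's original page reference and every earlier slice takes an extra reference. A minimal standalone C sketch of that arithmetic, assuming a 4096-byte host page and the 2048-byte FL0_PG_CHUNK_SIZE chosen later in sge.c (HOST_PAGE_SIZE is an illustrative name, not a driver symbol):

/* Standalone sketch, not driver code: how one page is split into FL0 chunks. */
#include <stdio.h>

#define HOST_PAGE_SIZE     4096   /* assumed PAGE_SIZE */
#define FL0_PG_CHUNK_SIZE  2048   /* value used in sge.c */

int main(void)
{
    unsigned int offset, refs = 1;    /* alloc_page() hands back one reference */

    for (offset = 0; offset < HOST_PAGE_SIZE; offset += FL0_PG_CHUNK_SIZE) {
        printf("chunk: offset=%u size=%u\n", offset, FL0_PG_CHUNK_SIZE);
        if (offset + FL0_PG_CHUNK_SIZE < HOST_PAGE_SIZE)
            refs++;                   /* get_page() for all but the last chunk */
    }
    printf("references handed out: %u (two chunks per 4 KB page)\n", refs);
    return 0;
}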
diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
index 8d1379633698..16378004507a 100644
--- a/drivers/net/cxgb3/common.h
+++ b/drivers/net/cxgb3/common.h
@@ -101,6 +101,7 @@ enum {
     TCB_SIZE = 128,        /* TCB size */
     NMTUS = 16,            /* size of MTU table */
     NCCTRL_WIN = 32,       /* # of congestion control windows */
+    PROTO_SRAM_LINES = 128, /* size of TP sram */
 };
 
 #define MAX_RX_COALESCING_LEN 16224U
@@ -124,6 +125,30 @@ enum { /* adapter interrupt-maintained statistics */
 };
 
 enum {
+    TP_VERSION_MAJOR = 1,
+    TP_VERSION_MINOR = 0,
+    TP_VERSION_MICRO = 44
+};
+
+#define S_TP_VERSION_MAJOR 16
+#define M_TP_VERSION_MAJOR 0xFF
+#define V_TP_VERSION_MAJOR(x) ((x) << S_TP_VERSION_MAJOR)
+#define G_TP_VERSION_MAJOR(x) \
+    (((x) >> S_TP_VERSION_MAJOR) & M_TP_VERSION_MAJOR)
+
+#define S_TP_VERSION_MINOR 8
+#define M_TP_VERSION_MINOR 0xFF
+#define V_TP_VERSION_MINOR(x) ((x) << S_TP_VERSION_MINOR)
+#define G_TP_VERSION_MINOR(x) \
+    (((x) >> S_TP_VERSION_MINOR) & M_TP_VERSION_MINOR)
+
+#define S_TP_VERSION_MICRO 0
+#define M_TP_VERSION_MICRO 0xFF
+#define V_TP_VERSION_MICRO(x) ((x) << S_TP_VERSION_MICRO)
+#define G_TP_VERSION_MICRO(x) \
+    (((x) >> S_TP_VERSION_MICRO) & M_TP_VERSION_MICRO)
+
+enum {
     SGE_QSETS = 8,          /* # of SGE Tx/Rx/RspQ sets */
     SGE_RXQ_PER_SET = 2,    /* # of Rx queues per set */
     SGE_TXQ_PER_SET = 3     /* # of Tx queues per set */
@@ -654,6 +679,9 @@ const struct adapter_info *t3_get_adapter_info(unsigned int board_id);
 int t3_seeprom_read(struct adapter *adapter, u32 addr, u32 *data);
 int t3_seeprom_write(struct adapter *adapter, u32 addr, u32 data);
 int t3_seeprom_wp(struct adapter *adapter, int enable);
+int t3_check_tpsram_version(struct adapter *adapter);
+int t3_check_tpsram(struct adapter *adapter, u8 *tp_ram, unsigned int size);
+int t3_set_proto_sram(struct adapter *adap, u8 *data);
 int t3_read_flash(struct adapter *adapter, unsigned int addr,
                   unsigned int nwords, u32 *data, int byte_oriented);
 int t3_load_fw(struct adapter *adapter, const u8 * fw_data, unsigned int size);
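Reviewer note (not part of the patch): the S_/M_/V_/G_TP_VERSION_* macros above pack major.minor.micro into one 32-bit word (major in bits 23:16, minor in 15:8, micro in 7:0). A small standalone sketch of that packing for the 1.0.44 version declared here (the V_MAJOR/V_MINOR/V_MICRO names are local to the sketch):

/* Standalone sketch of the TP version word layout mirrored from the macros above. */
#include <stdio.h>
#include <stdint.h>

#define V_MAJOR(x) ((uint32_t)(x) << 16)
#define V_MINOR(x) ((uint32_t)(x) << 8)
#define V_MICRO(x) ((uint32_t)(x))

int main(void)
{
    uint32_t vers = V_MAJOR(1) | V_MINOR(0) | V_MICRO(44);

    /* 1.0.44 packs to 0x0001002c; the G_* macros simply reverse the shifts. */
    printf("packed TP version: 0x%08x -> %u.%u.%u\n", vers,
           (vers >> 16) & 0xff, (vers >> 8) & 0xff, vers & 0xff);
    return 0;
}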
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index d8a1f5452c51..15defe4c4f05 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -2088,6 +2088,42 @@ static void cxgb_netpoll(struct net_device *dev)
 }
 #endif
 
+#define TPSRAM_NAME "t3%c_protocol_sram-%d.%d.%d.bin"
+int update_tpsram(struct adapter *adap)
+{
+    const struct firmware *tpsram;
+    char buf[64];
+    struct device *dev = &adap->pdev->dev;
+    int ret;
+    char rev;
+
+    rev = adap->params.rev == T3_REV_B2 ? 'b' : 'a';
+
+    snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
+             TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
+
+    ret = request_firmware(&tpsram, buf, dev);
+    if (ret < 0) {
+        dev_err(dev, "could not load TP SRAM: unable to load %s\n",
+                buf);
+        return ret;
+    }
+
+    ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
+    if (ret)
+        goto release_tpsram;
+
+    ret = t3_set_proto_sram(adap, tpsram->data);
+    if (ret)
+        dev_err(dev, "loading protocol SRAM failed\n");
+
+release_tpsram:
+    release_firmware(tpsram);
+
+    return ret;
+}
+
+
 /*
  * Periodic accumulation of MAC statistics.
  */
@@ -2437,6 +2473,13 @@ static int __devinit init_one(struct pci_dev *pdev,
         goto out_free_dev;
     }
 
+    err = t3_check_tpsram_version(adapter);
+    if (err == -EINVAL)
+        err = update_tpsram(adapter);
+
+    if (err)
+        goto out_free_dev;
+
     /*
      * The card is now ready to go.  If any errors occur during device
      * registration we do not fail the whole card but rather proceed only
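Reviewer note (not part of the patch): update_tpsram() builds the firmware file name from the board revision and the TP version constants, so a T3B2 part running TP 1.0.44 asks request_firmware() for "t3b_protocol_sram-1.0.44.bin". A quick standalone sketch of the name expansion, reusing the TPSRAM_NAME format string from the patch:

/* Standalone sketch of the TPSRAM_NAME expansion used by update_tpsram(). */
#include <stdio.h>

#define TPSRAM_NAME "t3%c_protocol_sram-%d.%d.%d.bin"

int main(void)
{
    char buf[64];
    char rev = 'b';    /* 'b' for T3_REV_B2 boards, otherwise 'a' */

    snprintf(buf, sizeof(buf), TPSRAM_NAME, rev, 1, 0, 44);
    printf("%s\n", buf);    /* prints t3b_protocol_sram-1.0.44.bin */
    return 0;
}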
diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
index 020859c855d7..aa80313c922e 100644
--- a/drivers/net/cxgb3/regs.h
+++ b/drivers/net/cxgb3/regs.h
@@ -1160,6 +1160,8 @@
 
 #define A_TP_MOD_CHANNEL_WEIGHT 0x434
 
+#define A_TP_MOD_RATE_LIMIT 0x438
+
 #define A_TP_PIO_ADDR 0x440
 
 #define A_TP_PIO_DATA 0x444
@@ -1214,6 +1216,15 @@
 #define G_TXDROPCNTCH0RCVD(x) (((x) >> S_TXDROPCNTCH0RCVD) & \
                                M_TXDROPCNTCH0RCVD)
 
+#define A_TP_PROXY_FLOW_CNTL 0x4b0
+
+#define A_TP_EMBED_OP_FIELD0 0x4e8
+#define A_TP_EMBED_OP_FIELD1 0x4ec
+#define A_TP_EMBED_OP_FIELD2 0x4f0
+#define A_TP_EMBED_OP_FIELD3 0x4f4
+#define A_TP_EMBED_OP_FIELD4 0x4f8
+#define A_TP_EMBED_OP_FIELD5 0x4fc
+
 #define A_ULPRX_CTL 0x500
 
 #define S_ROUND_ROBIN 4
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index a60ec4d4707c..a2cfd68ac757 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -46,23 +46,16 @@
 
 #define SGE_RX_SM_BUF_SIZE 1536
 
-/*
- * If USE_RX_PAGE is defined, the small freelist populated with (partial)
- * pages instead of skbs. Pages are carved up into RX_PAGE_SIZE chunks (must
- * be a multiple of the host page size).
- */
-#define USE_RX_PAGE
-#define RX_PAGE_SIZE 2048
-
-/*
- * skb freelist packets are copied into a new skb (and the freelist one is
- * reused) if their len is <=
- */
 #define SGE_RX_COPY_THRES  256
+#define SGE_RX_PULL_LEN 128
 
 /*
- * Minimum number of freelist entries before we start dropping TUNNEL frames.
+ * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks.
+ * It must be a divisor of PAGE_SIZE.  If set to 0 FL0 will use sk_buffs
+ * directly.
  */
+#define FL0_PG_CHUNK_SIZE  2048
+
 #define SGE_RX_DROP_THRES 16
 
 /*
@@ -100,12 +93,12 @@ struct tx_sw_desc { /* SW state per Tx descriptor */
     struct sk_buff *skb;
 };
 
 struct rx_sw_desc {             /* SW state per Rx descriptor */
     union {
         struct sk_buff *skb;
-        struct sge_fl_page page;
-    } t;
+        struct fl_pg_chunk pg_chunk;
+    };
     DECLARE_PCI_UNMAP_ADDR(dma_addr);
 };
 
 struct rsp_desc {               /* response queue descriptor */
@@ -351,27 +344,26 @@ static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
 
         pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
                          q->buf_size, PCI_DMA_FROMDEVICE);
-
-        if (q->buf_size != RX_PAGE_SIZE) {
-            kfree_skb(d->t.skb);
-            d->t.skb = NULL;
+        if (q->use_pages) {
+            put_page(d->pg_chunk.page);
+            d->pg_chunk.page = NULL;
         } else {
-            if (d->t.page.frag.page)
-                put_page(d->t.page.frag.page);
-            d->t.page.frag.page = NULL;
+            kfree_skb(d->skb);
+            d->skb = NULL;
         }
         if (++cidx == q->size)
             cidx = 0;
     }
 
-    if (q->page.frag.page)
-        put_page(q->page.frag.page);
-    q->page.frag.page = NULL;
+    if (q->pg_chunk.page) {
+        __free_page(q->pg_chunk.page);
+        q->pg_chunk.page = NULL;
+    }
 }
 
 /**
  *  add_one_rx_buf - add a packet buffer to a free-buffer list
- *  @va: va of the buffer to add
+ *  @va: buffer start VA
  *  @len: the buffer length
  *  @d: the HW Rx descriptor to write
  *  @sd: the SW Rx descriptor to write
@@ -381,7 +373,7 @@ static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
 *  Add a buffer of the given length to the supplied HW and SW Rx
 *  descriptors.
 */
-static inline void add_one_rx_buf(unsigned char *va, unsigned int len,
+static inline void add_one_rx_buf(void *va, unsigned int len,
                                   struct rx_desc *d, struct rx_sw_desc *sd,
                                   unsigned int gen, struct pci_dev *pdev)
 {
@@ -397,6 +389,27 @@ static inline void add_one_rx_buf(unsigned char *va, unsigned int len,
     d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
 }
 
+static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp)
+{
+    if (!q->pg_chunk.page) {
+        q->pg_chunk.page = alloc_page(gfp);
+        if (unlikely(!q->pg_chunk.page))
+            return -ENOMEM;
+        q->pg_chunk.va = page_address(q->pg_chunk.page);
+        q->pg_chunk.offset = 0;
+    }
+    sd->pg_chunk = q->pg_chunk;
+
+    q->pg_chunk.offset += q->buf_size;
+    if (q->pg_chunk.offset == PAGE_SIZE)
+        q->pg_chunk.page = NULL;
+    else {
+        q->pg_chunk.va += q->buf_size;
+        get_page(q->pg_chunk.page);
+    }
+    return 0;
+}
+
 /**
  *  refill_fl - refill an SGE free-buffer list
  *  @adapter: the adapter
@@ -410,49 +423,29 @@ static inline void add_one_rx_buf(unsigned char *va, unsigned int len,
  */
 static void refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
 {
+    void *buf_start;
     struct rx_sw_desc *sd = &q->sdesc[q->pidx];
     struct rx_desc *d = &q->desc[q->pidx];
-    struct sge_fl_page *p = &q->page;
 
     while (n--) {
-        unsigned char *va;
-
-        if (unlikely(q->buf_size != RX_PAGE_SIZE)) {
-            struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
-
-            if (!skb) {
-                q->alloc_failed++;
+        if (q->use_pages) {
+            if (unlikely(alloc_pg_chunk(q, sd, gfp))) {
+nomem:          q->alloc_failed++;
                 break;
             }
-            va = skb->data;
-            sd->t.skb = skb;
+            buf_start = sd->pg_chunk.va;
         } else {
-            if (!p->frag.page) {
-                p->frag.page = alloc_pages(gfp, 0);
-                if (unlikely(!p->frag.page)) {
-                    q->alloc_failed++;
-                    break;
-                } else {
-                    p->frag.size = RX_PAGE_SIZE;
-                    p->frag.page_offset = 0;
-                    p->va = page_address(p->frag.page);
-                }
-            }
+            struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
 
-            memcpy(&sd->t, p, sizeof(*p));
-            va = p->va;
+            if (!skb)
+                goto nomem;
 
-            p->frag.page_offset += RX_PAGE_SIZE;
-            BUG_ON(p->frag.page_offset > PAGE_SIZE);
-            p->va += RX_PAGE_SIZE;
-            if (p->frag.page_offset == PAGE_SIZE)
-                p->frag.page = NULL;
-            else
-                get_page(p->frag.page);
+            sd->skb = skb;
+            buf_start = skb->data;
         }
 
-        add_one_rx_buf(va, q->buf_size, d, sd, q->gen, adap->pdev);
-
+        add_one_rx_buf(buf_start, q->buf_size, d, sd, q->gen,
+                       adap->pdev);
         d++;
         sd++;
         if (++q->pidx == q->size) {
@@ -487,7 +480,7 @@ static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
     struct rx_desc *from = &q->desc[idx];
     struct rx_desc *to = &q->desc[q->pidx];
 
-    memcpy(&q->sdesc[q->pidx], &q->sdesc[idx], sizeof(struct rx_sw_desc));
+    q->sdesc[q->pidx] = q->sdesc[idx];
     to->addr_lo = from->addr_lo;    /* already big endian */
     to->addr_hi = from->addr_hi;    /* likewise */
     wmb();
@@ -650,6 +643,132 @@ static inline unsigned int flits_to_desc(unsigned int n)
 }
 
 /**
+ *  get_packet - return the next ingress packet buffer from a free list
+ *  @adap: the adapter that received the packet
+ *  @fl: the SGE free list holding the packet
+ *  @len: the packet length including any SGE padding
+ *  @drop_thres: # of remaining buffers before we start dropping packets
+ *
+ *  Get the next packet from a free list and complete setup of the
+ *  sk_buff.  If the packet is small we make a copy and recycle the
+ *  original buffer, otherwise we use the original buffer itself.  If a
+ *  positive drop threshold is supplied packets are dropped and their
+ *  buffers recycled if (a) the number of remaining buffers is under the
+ *  threshold and the packet is too big to copy, or (b) the packet should
+ *  be copied but there is no memory for the copy.
+ */
+static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
+                                  unsigned int len, unsigned int drop_thres)
+{
+    struct sk_buff *skb = NULL;
+    struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
+
+    prefetch(sd->skb->data);
+    fl->credits--;
+
+    if (len <= SGE_RX_COPY_THRES) {
+        skb = alloc_skb(len, GFP_ATOMIC);
+        if (likely(skb != NULL)) {
+            __skb_put(skb, len);
+            pci_dma_sync_single_for_cpu(adap->pdev,
+                                        pci_unmap_addr(sd, dma_addr), len,
+                                        PCI_DMA_FROMDEVICE);
+            memcpy(skb->data, sd->skb->data, len);
+            pci_dma_sync_single_for_device(adap->pdev,
+                                           pci_unmap_addr(sd, dma_addr), len,
+                                           PCI_DMA_FROMDEVICE);
+        } else if (!drop_thres)
+            goto use_orig_buf;
+recycle:
+        recycle_rx_buf(adap, fl, fl->cidx);
+        return skb;
+    }
+
+    if (unlikely(fl->credits < drop_thres))
+        goto recycle;
+
+use_orig_buf:
+    pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
+                     fl->buf_size, PCI_DMA_FROMDEVICE);
+    skb = sd->skb;
+    skb_put(skb, len);
+    __refill_fl(adap, fl);
+    return skb;
+}
+
+/**
+ *  get_packet_pg - return the next ingress packet buffer from a free list
+ *  @adap: the adapter that received the packet
+ *  @fl: the SGE free list holding the packet
+ *  @len: the packet length including any SGE padding
+ *  @drop_thres: # of remaining buffers before we start dropping packets
+ *
+ *  Get the next packet from a free list populated with page chunks.
+ *  If the packet is small we make a copy and recycle the original buffer,
+ *  otherwise we attach the original buffer as a page fragment to a fresh
+ *  sk_buff.  If a positive drop threshold is supplied packets are dropped
+ *  and their buffers recycled if (a) the number of remaining buffers is
+ *  under the threshold and the packet is too big to copy, or (b) there's
+ *  no system memory.
+ *
+ *  Note: this function is similar to @get_packet but deals with Rx buffers
+ *  that are page chunks rather than sk_buffs.
+ */
+static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
+                                     unsigned int len, unsigned int drop_thres)
+{
+    struct sk_buff *skb = NULL;
+    struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
+
+    if (len <= SGE_RX_COPY_THRES) {
+        skb = alloc_skb(len, GFP_ATOMIC);
+        if (likely(skb != NULL)) {
+            __skb_put(skb, len);
+            pci_dma_sync_single_for_cpu(adap->pdev,
+                                        pci_unmap_addr(sd, dma_addr), len,
+                                        PCI_DMA_FROMDEVICE);
+            memcpy(skb->data, sd->pg_chunk.va, len);
+            pci_dma_sync_single_for_device(adap->pdev,
+                                           pci_unmap_addr(sd, dma_addr), len,
+                                           PCI_DMA_FROMDEVICE);
+        } else if (!drop_thres)
+            return NULL;
+recycle:
+        fl->credits--;
+        recycle_rx_buf(adap, fl, fl->cidx);
+        return skb;
+    }
+
+    if (unlikely(fl->credits <= drop_thres))
+        goto recycle;
+
+    skb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);
+    if (unlikely(!skb)) {
+        if (!drop_thres)
+            return NULL;
+        goto recycle;
+    }
+
+    pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
+                     fl->buf_size, PCI_DMA_FROMDEVICE);
+    __skb_put(skb, SGE_RX_PULL_LEN);
+    memcpy(skb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
+    skb_fill_page_desc(skb, 0, sd->pg_chunk.page,
+                       sd->pg_chunk.offset + SGE_RX_PULL_LEN,
+                       len - SGE_RX_PULL_LEN);
+    skb->len = len;
+    skb->data_len = len - SGE_RX_PULL_LEN;
+    skb->truesize += skb->data_len;
+
+    fl->credits--;
+    /*
+     * We do not refill FLs here, we let the caller do it to overlap a
+     * prefetch.
+     */
+    return skb;
+}
+
+/**
  *  get_imm_packet - return the next ingress packet buffer from a response
  *  @resp: the response descriptor containing the packet data
  *
@@ -1715,85 +1834,6 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
     netif_rx(skb);
 }
 
-#define SKB_DATA_SIZE 128
-
-static void skb_data_init(struct sk_buff *skb, struct sge_fl_page *p,
-                          unsigned int len)
-{
-    skb->len = len;
-    if (len <= SKB_DATA_SIZE) {
-        skb_copy_to_linear_data(skb, p->va, len);
-        skb->tail += len;
-        put_page(p->frag.page);
-    } else {
-        skb_copy_to_linear_data(skb, p->va, SKB_DATA_SIZE);
-        skb_shinfo(skb)->frags[0].page = p->frag.page;
-        skb_shinfo(skb)->frags[0].page_offset =
-            p->frag.page_offset + SKB_DATA_SIZE;
-        skb_shinfo(skb)->frags[0].size = len - SKB_DATA_SIZE;
-        skb_shinfo(skb)->nr_frags = 1;
-        skb->data_len = len - SKB_DATA_SIZE;
-        skb->tail += SKB_DATA_SIZE;
-        skb->truesize += skb->data_len;
-    }
-}
-
-/**
- *  get_packet - return the next ingress packet buffer from a free list
- *  @adap: the adapter that received the packet
- *  @fl: the SGE free list holding the packet
- *  @len: the packet length including any SGE padding
- *  @drop_thres: # of remaining buffers before we start dropping packets
- *
- *  Get the next packet from a free list and complete setup of the
- *  sk_buff.  If the packet is small we make a copy and recycle the
- *  original buffer, otherwise we use the original buffer itself.  If a
- *  positive drop threshold is supplied packets are dropped and their
- *  buffers recycled if (a) the number of remaining buffers is under the
- *  threshold and the packet is too big to copy, or (b) the packet should
- *  be copied but there is no memory for the copy.
- */
-static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
-                                  unsigned int len, unsigned int drop_thres)
-{
-    struct sk_buff *skb = NULL;
-    struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
-
-    prefetch(sd->t.skb->data);
-
-    if (len <= SGE_RX_COPY_THRES) {
-        skb = alloc_skb(len, GFP_ATOMIC);
-        if (likely(skb != NULL)) {
-            struct rx_desc *d = &fl->desc[fl->cidx];
-            dma_addr_t mapping =
-                (dma_addr_t)((u64) be32_to_cpu(d->addr_hi) << 32 |
-                             be32_to_cpu(d->addr_lo));
-
-            __skb_put(skb, len);
-            pci_dma_sync_single_for_cpu(adap->pdev, mapping, len,
-                                        PCI_DMA_FROMDEVICE);
-            skb_copy_from_linear_data(sd->t.skb, skb->data, len);
-            pci_dma_sync_single_for_device(adap->pdev, mapping, len,
-                                           PCI_DMA_FROMDEVICE);
-        } else if (!drop_thres)
-            goto use_orig_buf;
-recycle:
-        recycle_rx_buf(adap, fl, fl->cidx);
-        return skb;
-    }
-
-    if (unlikely(fl->credits < drop_thres))
-        goto recycle;
-
-use_orig_buf:
-    pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
-                     fl->buf_size, PCI_DMA_FROMDEVICE);
-    skb = sd->t.skb;
-    skb_put(skb, len);
-    __refill_fl(adap, fl);
-    return skb;
-}
-
 /**
  *  handle_rsp_cntrl_info - handles control information in a response
  *  @qs: the queue set corresponding to the response
@@ -1935,7 +1975,7 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
         } else if (flags & F_RSPD_IMM_DATA_VALID) {
             skb = get_imm_packet(r);
             if (unlikely(!skb)) {
-              no_mem:
+no_mem:
                 q->next_holdoff = NOMEM_INTR_DELAY;
                 q->nomem++;
                 /* consume one credit since we tried */
@@ -1945,53 +1985,29 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
             q->imm_data++;
             ethpad = 0;
         } else if ((len = ntohl(r->len_cq)) != 0) {
-            struct sge_fl *fl =
-                (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
+            struct sge_fl *fl;
 
-            if (fl->buf_size == RX_PAGE_SIZE) {
-                struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
-                struct sge_fl_page *p = &sd->t.page;
-
-                prefetch(p->va);
-                prefetch(p->va + L1_CACHE_BYTES);
+            fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
+            if (fl->use_pages) {
+                void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
 
+                prefetch(addr);
+#if L1_CACHE_BYTES < 128
+                prefetch(addr + L1_CACHE_BYTES);
+#endif
                 __refill_fl(adap, fl);
 
-                pci_unmap_single(adap->pdev,
-                                 pci_unmap_addr(sd, dma_addr),
-                                 fl->buf_size,
-                                 PCI_DMA_FROMDEVICE);
-
-                if (eth) {
-                    if (unlikely(fl->credits <
-                                 SGE_RX_DROP_THRES))
-                        goto eth_recycle;
-
-                    skb = alloc_skb(SKB_DATA_SIZE,
-                                    GFP_ATOMIC);
-                    if (unlikely(!skb)) {
-eth_recycle:
-                        q->rx_drops++;
-                        recycle_rx_buf(adap, fl,
-                                       fl->cidx);
-                        goto eth_done;
-                    }
-                } else {
-                    skb = alloc_skb(SKB_DATA_SIZE,
-                                    GFP_ATOMIC);
-                    if (unlikely(!skb))
-                        goto no_mem;
-                }
-
-                skb_data_init(skb, p, G_RSPD_LEN(len));
-eth_done:
-                fl->credits--;
-                q->eth_pkts++;
-            } else {
-                fl->credits--;
+                skb = get_packet_pg(adap, fl, G_RSPD_LEN(len),
+                                    eth ? SGE_RX_DROP_THRES : 0);
+            } else
                 skb = get_packet(adap, fl, G_RSPD_LEN(len),
                                  eth ? SGE_RX_DROP_THRES : 0);
-            }
+            if (unlikely(!skb)) {
+                if (!eth)
+                    goto no_mem;
+                q->rx_drops++;
+            } else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT))
+                __skb_pull(skb, 2);
 
             if (++fl->cidx == fl->size)
                 fl->cidx = 0;
@@ -2016,20 +2032,15 @@ eth_done:
                 q->credits = 0;
         }
 
-        if (skb) {
-            /* Preserve the RSS info in csum & priority */
-            skb->csum = rss_hi;
-            skb->priority = rss_lo;
-
+        if (likely(skb != NULL)) {
             if (eth)
                 rx_eth(adap, q, skb, ethpad);
             else {
-                if (unlikely(r->rss_hdr.opcode ==
-                             CPL_TRACE_PKT))
-                    __skb_pull(skb, ethpad);
-
-                ngathered = rx_offload(&adap->tdev, q,
-                                       skb, offload_skbs,
+                /* Preserve the RSS info in csum & priority */
+                skb->csum = rss_hi;
+                skb->priority = rss_lo;
+                ngathered = rx_offload(&adap->tdev, q, skb,
+                                       offload_skbs,
                                        ngathered);
             }
         }
@@ -2635,25 +2646,15 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
     q->txq[TXQ_ETH].stop_thres = nports *
         flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
 
-    if (!is_offload(adapter)) {
-#ifdef USE_RX_PAGE
-        q->fl[0].buf_size = RX_PAGE_SIZE;
+#if FL0_PG_CHUNK_SIZE > 0
+    q->fl[0].buf_size = FL0_PG_CHUNK_SIZE;
 #else
-        q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + 2 +
-            sizeof(struct cpl_rx_pkt);
+    q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data);
 #endif
-        q->fl[1].buf_size = MAX_FRAME_SIZE + 2 +
-            sizeof(struct cpl_rx_pkt);
-    } else {
-#ifdef USE_RX_PAGE
-        q->fl[0].buf_size = RX_PAGE_SIZE;
-#else
-        q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE +
-            sizeof(struct cpl_rx_data);
-#endif
-        q->fl[1].buf_size = (16 * 1024) -
-            SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-    }
+    q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
+    q->fl[1].buf_size = is_offload(adapter) ?
+        (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
+        MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt);
 
     spin_lock(&adapter->sge.reg_lock);
 
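Reviewer note (not part of the patch): for packets larger than SGE_RX_COPY_THRES, get_packet_pg() copies only the first SGE_RX_PULL_LEN bytes into the sk_buff's linear area and attaches the rest of the page chunk as a fragment. A standalone sketch of the resulting split for an example 1500-byte frame, using the constants from this file:

/* Standalone sketch of the linear/fragment split done by get_packet_pg(). */
#include <stdio.h>

#define SGE_RX_COPY_THRES 256
#define SGE_RX_PULL_LEN   128

int main(void)
{
    unsigned int len = 1500;    /* example frame length, including SGE padding */

    if (len <= SGE_RX_COPY_THRES) {
        printf("whole %u-byte packet copied, page chunk recycled\n", len);
    } else {
        /* header pulled into the linear area, payload left in the page chunk */
        printf("linear: %u bytes, page fragment: %u bytes\n",
               SGE_RX_PULL_LEN, len - SGE_RX_PULL_LEN);
    }
    return 0;
}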
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index fb485d0a43d8..dd3149d94ba8 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -847,6 +847,64 @@ static int t3_write_flash(struct adapter *adapter, unsigned int addr,
     return 0;
 }
 
+/**
+ *  t3_check_tpsram_version - read the tp sram version
+ *  @adapter: the adapter
+ *
+ *  Reads the protocol sram version from serial eeprom.
+ */
+int t3_check_tpsram_version(struct adapter *adapter)
+{
+    int ret;
+    u32 vers;
+    unsigned int major, minor;
+
+    /* Get version loaded in SRAM */
+    t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
+    ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
+                          1, 1, 5, 1);
+    if (ret)
+        return ret;
+
+    vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
+
+    major = G_TP_VERSION_MAJOR(vers);
+    minor = G_TP_VERSION_MINOR(vers);
+
+    if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
+        return 0;
+
+    return -EINVAL;
+}
+
+/**
+ *  t3_check_tpsram - check if provided protocol SRAM
+ *                    is compatible with this driver
+ *  @adapter: the adapter
+ *  @tp_sram: the firmware image to write
+ *  @size: image size
+ *
+ *  Checks if an adapter's tp sram is compatible with the driver.
+ *  Returns 0 if the versions are compatible, a negative error otherwise.
+ */
+int t3_check_tpsram(struct adapter *adapter, u8 *tp_sram, unsigned int size)
+{
+    u32 csum;
+    unsigned int i;
+    const u32 *p = (const u32 *)tp_sram;
+
+    /* Verify checksum */
+    for (csum = 0, i = 0; i < size / sizeof(csum); i++)
+        csum += ntohl(p[i]);
+    if (csum != 0xffffffff) {
+        CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
+               csum);
+        return -EINVAL;
+    }
+
+    return 0;
+}
+
 enum fw_version_type {
     FW_VERSION_N3,
     FW_VERSION_T3
@@ -921,7 +979,7 @@ static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
 /*
  *  t3_load_fw - download firmware
  *  @adapter: the adapter
- *  @fw_data: the firrware image to write
+ *  @fw_data: the firmware image to write
  *  @size: image size
  *
  *  Write the supplied firmware image to the card's serial flash.
@@ -2362,7 +2420,7 @@ static void tp_config(struct adapter *adap, const struct tp_params *p)
                  F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
     t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
                  F_MTUENABLE | V_WINDOWSCALEMODE(1) |
-                 V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
+                 V_TIMESTAMPSMODE(0) | V_SACKMODE(1) | V_SACKRX(1));
     t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
                  V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
                  V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
@@ -2371,16 +2429,18 @@ static void tp_config(struct adapter *adap, const struct tp_params *p)
                  F_IPV6ENABLE | F_NICMODE);
     t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
     t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
-    t3_set_reg_field(adap, A_TP_PARA_REG6,
-                     adap->params.rev > 0 ? F_ENABLEESND : F_T3A_ENABLEESND,
-                     0);
+    t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
+                     adap->params.rev > 0 ? F_ENABLEESND :
+                     F_T3A_ENABLEESND);
 
     t3_set_reg_field(adap, A_TP_PC_CONFIG,
-                     F_ENABLEEPCMDAFULL | F_ENABLEOCSPIFULL,
-                     F_TXDEFERENABLE | F_HEARBEATDACK | F_TXCONGESTIONMODE |
-                     F_RXCONGESTIONMODE);
+                     F_ENABLEEPCMDAFULL,
+                     F_ENABLEOCSPIFULL |F_TXDEFERENABLE | F_HEARBEATDACK |
+                     F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
     t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL, 0);
-
+    t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
+    t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);
+
     if (adap->params.rev > 0) {
         tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
         t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
@@ -2390,9 +2450,10 @@ static void tp_config(struct adapter *adap, const struct tp_params *p)
     } else
         t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
 
-    t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0x12121212);
-    t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0x12121212);
-    t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0x1212);
+    t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
+    t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
+    t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
+    t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
 }
 
 /* Desired TP timer resolution in usec */
@@ -2468,6 +2529,7 @@ int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh)
             val |= F_RXCOALESCEENABLE;
         if (psh)
             val |= F_RXCOALESCEPSHEN;
+        size = min(MAX_RX_COALESCING_LEN, size);
         t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
                      V_MAXRXDATA(MAX_RX_COALESCING_LEN));
     }
@@ -2496,11 +2558,11 @@ static void __devinit init_mtus(unsigned short mtus[])
  * it can accomodate max size TCP/IP headers when SACK and timestamps
  * are enabled and still have at least 8 bytes of payload.
  */
     mtus[0] = 88;
-    mtus[1] = 256;
-    mtus[2] = 512;
-    mtus[3] = 576;
-    mtus[4] = 808;
+    mtus[1] = 88;
+    mtus[2] = 256;
+    mtus[3] = 512;
+    mtus[4] = 576;
     mtus[5] = 1024;
     mtus[6] = 1280;
     mtus[7] = 1492;
@@ -2682,6 +2744,34 @@ static void ulp_config(struct adapter *adap, const struct tp_params *p)
     t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
 }
 
+/**
+ *  t3_set_proto_sram - set the contents of the protocol sram
+ *  @adapter: the adapter
+ *  @data: the protocol image
+ *
+ *  Write the contents of the protocol SRAM.
+ */
+int t3_set_proto_sram(struct adapter *adap, u8 *data)
+{
+    int i;
+    u32 *buf = (u32 *)data;
+
+    for (i = 0; i < PROTO_SRAM_LINES; i++) {
+        t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, cpu_to_be32(*buf++));
+        t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, cpu_to_be32(*buf++));
+        t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, cpu_to_be32(*buf++));
+        t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, cpu_to_be32(*buf++));
+        t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, cpu_to_be32(*buf++));
+
+        t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
+        if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
+            return -EIO;
+    }
+    t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);
+
+    return 0;
+}
+
 void t3_config_trace_filter(struct adapter *adapter,
                             const struct trace_params *tp, int filter_index,
                             int invert, int enable)
@@ -2802,7 +2892,7 @@ static void init_hw_for_avail_ports(struct adapter *adap, int nports)
         t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
         t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_TPTXPORT0EN |
                      F_PORT0ACTIVE | F_ENFORCEPKT);
-        t3_write_reg(adap, A_PM1_TX_CFG, 0xc000c000);
+        t3_write_reg(adap, A_PM1_TX_CFG, 0xffffffff);
     } else {
         t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
         t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
@@ -3097,7 +3187,7 @@ int t3_init_hw(struct adapter *adapter, u32 fw_params)
     else
         t3_set_reg_field(adapter, A_PCIX_CFG, 0, F_CLIDECEN);
 
-    t3_write_reg(adapter, A_PM1_RX_CFG, 0xf000f000);
+    t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
     init_hw_for_avail_ports(adapter, adapter->params.nports);
     t3_sge_init(adapter, &adapter->params.sge);
 
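Reviewer note (not part of the patch): t3_check_tpsram() only accepts an image whose 32-bit big-endian words sum to 0xffffffff. The same rule can be checked on the host before shipping a t3*_protocol_sram-*.bin file; a minimal standalone sketch, assuming a POSIX toolchain and an image size that is a multiple of 4 bytes:

/* Standalone checker mirroring the checksum rule in t3_check_tpsram(). */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>   /* ntohl() */

int main(int argc, char **argv)
{
    uint32_t word, csum = 0;
    FILE *f;

    if (argc != 2 || !(f = fopen(argv[1], "rb"))) {
        fprintf(stderr, "usage: %s <tp_sram.bin>\n", argv[0]);
        return 1;
    }
    while (fread(&word, sizeof(word), 1, f) == 1)
        csum += ntohl(word);     /* words are stored big endian in the image */
    fclose(f);

    printf("checksum 0x%08x: %s\n", csum,
           csum == 0xffffffff ? "ok" : "corrupted");
    return csum == 0xffffffff ? 0 : 1;
}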
diff --git a/drivers/net/cxgb3/version.h b/drivers/net/cxgb3/version.h
index b112317f033e..8eddd23a3a51 100644
--- a/drivers/net/cxgb3/version.h
+++ b/drivers/net/cxgb3/version.h
@@ -39,6 +39,6 @@
 
 /* Firmware version */
 #define FW_VERSION_MAJOR 4
-#define FW_VERSION_MINOR 0
+#define FW_VERSION_MINOR 1
 #define FW_VERSION_MICRO 0
 #endif    /* __CHELSIO_VERSION_H */