author     Vipul Pandya <vipul@chelsio.com>        2012-09-25 22:39:38 -0400
committer  David S. Miller <davem@davemloft.net>   2012-09-27 17:55:50 -0400
commit     52367a763d8046190754ab43743e42638564a2d1 (patch)
tree       fd8ee7042d944f9b728443a89295ac583a73bf32 /drivers
parent     5afc8b84eb7b29e4646d6e8ca7e6d7196031d6f7 (diff)
cxgb4/cxgb4vf: Code cleanup to enable T4 Configuration File support
This patch adds new enums and macros to enable T4 Configuration File support, and removes duplicate macro definitions. It fixes the build failure in the cxgb4vf driver introduced by the removal of the old macro definitions. It also performs SGE initialization based on whether a T4 Configuration File is provided: if one is provided, the parameters it supplies are used; otherwise hard-coded values are used.

Signed-off-by: Jay Hernandez <jay@chelsio.com>
Signed-off-by: Vipul Pandya <vipul@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h      18
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c        337
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_regs.h    35
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h   41
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/sge.c      5
5 files changed, 365 insertions, 71 deletions
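For orientation before reading the full diff, the following condensed sketch (not part of the patch itself; error handling, fl_align/fl_starve_thres setup and most register programming omitted) shows the control flow that t4_sge_init() gains: shared SGE parameters are read back from SGE_CONTROL, and then either the "soft" path (adopt whatever the Firmware Configuration File or another Master PF already programmed) or the "hard" path (program the legacy hard-coded values) is taken, depending on the USING_SOFT_PARAMS adapter flag.

int t4_sge_init(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	u32 sge_control = t4_read_reg(adap, SGE_CONTROL);

	/* Parameters common to both modes come straight from the hardware. */
	s->pktshift = PKTSHIFT_GET(sge_control);
	s->stat_len = (sge_control & EGRSTATUSPAGESIZE_MASK) ? 128 : 64;

	/*
	 * Config File / non-Master PF: accept the existing SGE settings;
	 * otherwise fall back to programming the old hard-coded values.
	 */
	return (adap->flags & USING_SOFT_PARAMS) ?
		t4_sge_init_soft(adap) : t4_sge_init_hard(adap);
}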
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 7de740a8b764..ae040cf255a4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -315,6 +315,9 @@ enum { /* adapter flags */
 	USING_MSI          = (1 << 1),
 	USING_MSIX         = (1 << 2),
 	FW_OK              = (1 << 4),
+	USING_SOFT_PARAMS  = (1 << 6),
+	MASTER_PF          = (1 << 7),
+	FW_OFLD_CONN       = (1 << 9),
 };
 
 struct rx_sw_desc;
@@ -467,6 +470,11 @@ struct sge {
 	u16 rdma_rxq[NCHAN];
 	u16 timer_val[SGE_NTIMERS];
 	u8 counter_val[SGE_NCOUNTERS];
+	u32 fl_pg_order;             /* large page allocation size */
+	u32 stat_len;                /* length of status page at ring end */
+	u32 pktshift;                /* padding between CPL & packet data */
+	u32 fl_align;                /* response queue message alignment */
+	u32 fl_starve_thres;         /* Free List starvation threshold */
 	unsigned int starve_thres;
 	u8 idma_state[2];
 	unsigned int egr_start;
@@ -619,7 +627,7 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
 int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
 			  struct net_device *dev, unsigned int iqid);
 irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
-void t4_sge_init(struct adapter *adap);
+int t4_sge_init(struct adapter *adap);
 void t4_sge_start(struct adapter *adap);
 void t4_sge_stop(struct adapter *adap);
 extern int dbfifo_int_thresh;
@@ -638,6 +646,14 @@ static inline unsigned int us_to_core_ticks(const struct adapter *adap,
 	return (us * adap->params.vpd.cclk) / 1000;
 }
 
+static inline unsigned int core_ticks_to_us(const struct adapter *adapter,
+					    unsigned int ticks)
+{
+	/* add Core Clock / 2 to round ticks to nearest uS */
+	return ((ticks * 1000 + adapter->params.vpd.cclk/2) /
+		adapter->params.vpd.cclk);
+}
+
 void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
 		      u32 val);
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 1fde57d45318..3ecc087d732d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -68,9 +68,6 @@
  */
 #define RX_PKT_SKB_LEN   512
 
-/* Ethernet header padding prepended to RX_PKTs */
-#define RX_PKT_PAD 2
-
 /*
  * Max number of Tx descriptors we clean up at a time.  Should be modest as
  * freeing skbs isn't cheap and it happens while holding locks.  We just need
@@ -137,13 +134,6 @@
  */
 #define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
 
-enum {
-	/* packet alignment in FL buffers */
-	FL_ALIGN = L1_CACHE_BYTES < 32 ? 32 : L1_CACHE_BYTES,
-	/* egress status entry size */
-	STAT_LEN = L1_CACHE_BYTES > 64 ? 128 : 64
-};
-
 struct tx_sw_desc {    /* SW state per Tx descriptor */
 	struct sk_buff *skb;
 	struct ulptx_sgl *sgl;
@@ -155,16 +145,57 @@ struct rx_sw_desc {    /* SW state per Rx descriptor */
 };
 
 /*
- * The low bits of rx_sw_desc.dma_addr have special meaning.
+ * Rx buffer sizes for "useskbs" Free List buffers (one ingress packet pe skb
+ * buffer).  We currently only support two sizes for 1500- and 9000-byte MTUs.
+ * We could easily support more but there doesn't seem to be much need for
+ * that ...
+ */
+#define FL_MTU_SMALL 1500
+#define FL_MTU_LARGE 9000
+
+static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
+					  unsigned int mtu)
+{
+	struct sge *s = &adapter->sge;
+
+	return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align);
+}
+
+#define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
+#define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE)
+
+/*
+ * Bits 0..3 of rx_sw_desc.dma_addr have special meaning.  The hardware uses
+ * these to specify the buffer size as an index into the SGE Free List Buffer
+ * Size register array.  We also use bit 4, when the buffer has been unmapped
+ * for DMA, but this is of course never sent to the hardware and is only used
+ * to prevent double unmappings.  All of the above requires that the Free List
+ * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
+ * 32-byte or or a power of 2 greater in alignment.  Since the SGE's minimal
+ * Free List Buffer alignment is 32 bytes, this works out for us ...
  */
 enum {
-	RX_LARGE_BUF    = 1 << 0, /* buffer is larger than PAGE_SIZE */
-	RX_UNMAPPED_BUF = 1 << 1, /* buffer is not mapped */
+	RX_BUF_FLAGS     = 0x1f,   /* bottom five bits are special */
+	RX_BUF_SIZE      = 0x0f,   /* bottom three bits are for buf sizes */
+	RX_UNMAPPED_BUF  = 0x10,   /* buffer is not mapped */
+
+	/*
+	 * XXX We shouldn't depend on being able to use these indices.
+	 * XXX Especially when some other Master PF has initialized the
+	 * XXX adapter or we use the Firmware Configuration File.  We
+	 * XXX should really search through the Host Buffer Size register
+	 * XXX array for the appropriately sized buffer indices.
+	 */
+	RX_SMALL_PG_BUF  = 0x0,   /* small (PAGE_SIZE) page buffer */
+	RX_LARGE_PG_BUF  = 0x1,   /* buffer large (FL_PG_ORDER) page buffer */
+
+	RX_SMALL_MTU_BUF = 0x2,   /* small MTU buffer */
+	RX_LARGE_MTU_BUF = 0x3,   /* large MTU buffer */
 };
 
 static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
 {
-	return d->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF);
+	return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS;
 }
 
 static inline bool is_buf_mapped(const struct rx_sw_desc *d)
@@ -392,14 +423,35 @@ static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
 	}
 }
 
-static inline int get_buf_size(const struct rx_sw_desc *d)
+static inline int get_buf_size(struct adapter *adapter,
+			       const struct rx_sw_desc *d)
 {
-#if FL_PG_ORDER > 0
-	return (d->dma_addr & RX_LARGE_BUF) ? (PAGE_SIZE << FL_PG_ORDER) :
-					      PAGE_SIZE;
-#else
-	return PAGE_SIZE;
-#endif
+	struct sge *s = &adapter->sge;
+	unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE;
+	int buf_size;
+
+	switch (rx_buf_size_idx) {
+	case RX_SMALL_PG_BUF:
+		buf_size = PAGE_SIZE;
+		break;
+
+	case RX_LARGE_PG_BUF:
+		buf_size = PAGE_SIZE << s->fl_pg_order;
+		break;
+
+	case RX_SMALL_MTU_BUF:
+		buf_size = FL_MTU_SMALL_BUFSIZE(adapter);
+		break;
+
+	case RX_LARGE_MTU_BUF:
+		buf_size = FL_MTU_LARGE_BUFSIZE(adapter);
+		break;
+
+	default:
+		BUG_ON(1);
+	}
+
+	return buf_size;
 }
 
 /**
@@ -418,7 +470,8 @@ static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
 
 		if (is_buf_mapped(d))
 			dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
-				       get_buf_size(d), PCI_DMA_FROMDEVICE);
+				       get_buf_size(adap, d),
+				       PCI_DMA_FROMDEVICE);
 		put_page(d->page);
 		d->page = NULL;
 		if (++q->cidx == q->size)
@@ -444,7 +497,7 @@ static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
 
 	if (is_buf_mapped(d))
 		dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
-			       get_buf_size(d), PCI_DMA_FROMDEVICE);
+			       get_buf_size(adap, d), PCI_DMA_FROMDEVICE);
 	d->page = NULL;
 	if (++q->cidx == q->size)
 		q->cidx = 0;
@@ -485,6 +538,7 @@ static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
 static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
 			      gfp_t gfp)
 {
+	struct sge *s = &adap->sge;
 	struct page *pg;
 	dma_addr_t mapping;
 	unsigned int cred = q->avail;
@@ -493,25 +547,27 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
 
 	gfp |= __GFP_NOWARN | __GFP_COLD;
 
-#if FL_PG_ORDER > 0
+	if (s->fl_pg_order == 0)
+		goto alloc_small_pages;
+
 	/*
 	 * Prefer large buffers
 	 */
 	while (n) {
-		pg = alloc_pages(gfp | __GFP_COMP, FL_PG_ORDER);
+		pg = alloc_pages(gfp | __GFP_COMP, s->fl_pg_order);
 		if (unlikely(!pg)) {
 			q->large_alloc_failed++;
 			break;       /* fall back to single pages */
 		}
 
 		mapping = dma_map_page(adap->pdev_dev, pg, 0,
-				       PAGE_SIZE << FL_PG_ORDER,
+				       PAGE_SIZE << s->fl_pg_order,
 				       PCI_DMA_FROMDEVICE);
 		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
-			__free_pages(pg, FL_PG_ORDER);
+			__free_pages(pg, s->fl_pg_order);
 			goto out;   /* do not try small pages for this error */
 		}
-		mapping |= RX_LARGE_BUF;
+		mapping |= RX_LARGE_PG_BUF;
 		*d++ = cpu_to_be64(mapping);
 
 		set_rx_sw_desc(sd, pg, mapping);
@@ -525,8 +581,8 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
 		}
 		n--;
 	}
-#endif
 
+alloc_small_pages:
 	while (n--) {
 		pg = __skb_alloc_page(gfp, NULL);
 		if (unlikely(!pg)) {
@@ -1519,6 +1575,8 @@ static noinline int handle_trace_pkt(struct adapter *adap,
 static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
 		   const struct cpl_rx_pkt *pkt)
 {
+	struct adapter *adapter = rxq->rspq.adap;
+	struct sge *s = &adapter->sge;
 	int ret;
 	struct sk_buff *skb;
 
@@ -1529,8 +1587,8 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
 		return;
 	}
 
-	copy_frags(skb, gl, RX_PKT_PAD);
-	skb->len = gl->tot_len - RX_PKT_PAD;
+	copy_frags(skb, gl, s->pktshift);
+	skb->len = gl->tot_len - s->pktshift;
 	skb->data_len = skb->len;
 	skb->truesize += skb->data_len;
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1566,6 +1624,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
 	struct sk_buff *skb;
 	const struct cpl_rx_pkt *pkt;
 	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
+	struct sge *s = &q->adap->sge;
 
 	if (unlikely(*(u8 *)rsp == CPL_TRACE_PKT))
 		return handle_trace_pkt(q->adap, si);
@@ -1585,7 +1644,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
 		return 0;
 	}
 
-	__skb_pull(skb, RX_PKT_PAD);      /* remove ethernet header padding */
+	__skb_pull(skb, s->pktshift);      /* remove ethernet header padding */
 	skb->protocol = eth_type_trans(skb, q->netdev);
 	skb_record_rx_queue(skb, q->idx);
 	if (skb->dev->features & NETIF_F_RXHASH)
@@ -1696,6 +1755,8 @@ static int process_responses(struct sge_rspq *q, int budget)
 	int budget_left = budget;
 	const struct rsp_ctrl *rc;
 	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
+	struct adapter *adapter = q->adap;
+	struct sge *s = &adapter->sge;
 
 	while (likely(budget_left)) {
 		rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
@@ -1722,7 +1783,7 @@ static int process_responses(struct sge_rspq *q, int budget)
 			/* gather packet fragments */
 			for (frags = 0, fp = si.frags; ; frags++, fp++) {
 				rsd = &rxq->fl.sdesc[rxq->fl.cidx];
-				bufsz = get_buf_size(rsd);
+				bufsz = get_buf_size(adapter, rsd);
 				fp->page = rsd->page;
 				fp->offset = q->offset;
 				fp->size = min(bufsz, len);
@@ -1747,7 +1808,7 @@ static int process_responses(struct sge_rspq *q, int budget)
 			si.nfrags = frags + 1;
 			ret = q->handler(q, q->cur_desc, &si);
 			if (likely(ret == 0))
-				q->offset += ALIGN(fp->size, FL_ALIGN);
+				q->offset += ALIGN(fp->size, s->fl_align);
 			else
 				restore_rx_bufs(&si, &rxq->fl, frags);
 		} else if (likely(rsp_type == RSP_TYPE_CPL)) {
@@ -1983,6 +2044,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
 {
 	int ret, flsz = 0;
 	struct fw_iq_cmd c;
+	struct sge *s = &adap->sge;
 	struct port_info *pi = netdev_priv(dev);
 
 	/* Size needs to be multiple of 16, including status entry. */
@@ -2015,11 +2077,11 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
 		fl->size = roundup(fl->size, 8);
 		fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
 				      sizeof(struct rx_sw_desc), &fl->addr,
-				      &fl->sdesc, STAT_LEN, NUMA_NO_NODE);
+				      &fl->sdesc, s->stat_len, NUMA_NO_NODE);
 		if (!fl->desc)
 			goto fl_nomem;
 
-		flsz = fl->size / 8 + STAT_LEN / sizeof(struct tx_desc);
+		flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
 		c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN |
 					    FW_IQ_CMD_FL0FETCHRO(1) |
 					    FW_IQ_CMD_FL0DATARO(1) |
@@ -2096,14 +2158,15 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
 {
 	int ret, nentries;
 	struct fw_eq_eth_cmd c;
+	struct sge *s = &adap->sge;
 	struct port_info *pi = netdev_priv(dev);
 
 	/* Add status entries */
-	nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
+	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
 
 	txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
 			sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
-			&txq->q.phys_addr, &txq->q.sdesc, STAT_LEN,
+			&txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
 			netdev_queue_numa_node_read(netdevq));
 	if (!txq->q.desc)
 		return -ENOMEM;
@@ -2149,10 +2212,11 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
 {
 	int ret, nentries;
 	struct fw_eq_ctrl_cmd c;
+	struct sge *s = &adap->sge;
 	struct port_info *pi = netdev_priv(dev);
 
 	/* Add status entries */
-	nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
+	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
 
 	txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
 				 sizeof(struct tx_desc), 0, &txq->q.phys_addr,
@@ -2200,14 +2264,15 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
 {
 	int ret, nentries;
 	struct fw_eq_ofld_cmd c;
+	struct sge *s = &adap->sge;
 	struct port_info *pi = netdev_priv(dev);
 
 	/* Add status entries */
-	nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
+	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
 
 	txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
 			sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
-			&txq->q.phys_addr, &txq->q.sdesc, STAT_LEN,
+			&txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
 			NUMA_NO_NODE);
 	if (!txq->q.desc)
 		return -ENOMEM;
@@ -2251,8 +2316,10 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
 
 static void free_txq(struct adapter *adap, struct sge_txq *q)
 {
+	struct sge *s = &adap->sge;
+
 	dma_free_coherent(adap->pdev_dev,
-			  q->size * sizeof(struct tx_desc) + STAT_LEN,
+			  q->size * sizeof(struct tx_desc) + s->stat_len,
 			  q->desc, q->phys_addr);
 	q->cntxt_id = 0;
 	q->sdesc = NULL;
@@ -2262,6 +2329,7 @@ static void free_txq(struct adapter *adap, struct sge_txq *q)
 static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
			 struct sge_fl *fl)
 {
+	struct sge *s = &adap->sge;
 	unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
 
 	adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
@@ -2276,7 +2344,7 @@ static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
 
 	if (fl) {
 		free_rx_bufs(adap, fl, fl->avail);
-		dma_free_coherent(adap->pdev_dev, fl->size * 8 + STAT_LEN,
+		dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len,
 				  fl->desc, fl->addr);
 		kfree(fl->sdesc);
 		fl->sdesc = NULL;
@@ -2408,18 +2476,112 @@ void t4_sge_stop(struct adapter *adap)
  *	Performs SGE initialization needed every time after a chip reset.
  *	We do not initialize any of the queues here, instead the driver
  *	top-level must request them individually.
+ *
+ *	Called in two different modes:
+ *
+ *	 1. Perform actual hardware initialization and record hard-coded
+ *	    parameters which were used.  This gets used when we're the
+ *	    Master PF and the Firmware Configuration File support didn't
+ *	    work for some reason.
+ *
+ *	 2. We're not the Master PF or initialization was performed with
+ *	    a Firmware Configuration File.  In this case we need to grab
+ *	    any of the SGE operating parameters that we need to have in
+ *	    order to do our job and make sure we can live with them ...
  */
-void t4_sge_init(struct adapter *adap)
+
+static int t4_sge_init_soft(struct adapter *adap)
 {
-	unsigned int i, v;
 	struct sge *s = &adap->sge;
-	unsigned int fl_align_log = ilog2(FL_ALIGN);
+	u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu;
+	u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
+	u32 ingress_rx_threshold;
 
-	t4_set_reg_field(adap, SGE_CONTROL, PKTSHIFT_MASK |
-			 INGPADBOUNDARY_MASK | EGRSTATUSPAGESIZE,
-			 INGPADBOUNDARY(fl_align_log - 5) | PKTSHIFT(2) |
-			 RXPKTCPLMODE |
-			 (STAT_LEN == 128 ? EGRSTATUSPAGESIZE : 0));
+	/*
+	 * Verify that CPL messages are going to the Ingress Queue for
+	 * process_responses() and that only packet data is going to the
+	 * Free Lists.
+	 */
+	if ((t4_read_reg(adap, SGE_CONTROL) & RXPKTCPLMODE_MASK) !=
+	    RXPKTCPLMODE(X_RXPKTCPLMODE_SPLIT)) {
+		dev_err(adap->pdev_dev, "bad SGE CPL MODE\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Validate the Host Buffer Register Array indices that we want to
+	 * use ...
+	 *
+	 * XXX Note that we should really read through the Host Buffer Size
+	 * XXX register array and find the indices of the Buffer Sizes which
+	 * XXX meet our needs!
+	 */
+	#define READ_FL_BUF(x) \
+		t4_read_reg(adap, SGE_FL_BUFFER_SIZE0+(x)*sizeof(u32))
+
+	fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
+	fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
+	fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
+	fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);
+
+	#undef READ_FL_BUF
+
+	if (fl_small_pg != PAGE_SIZE ||
+	    (fl_large_pg != 0 && (fl_large_pg <= fl_small_pg ||
+				  (fl_large_pg & (fl_large_pg-1)) != 0))) {
+		dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
+			fl_small_pg, fl_large_pg);
+		return -EINVAL;
+	}
+	if (fl_large_pg)
+		s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
+
+	if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap) ||
+	    fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) {
+		dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n",
+			fl_small_mtu, fl_large_mtu);
+		return -EINVAL;
+	}
+
+	/*
+	 * Retrieve our RX interrupt holdoff timer values and counter
+	 * threshold values from the SGE parameters.
+	 */
+	timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1);
+	timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3);
+	timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5);
+	s->timer_val[0] = core_ticks_to_us(adap,
+		TIMERVALUE0_GET(timer_value_0_and_1));
+	s->timer_val[1] = core_ticks_to_us(adap,
+		TIMERVALUE1_GET(timer_value_0_and_1));
+	s->timer_val[2] = core_ticks_to_us(adap,
+		TIMERVALUE2_GET(timer_value_2_and_3));
+	s->timer_val[3] = core_ticks_to_us(adap,
+		TIMERVALUE3_GET(timer_value_2_and_3));
+	s->timer_val[4] = core_ticks_to_us(adap,
+		TIMERVALUE4_GET(timer_value_4_and_5));
+	s->timer_val[5] = core_ticks_to_us(adap,
+		TIMERVALUE5_GET(timer_value_4_and_5));
+
+	ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD);
+	s->counter_val[0] = THRESHOLD_0_GET(ingress_rx_threshold);
+	s->counter_val[1] = THRESHOLD_1_GET(ingress_rx_threshold);
+	s->counter_val[2] = THRESHOLD_2_GET(ingress_rx_threshold);
+	s->counter_val[3] = THRESHOLD_3_GET(ingress_rx_threshold);
+
+	return 0;
+}
+
+static int t4_sge_init_hard(struct adapter *adap)
+{
+	struct sge *s = &adap->sge;
+
+	/*
+	 * Set up our basic SGE mode to deliver CPL messages to our Ingress
+	 * Queue and Packet Date to the Free List.
+	 */
+	t4_set_reg_field(adap, SGE_CONTROL, RXPKTCPLMODE_MASK,
+			 RXPKTCPLMODE_MASK);
 
 	/*
 	 * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
@@ -2433,13 +2595,24 @@ void t4_sge_init(struct adapter *adap)
 	t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_ENABLE_DROP,
 			 F_ENABLE_DROP);
 
-	for (i = v = 0; i < 32; i += 4)
-		v |= (PAGE_SHIFT - 10) << i;
-	t4_write_reg(adap, SGE_HOST_PAGE_SIZE, v);
-	t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, PAGE_SIZE);
-#if FL_PG_ORDER > 0
-	t4_write_reg(adap, SGE_FL_BUFFER_SIZE1, PAGE_SIZE << FL_PG_ORDER);
-#endif
+	/*
+	 * SGE_FL_BUFFER_SIZE0 (RX_SMALL_PG_BUF) is set up by
+	 * t4_fixup_host_params().
+	 */
+	s->fl_pg_order = FL_PG_ORDER;
+	if (s->fl_pg_order)
+		t4_write_reg(adap,
+			     SGE_FL_BUFFER_SIZE0+RX_LARGE_PG_BUF*sizeof(u32),
+			     PAGE_SIZE << FL_PG_ORDER);
+	t4_write_reg(adap, SGE_FL_BUFFER_SIZE0+RX_SMALL_MTU_BUF*sizeof(u32),
+		     FL_MTU_SMALL_BUFSIZE(adap));
+	t4_write_reg(adap, SGE_FL_BUFFER_SIZE0+RX_LARGE_MTU_BUF*sizeof(u32),
+		     FL_MTU_LARGE_BUFSIZE(adap));
+
+	/*
+	 * Note that the SGE Ingress Packet Count Interrupt Threshold and
+	 * Timer Holdoff values must be supplied by our caller.
+	 */
 	t4_write_reg(adap, SGE_INGRESS_RX_THRESHOLD,
 		     THRESHOLD_0(s->counter_val[0]) |
 		     THRESHOLD_1(s->counter_val[1]) |
@@ -2449,14 +2622,54 @@ void t4_sge_init(struct adapter *adap)
 		     TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[0])) |
 		     TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[1])));
 	t4_write_reg(adap, SGE_TIMER_VALUE_2_AND_3,
-		     TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[2])) |
-		     TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[3])));
+		     TIMERVALUE2(us_to_core_ticks(adap, s->timer_val[2])) |
+		     TIMERVALUE3(us_to_core_ticks(adap, s->timer_val[3])));
 	t4_write_reg(adap, SGE_TIMER_VALUE_4_AND_5,
-		     TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[4])) |
-		     TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[5])));
+		     TIMERVALUE4(us_to_core_ticks(adap, s->timer_val[4])) |
+		     TIMERVALUE5(us_to_core_ticks(adap, s->timer_val[5])));
+
+	return 0;
+}
+
+int t4_sge_init(struct adapter *adap)
+{
+	struct sge *s = &adap->sge;
+	u32 sge_control;
+	int ret;
+
+	/*
+	 * Ingress Padding Boundary and Egress Status Page Size are set up by
+	 * t4_fixup_host_params().
+	 */
+	sge_control = t4_read_reg(adap, SGE_CONTROL);
+	s->pktshift = PKTSHIFT_GET(sge_control);
+	s->stat_len = (sge_control & EGRSTATUSPAGESIZE_MASK) ? 128 : 64;
+	s->fl_align = 1 << (INGPADBOUNDARY_GET(sge_control) +
+			    X_INGPADBOUNDARY_SHIFT);
+
+	if (adap->flags & USING_SOFT_PARAMS)
+		ret = t4_sge_init_soft(adap);
+	else
+		ret = t4_sge_init_hard(adap);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * A FL with <= fl_starve_thres buffers is starving and a periodic
+	 * timer will attempt to refill it.  This needs to be larger than the
+	 * SGE's Egress Congestion Threshold.  If it isn't, then we can get
+	 * stuck waiting for new packets while the SGE is waiting for us to
+	 * give it more Free List entries.  (Note that the SGE's Egress
+	 * Congestion Threshold is in units of 2 Free List pointers.)
+	 */
+	s->fl_starve_thres
+		= EGRTHRESHOLD_GET(t4_read_reg(adap, SGE_CONM_CTRL))*2 + 1;
+
 	setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
 	setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
 	s->starve_thres = core_ticks_per_usec(adap) * 1000000;	/* 1 s */
 	s->idma_state[0] = s->idma_state[1] = 0;
 	spin_lock_init(&s->intrq_lock);
+
+	return 0;
 }
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 8e814bc46822..2767ca6dbe99 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -86,10 +86,17 @@
 #define  CIDXINC_SHIFT     0
 #define  CIDXINC(x)        ((x) << CIDXINC_SHIFT)
 
+#define X_RXPKTCPLMODE_SPLIT     1
+#define X_INGPADBOUNDARY_SHIFT   5
+
 #define SGE_CONTROL 0x1008
 #define  DCASYSTYPE             0x00080000U
-#define  RXPKTCPLMODE           0x00040000U
-#define  EGRSTATUSPAGESIZE      0x00020000U
+#define  RXPKTCPLMODE_MASK      0x00040000U
+#define  RXPKTCPLMODE_SHIFT     18
+#define  RXPKTCPLMODE(x)        ((x) << RXPKTCPLMODE_SHIFT)
+#define  EGRSTATUSPAGESIZE_MASK  0x00020000U
+#define  EGRSTATUSPAGESIZE_SHIFT 17
+#define  EGRSTATUSPAGESIZE(x)   ((x) << EGRSTATUSPAGESIZE_SHIFT)
 #define  PKTSHIFT_MASK          0x00001c00U
 #define  PKTSHIFT_SHIFT         10
 #define  PKTSHIFT(x)            ((x) << PKTSHIFT_SHIFT)
@@ -173,6 +180,12 @@
 #define  THRESHOLD_3(x)     ((x) << THRESHOLD_3_SHIFT)
 #define  THRESHOLD_3_GET(x) (((x) & THRESHOLD_3_MASK) >> THRESHOLD_3_SHIFT)
 
+#define SGE_CONM_CTRL 0x1094
+#define  EGRTHRESHOLD_MASK   0x00003f00U
+#define  EGRTHRESHOLDshift   8
+#define  EGRTHRESHOLD(x)     ((x) << EGRTHRESHOLDshift)
+#define  EGRTHRESHOLD_GET(x) (((x) & EGRTHRESHOLD_MASK) >> EGRTHRESHOLDshift)
+
 #define SGE_TIMER_VALUE_0_AND_1 0x10b8
 #define  TIMERVALUE0_MASK   0xffff0000U
 #define  TIMERVALUE0_SHIFT  16
@@ -184,7 +197,25 @@
 #define  TIMERVALUE1_GET(x) (((x) & TIMERVALUE1_MASK) >> TIMERVALUE1_SHIFT)
 
 #define SGE_TIMER_VALUE_2_AND_3 0x10bc
+#define  TIMERVALUE2_MASK   0xffff0000U
+#define  TIMERVALUE2_SHIFT  16
+#define  TIMERVALUE2(x)     ((x) << TIMERVALUE2_SHIFT)
+#define  TIMERVALUE2_GET(x) (((x) & TIMERVALUE2_MASK) >> TIMERVALUE2_SHIFT)
+#define  TIMERVALUE3_MASK   0x0000ffffU
+#define  TIMERVALUE3_SHIFT  0
+#define  TIMERVALUE3(x)     ((x) << TIMERVALUE3_SHIFT)
+#define  TIMERVALUE3_GET(x) (((x) & TIMERVALUE3_MASK) >> TIMERVALUE3_SHIFT)
+
 #define SGE_TIMER_VALUE_4_AND_5 0x10c0
+#define  TIMERVALUE4_MASK   0xffff0000U
+#define  TIMERVALUE4_SHIFT  16
+#define  TIMERVALUE4(x)     ((x) << TIMERVALUE4_SHIFT)
+#define  TIMERVALUE4_GET(x) (((x) & TIMERVALUE4_MASK) >> TIMERVALUE4_SHIFT)
+#define  TIMERVALUE5_MASK   0x0000ffffU
+#define  TIMERVALUE5_SHIFT  0
+#define  TIMERVALUE5(x)     ((x) << TIMERVALUE5_SHIFT)
+#define  TIMERVALUE5_GET(x) (((x) & TIMERVALUE5_MASK) >> TIMERVALUE5_SHIFT)
+
 #define SGE_DEBUG_INDEX 0x10cc
 #define SGE_DEBUG_DATA_HIGH 0x10d0
 #define SGE_DEBUG_DATA_LOW 0x10d4
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index ad53f796b574..94e3484b7d93 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -401,6 +401,14 @@ enum fw_caps_config_fcoe {
 	FW_CAPS_CONFIG_FCOE_TARGET	= 0x00000002,
 };
 
+enum fw_memtype_cf {
+	FW_MEMTYPE_CF_EDC0		= 0x0,
+	FW_MEMTYPE_CF_EDC1		= 0x1,
+	FW_MEMTYPE_CF_EXTMEM		= 0x2,
+	FW_MEMTYPE_CF_FLASH		= 0x4,
+	FW_MEMTYPE_CF_INTERNAL		= 0x5,
+};
+
 struct fw_caps_config_cmd {
 	__be32 op_to_write;
 	__be32 retval_len16;
@@ -416,10 +424,15 @@ struct fw_caps_config_cmd {
 	__be16 r4;
 	__be16 iscsicaps;
 	__be16 fcoecaps;
-	__be32 r5;
-	__be64 r6;
+	__be32 cfcsum;
+	__be32 finiver;
+	__be32 finicsum;
 };
 
+#define FW_CAPS_CONFIG_CMD_CFVALID          (1U << 27)
+#define FW_CAPS_CONFIG_CMD_MEMTYPE_CF(x)    ((x) << 24)
+#define FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(x) ((x) << 16)
+
 /*
  * params command mnemonics
  */
@@ -451,6 +464,7 @@ enum fw_params_param_dev {
 	FW_PARAMS_PARAM_DEV_INTVER_FCOE = 0x0A,
 	FW_PARAMS_PARAM_DEV_FWREV = 0x0B,
 	FW_PARAMS_PARAM_DEV_TPREV = 0x0C,
+	FW_PARAMS_PARAM_DEV_CF = 0x0D,
 };
 
 /*
@@ -492,6 +506,8 @@ enum fw_params_param_pfvf {
 	FW_PARAMS_PARAM_PFVF_IQFLINT_END = 0x2A,
 	FW_PARAMS_PARAM_PFVF_EQ_START	= 0x2B,
 	FW_PARAMS_PARAM_PFVF_EQ_END	= 0x2C,
+	FW_PARAMS_PARAM_PFVF_ACTIVE_FILTER_START = 0x2D,
+	FW_PARAMS_PARAM_PFVF_ACTIVE_FILTER_END = 0x2E
 };
 
 /*
@@ -507,8 +523,16 @@ enum fw_params_param_dmaq {
 
 #define FW_PARAMS_MNEM(x)      ((x) << 24)
 #define FW_PARAMS_PARAM_X(x)   ((x) << 16)
-#define FW_PARAMS_PARAM_Y(x)   ((x) << 8)
-#define FW_PARAMS_PARAM_Z(x)   ((x) << 0)
+#define FW_PARAMS_PARAM_Y_SHIFT  8
+#define FW_PARAMS_PARAM_Y_MASK   0xffU
+#define FW_PARAMS_PARAM_Y(x)     ((x) << FW_PARAMS_PARAM_Y_SHIFT)
+#define FW_PARAMS_PARAM_Y_GET(x) (((x) >> FW_PARAMS_PARAM_Y_SHIFT) &\
+		FW_PARAMS_PARAM_Y_MASK)
+#define FW_PARAMS_PARAM_Z_SHIFT  0
+#define FW_PARAMS_PARAM_Z_MASK   0xffu
+#define FW_PARAMS_PARAM_Z(x)     ((x) << FW_PARAMS_PARAM_Z_SHIFT)
+#define FW_PARAMS_PARAM_Z_GET(x) (((x) >> FW_PARAMS_PARAM_Z_SHIFT) &\
+		FW_PARAMS_PARAM_Z_MASK)
 #define FW_PARAMS_PARAM_XYZ(x) ((x) << 0)
 #define FW_PARAMS_PARAM_YZ(x)  ((x) << 0)
 
@@ -1599,6 +1623,15 @@ struct fw_debug_cmd {
 	} u;
 };
 
+#define FW_PCIE_FW_ERR           (1U << 31)
+#define FW_PCIE_FW_INIT          (1U << 30)
+#define FW_PCIE_FW_MASTER_VLD    (1U << 15)
+#define FW_PCIE_FW_MASTER_MASK   0x7
+#define FW_PCIE_FW_MASTER_SHIFT  12
+#define FW_PCIE_FW_MASTER(x)     ((x) << FW_PCIE_FW_MASTER_SHIFT)
+#define FW_PCIE_FW_MASTER_GET(x) (((x) >> FW_PCIE_FW_MASTER_SHIFT) & \
+		FW_PCIE_FW_MASTER_MASK)
+
 struct fw_hdr {
 	u8 ver;
 	u8 reserved1;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 8877fbfefb63..f16745f4b36b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -2421,7 +2421,7 @@ int t4vf_sge_init(struct adapter *adapter)
 			fl0, fl1);
 		return -EINVAL;
 	}
-	if ((sge_params->sge_control & RXPKTCPLMODE) == 0) {
+	if ((sge_params->sge_control & RXPKTCPLMODE_MASK) == 0) {
 		dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n");
 		return -EINVAL;
 	}
@@ -2431,7 +2431,8 @@ int t4vf_sge_init(struct adapter *adapter)
 	 */
 	if (fl1)
 		FL_PG_ORDER = ilog2(fl1) - PAGE_SHIFT;
-	STAT_LEN = ((sge_params->sge_control & EGRSTATUSPAGESIZE) ? 128 : 64);
+	STAT_LEN = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK)
+		    ? 128 : 64);
 	PKTSHIFT = PKTSHIFT_GET(sge_params->sge_control);
 	FL_ALIGN = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) +
 			 SGE_INGPADBOUNDARY_SHIFT);