-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/adapter.h |  8
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/sge.c     | 93
2 files changed, 61 insertions, 40 deletions
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index 68eaa9c88c7d..3d06e77d7121 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -299,6 +299,14 @@ struct sge {
         u16 timer_val[SGE_NTIMERS];     /* interrupt holdoff timer array */
         u8 counter_val[SGE_NCOUNTERS];  /* interrupt RX threshold array */
 
+        /* Decoded Adapter Parameters.
+         */
+        u32 fl_pg_order;                /* large page allocation size */
+        u32 stat_len;                   /* length of status page at ring end */
+        u32 pktshift;                   /* padding between CPL & packet data */
+        u32 fl_align;                   /* response queue message alignment */
+        u32 fl_starve_thres;            /* Free List starvation threshold */
+
         /*
          * Reverse maps from Absolute Queue IDs to associated queue pointers.
          * The absolute Queue IDs are in a compact range which start at a
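
The eight lines added above are the heart of the patch: the decoded SGE
parameters become per-adapter state instead of the file-scope statics that
the first sge.c hunk below removes, so two adapters whose firmware decodes
different SGE settings no longer share (and clobber) a single set of values.
A minimal sketch of the pattern, with hypothetical names:

    /* Before: one copy per module -- shared by every adapter the driver
     * binds, which is wrong as soon as two decode different parameters.
     */
    static u32 example_pktshift;

    /* After: one copy per adapter, reachable from any queue structure. */
    struct example_sge { u32 pktshift; };
    struct example_adapter { struct example_sge sge; };

    static inline u32 example_pktshift_of(const struct example_adapter *adap)
    {
            return adap->sge.pktshift;      /* per-device, not per-module */
    }
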
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 85036e6b42c4..a18830d8aa6d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -51,14 +51,6 @@
 #include "../cxgb4/t4_msg.h"
 
 /*
- * Decoded Adapter Parameters.
- */
-static u32 FL_PG_ORDER;         /* large page allocation size */
-static u32 STAT_LEN;            /* length of status page at ring end */
-static u32 PKTSHIFT;            /* padding between CPL and packet data */
-static u32 FL_ALIGN;            /* response queue message alignment */
-
-/*
  * Constants ...
  */
 enum {
@@ -264,15 +256,19 @@ static inline unsigned int fl_cap(const struct sge_fl *fl)
 
 /**
  * fl_starving - return whether a Free List is starving.
+ * @adapter: pointer to the adapter
  * @fl: the Free List
  *
  * Tests specified Free List to see whether the number of buffers
  * available to the hardware has falled below our "starvation"
  * threshold.
  */
-static inline bool fl_starving(const struct sge_fl *fl)
+static inline bool fl_starving(const struct adapter *adapter,
+                               const struct sge_fl *fl)
 {
-        return fl->avail - fl->pend_cred <= FL_STARVE_THRES;
+        const struct sge *s = &adapter->sge;
+
+        return fl->avail - fl->pend_cred <= s->fl_starve_thres;
 }
 
 /**
@@ -457,13 +453,16 @@ static inline void reclaim_completed_tx(struct adapter *adapter,
 
 /**
  * get_buf_size - return the size of an RX Free List buffer.
+ * @adapter: pointer to the associated adapter
  * @sdesc: pointer to the software buffer descriptor
  */
-static inline int get_buf_size(const struct rx_sw_desc *sdesc)
+static inline int get_buf_size(const struct adapter *adapter,
+                               const struct rx_sw_desc *sdesc)
 {
-        return FL_PG_ORDER > 0 && (sdesc->dma_addr & RX_LARGE_BUF)
-                ? (PAGE_SIZE << FL_PG_ORDER)
-                : PAGE_SIZE;
+        const struct sge *s = &adapter->sge;
+
+        return (s->fl_pg_order > 0 && (sdesc->dma_addr & RX_LARGE_BUF)
+                ? (PAGE_SIZE << s->fl_pg_order) : PAGE_SIZE);
 }
 
 /**
@@ -483,7 +482,8 @@ static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n)
 
                 if (is_buf_mapped(sdesc))
                         dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
-                                       get_buf_size(sdesc), PCI_DMA_FROMDEVICE);
+                                       get_buf_size(adapter, sdesc),
+                                       PCI_DMA_FROMDEVICE);
                 put_page(sdesc->page);
                 sdesc->page = NULL;
                 if (++fl->cidx == fl->size)
@@ -511,7 +511,8 @@ static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl)
 
         if (is_buf_mapped(sdesc))
                 dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
-                               get_buf_size(sdesc), PCI_DMA_FROMDEVICE);
+                               get_buf_size(adapter, sdesc),
+                               PCI_DMA_FROMDEVICE);
         sdesc->page = NULL;
         if (++fl->cidx == fl->size)
                 fl->cidx = 0;
@@ -589,6 +590,7 @@ static inline void poison_buf(struct page *page, size_t sz)
 static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
                               int n, gfp_t gfp)
 {
+        struct sge *s = &adapter->sge;
         struct page *page;
         dma_addr_t dma_addr;
         unsigned int cred = fl->avail;
@@ -608,12 +610,12 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
          * If we don't support large pages, drop directly into the small page
          * allocation code.
          */
-        if (FL_PG_ORDER == 0)
+        if (s->fl_pg_order == 0)
                 goto alloc_small_pages;
 
         while (n) {
                 page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
-                                   FL_PG_ORDER);
+                                   s->fl_pg_order);
                 if (unlikely(!page)) {
                         /*
                          * We've failed inour attempt to allocate a "large
@@ -623,10 +625,10 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
                         fl->large_alloc_failed++;
                         break;
                 }
-                poison_buf(page, PAGE_SIZE << FL_PG_ORDER);
+                poison_buf(page, PAGE_SIZE << s->fl_pg_order);
 
                 dma_addr = dma_map_page(adapter->pdev_dev, page, 0,
-                                        PAGE_SIZE << FL_PG_ORDER,
+                                        PAGE_SIZE << s->fl_pg_order,
                                         PCI_DMA_FROMDEVICE);
                 if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
                         /*
@@ -637,7 +639,7 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
                          * because DMA mapping resources are typically
                          * critical resources once they become scarse.
                          */
-                        __free_pages(page, FL_PG_ORDER);
+                        __free_pages(page, s->fl_pg_order);
                         goto out;
                 }
                 dma_addr |= RX_LARGE_BUF;
@@ -693,7 +695,7 @@ out:
         fl->pend_cred += cred;
         ring_fl_db(adapter, fl);
 
-        if (unlikely(fl_starving(fl))) {
+        if (unlikely(fl_starving(adapter, fl))) {
                 smp_wmb();
                 set_bit(fl->cntxt_id, adapter->sge.starving_fl);
         }
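
The set_bit() above is the producer half of a handoff: when a refill in
atomic context still leaves the list below threshold, the Free List's
context ID is recorded in a bitmap, and the SGE RX timer (see the
sge_rx_timer_cb() hunk further down) re-tests fl_starving() and kicks NAPI
to top the list back up. A condensed sketch of both halves; fl_id,
starving_bitmap, still_starving and rxq_napi are placeholders, not driver
symbols:

    /* Producer (refill path): publish ring state, then raise the flag. */
    smp_wmb();
    set_bit(fl_id, starving_bitmap);

    /* Consumer (timer callback): claim the flag before acting on it. */
    if (test_and_clear_bit(fl_id, starving_bitmap) && still_starving)
            napi_schedule(&rxq_napi);   /* NAPI context refills the list */
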
@@ -1468,6 +1470,8 @@ static void t4vf_pktgl_free(const struct pkt_gl *gl)
 static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
                    const struct cpl_rx_pkt *pkt)
 {
+        struct adapter *adapter = rxq->rspq.adapter;
+        struct sge *s = &adapter->sge;
         int ret;
         struct sk_buff *skb;
 
@@ -1478,8 +1482,8 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
                 return;
         }
 
-        copy_frags(skb, gl, PKTSHIFT);
-        skb->len = gl->tot_len - PKTSHIFT;
+        copy_frags(skb, gl, s->pktshift);
+        skb->len = gl->tot_len - s->pktshift;
         skb->data_len = skb->len;
         skb->truesize += skb->data_len;
         skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1516,6 +1520,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
         bool csum_ok = pkt->csum_calc && !pkt->err_vec &&
                        (rspq->netdev->features & NETIF_F_RXCSUM);
         struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
+        struct adapter *adapter = rspq->adapter;
+        struct sge *s = &adapter->sge;
 
         /*
          * If this is a good TCP packet and we have Generic Receive Offload
@@ -1537,7 +1543,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
                 rxq->stats.rx_drops++;
                 return 0;
         }
-        __skb_pull(skb, PKTSHIFT);
+        __skb_pull(skb, s->pktshift);
         skb->protocol = eth_type_trans(skb, rspq->netdev);
         skb_record_rx_queue(skb, rspq->idx);
         rxq->stats.pkts++;
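
Both receive paths subtract the same quantity because the hardware inserts
pktshift bytes of padding between the CPL message and the packet data:
do_gro() trims it from the fragment copy above, and the non-GRO path
__skb_pull()s it here. With an illustrative pktshift of 2 (a common choice,
since 2 + 14 bytes of Ethernet header leaves the IP header 4-byte aligned):

    unsigned int pktshift = 2;      /* stands in for s->pktshift */
    unsigned int tot_len = 1516;    /* padding + 1514-byte frame */
    unsigned int frame_len = tot_len - pktshift;    /* 1514 */
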
@@ -1648,6 +1654,8 @@ static inline void rspq_next(struct sge_rspq *rspq)
 static int process_responses(struct sge_rspq *rspq, int budget)
 {
         struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
+        struct adapter *adapter = rspq->adapter;
+        struct sge *s = &adapter->sge;
         int budget_left = budget;
 
         while (likely(budget_left)) {
@@ -1697,7 +1705,7 @@ static int process_responses(struct sge_rspq *rspq, int budget)
                         BUG_ON(frag >= MAX_SKB_FRAGS);
                         BUG_ON(rxq->fl.avail == 0);
                         sdesc = &rxq->fl.sdesc[rxq->fl.cidx];
-                        bufsz = get_buf_size(sdesc);
+                        bufsz = get_buf_size(adapter, sdesc);
                         fp->page = sdesc->page;
                         fp->offset = rspq->offset;
                         fp->size = min(bufsz, len);
@@ -1726,7 +1734,7 @@ static int process_responses(struct sge_rspq *rspq, int budget)
                          */
                         ret = rspq->handler(rspq, rspq->cur_desc, &gl);
                         if (likely(ret == 0))
-                                rspq->offset += ALIGN(fp->size, FL_ALIGN);
+                                rspq->offset += ALIGN(fp->size, s->fl_align);
                         else
                                 restore_rx_bufs(&gl, &rxq->fl, frag);
                 } else if (likely(rsp_type == RSP_TYPE_CPL)) {
@@ -1963,7 +1971,7 @@ static void sge_rx_timer_cb(unsigned long data)
                  * schedule napi but the FL is no longer starving.
                  * No biggie.
                  */
-                if (fl_starving(fl)) {
+                if (fl_starving(adapter, fl)) {
                         struct sge_eth_rxq *rxq;
 
                         rxq = container_of(fl, struct sge_eth_rxq, fl);
@@ -2047,6 +2055,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
                        int intr_dest,
                        struct sge_fl *fl, rspq_handler_t hnd)
 {
+        struct sge *s = &adapter->sge;
         struct port_info *pi = netdev_priv(dev);
         struct fw_iq_cmd cmd, rpl;
         int ret, iqandst, flsz = 0;
@@ -2117,7 +2126,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
                 fl->size = roundup(fl->size, FL_PER_EQ_UNIT);
                 fl->desc = alloc_ring(adapter->pdev_dev, fl->size,
                                       sizeof(__be64), sizeof(struct rx_sw_desc),
-                                      &fl->addr, &fl->sdesc, STAT_LEN);
+                                      &fl->addr, &fl->sdesc, s->stat_len);
                 if (!fl->desc) {
                         ret = -ENOMEM;
                         goto err;
@@ -2129,7 +2138,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
                  * free list ring) in Egress Queue Units.
                  */
                 flsz = (fl->size / FL_PER_EQ_UNIT +
-                        STAT_LEN / EQ_UNIT);
+                        s->stat_len / EQ_UNIT);
 
         /*
          * Fill in all the relevant firmware Ingress Queue Command
@@ -2217,6 +2226,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
                            struct net_device *dev, struct netdev_queue *devq,
                            unsigned int iqid)
 {
+        struct sge *s = &adapter->sge;
         int ret, nentries;
         struct fw_eq_eth_cmd cmd, rpl;
         struct port_info *pi = netdev_priv(dev);
@@ -2225,7 +2235,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
          * Calculate the size of the hardware TX Queue (including the Status
          * Page on the end of the TX Queue) in units of TX Descriptors.
          */
-        nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
+        nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
 
         /*
          * Allocate the hardware ring for the TX ring (with space for its
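
The status-page accounting is clearest with numbers. Assuming the usual
64-byte struct tx_desc (an assumption for this example, not taken from the
patch), a 64-byte status page costs one extra ring entry and a 128-byte
page costs two:

    /* txq->q.size = 1024, s->stat_len = 128, sizeof(struct tx_desc) = 64 */
    nentries = 1024 + 128 / 64;     /* 1026: 1024 descriptors + 2 for status */
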
@@ -2234,7 +2244,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
         txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size,
                                  sizeof(struct tx_desc),
                                  sizeof(struct tx_sw_desc),
-                                 &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN);
+                                 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len);
         if (!txq->q.desc)
                 return -ENOMEM;
 
@@ -2307,8 +2317,10 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
  */
 static void free_txq(struct adapter *adapter, struct sge_txq *tq)
 {
+        struct sge *s = &adapter->sge;
+
         dma_free_coherent(adapter->pdev_dev,
-                          tq->size * sizeof(*tq->desc) + STAT_LEN,
+                          tq->size * sizeof(*tq->desc) + s->stat_len,
                           tq->desc, tq->phys_addr);
         tq->cntxt_id = 0;
         tq->sdesc = NULL;
@@ -2322,6 +2334,7 @@ static void free_txq(struct adapter *adapter, struct sge_txq *tq)
 static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq,
                          struct sge_fl *fl)
 {
+        struct sge *s = &adapter->sge;
         unsigned int flid = fl ? fl->cntxt_id : 0xffff;
 
         t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP,
@@ -2337,7 +2350,7 @@ static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq,
         if (fl) {
                 free_rx_bufs(adapter, fl, fl->avail);
                 dma_free_coherent(adapter->pdev_dev,
-                                  fl->size * sizeof(*fl->desc) + STAT_LEN,
+                                  fl->size * sizeof(*fl->desc) + s->stat_len,
                                   fl->desc, fl->addr);
                 kfree(fl->sdesc);
                 fl->sdesc = NULL;
@@ -2443,12 +2456,12 @@ int t4vf_sge_init(struct adapter *adapter)
          * Now translate the adapter parameters into our internal forms.
          */
         if (fl1)
-                FL_PG_ORDER = ilog2(fl1) - PAGE_SHIFT;
-        STAT_LEN = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK)
-                    ? 128 : 64);
-        PKTSHIFT = PKTSHIFT_GET(sge_params->sge_control);
-        FL_ALIGN = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) +
-                         SGE_INGPADBOUNDARY_SHIFT);
+                s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT;
+        s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK)
+                       ? 128 : 64);
+        s->pktshift = PKTSHIFT_GET(sge_params->sge_control);
+        s->fl_align = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) +
+                            SGE_INGPADBOUNDARY_SHIFT);
 
         /*
          * Set up tasklet timers.
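
To make the decode concrete, one plausible reading of the fields (values
illustrative; the *_GET() macros are the driver's own field extractors):
EGRSTATUSPAGESIZE selects a 128-byte egress status page when set and a
64-byte page otherwise, which is why stat_len is folded into every ring
size computed earlier; and an ingress padding boundary field of 3 with
SGE_INGPADBOUNDARY_SHIFT of 5 pads response-queue messages to 256-byte
boundaries:

    u32 ingpadboundary = 3;         /* INGPADBOUNDARY_GET(sge_control) */
    u32 fl_align = 1 << (ingpadboundary + 5);       /* 256-byte alignment */
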