author		Al Viro <viro@zeniv.linux.org.uk>	2014-12-08 20:39:29 -0500
committer	Al Viro <viro@zeniv.linux.org.uk>	2014-12-08 20:39:29 -0500
commit		ba00410b8131b23edfb0e09f8b6dd26c8eb621fb
tree		c08504e4d2fa51ac91cef544f336d0169806c49f	/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
parent		8ce74dd6057832618957fc2cbd38fa959c3a0a6c
parent		aa583096d9767892983332e7c1a984bd17e3cd39

Merge branch 'iov_iter' into for-next

Diffstat (limited to 'drivers/net/ethernet/chelsio/cxgb4vf/sge.c')

 -rw-r--r--	drivers/net/ethernet/chelsio/cxgb4vf/sge.c	136

1 file changed, 90 insertions(+), 46 deletions(-)
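In outline, the sge.c part of this merge moves the decoded SGE parameters (the file-scope globals FL_PG_ORDER, STAT_LEN, PKTSHIFT and FL_ALIGN) into the per-adapter struct sge, so helpers such as fl_starving() and get_buf_size() now take the adapter and read per-device values. It also folds the T5 ingress packing boundary into the fl_align calculation and derives the Free List starvation threshold from hardware state instead of a fixed constant. A minimal standalone sketch of the refactoring pattern, with simplified stand-in types rather than the driver's real adapter.h definitions:

/*
 * Sketch only: former globals become fields of per-adapter SGE state,
 * and helpers take the adapter to find them.  Field comments are taken
 * from the removed globals below; the types are illustrative.
 */
#include <stdbool.h>

struct sge {
	unsigned int fl_pg_order;	/* large page allocation size */
	unsigned int stat_len;		/* length of status page at ring end */
	unsigned int pktshift;		/* padding between CPL and packet data */
	unsigned int fl_align;		/* response queue message alignment */
	unsigned int fl_starve_thres;	/* Free List starvation threshold */
};

struct adapter {
	struct sge sge;			/* decoded per-device SGE parameters */
};

struct sge_fl {
	unsigned int avail;		/* # of hardware-visible buffers */
	unsigned int pend_cred;		/* new buffers since last doorbell */
};

/* Mirrors the new fl_starving() below: read the per-adapter threshold. */
static inline bool fl_starving(const struct adapter *adapter,
			       const struct sge_fl *fl)
{
	const struct sge *s = &adapter->sge;

	return fl->avail - fl->pend_cred <= s->fl_starve_thres;
}
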
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 85036e6b42c4..fdd078d7d82c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -51,14 +51,6 @@
 #include "../cxgb4/t4_msg.h"
 
 /*
- * Decoded Adapter Parameters.
- */
-static u32 FL_PG_ORDER; /* large page allocation size */
-static u32 STAT_LEN;    /* length of status page at ring end */
-static u32 PKTSHIFT;    /* padding between CPL and packet data */
-static u32 FL_ALIGN;    /* response queue message alignment */
-
-/*
  * Constants ...
  */
 enum {
@@ -102,12 +94,6 @@ enum {
 	MAX_TIMER_TX_RECLAIM = 100,
 
 	/*
-	 * An FL with <= FL_STARVE_THRES buffers is starving and a periodic
-	 * timer will attempt to refill it.
-	 */
-	FL_STARVE_THRES = 4,
-
-	/*
 	 * Suspend an Ethernet TX queue with fewer available descriptors than
 	 * this.  We always want to have room for a maximum sized packet:
 	 * inline immediate data + MAX_SKB_FRAGS.  This is the same as
@@ -264,15 +250,19 @@ static inline unsigned int fl_cap(const struct sge_fl *fl)
 
 /**
  * fl_starving - return whether a Free List is starving.
+ * @adapter: pointer to the adapter
  * @fl: the Free List
  *
  * Tests specified Free List to see whether the number of buffers
  * available to the hardware has falled below our "starvation"
  * threshold.
  */
-static inline bool fl_starving(const struct sge_fl *fl)
+static inline bool fl_starving(const struct adapter *adapter,
+			       const struct sge_fl *fl)
 {
-	return fl->avail - fl->pend_cred <= FL_STARVE_THRES;
+	const struct sge *s = &adapter->sge;
+
+	return fl->avail - fl->pend_cred <= s->fl_starve_thres;
 }
 
 /**
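
The fixed FL_STARVE_THRES = 4 removed above is replaced by the per-adapter s->fl_starve_thres, which the t4vf_sge_init() hunk at the end of this diff derives from the SGE's Egress Congestion Threshold (reported in units of 2 Free List pointers). A small sketch of that arithmetic, using a hypothetical decoded value rather than anything read from hardware:

#include <stdio.h>

/* Threshold must sit just above twice the congestion threshold. */
static unsigned int fl_starve_thres(unsigned int egrthreshold)
{
	return egrthreshold * 2 + 1;
}

int main(void)
{
	unsigned int egrthreshold = 4;	/* hypothetical decoded field */

	/* prints "fl_starve_thres = 9" */
	printf("fl_starve_thres = %u\n", fl_starve_thres(egrthreshold));
	return 0;
}
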
@@ -457,13 +447,16 @@ static inline void reclaim_completed_tx(struct adapter *adapter,
 
 /**
  * get_buf_size - return the size of an RX Free List buffer.
+ * @adapter: pointer to the associated adapter
  * @sdesc: pointer to the software buffer descriptor
  */
-static inline int get_buf_size(const struct rx_sw_desc *sdesc)
+static inline int get_buf_size(const struct adapter *adapter,
+			       const struct rx_sw_desc *sdesc)
 {
-	return FL_PG_ORDER > 0 && (sdesc->dma_addr & RX_LARGE_BUF)
-		? (PAGE_SIZE << FL_PG_ORDER)
-		: PAGE_SIZE;
+	const struct sge *s = &adapter->sge;
+
+	return (s->fl_pg_order > 0 && (sdesc->dma_addr & RX_LARGE_BUF)
+		? (PAGE_SIZE << s->fl_pg_order) : PAGE_SIZE);
 }
 
 /**
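
With get_buf_size() now reading s->fl_pg_order, a "large" Free List buffer spans PAGE_SIZE << fl_pg_order bytes. A quick worked sketch, assuming 4 KB pages and a hypothetical order of 4; the real order comes from the firmware's Free List buffer sizes in t4vf_sge_init():

#include <stdio.h>

#define PAGE_SIZE 4096u			/* assumed page size */

int main(void)
{
	unsigned int fl_pg_order = 4;	/* hypothetical: ilog2(fl1) - PAGE_SHIFT */

	printf("small FL buffer: %u bytes\n", PAGE_SIZE);		/* 4096 */
	printf("large FL buffer: %u bytes\n", PAGE_SIZE << fl_pg_order); /* 65536 */
	return 0;
}
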
@@ -483,7 +476,8 @@ static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n)
 
 		if (is_buf_mapped(sdesc))
 			dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
-				       get_buf_size(sdesc), PCI_DMA_FROMDEVICE);
+				       get_buf_size(adapter, sdesc),
+				       PCI_DMA_FROMDEVICE);
 		put_page(sdesc->page);
 		sdesc->page = NULL;
 		if (++fl->cidx == fl->size)
@@ -511,7 +505,8 @@ static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl)
 
 	if (is_buf_mapped(sdesc))
 		dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
-			       get_buf_size(sdesc), PCI_DMA_FROMDEVICE);
+			       get_buf_size(adapter, sdesc),
+			       PCI_DMA_FROMDEVICE);
 	sdesc->page = NULL;
 	if (++fl->cidx == fl->size)
 		fl->cidx = 0;
@@ -589,6 +584,7 @@ static inline void poison_buf(struct page *page, size_t sz)
 static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
 			      int n, gfp_t gfp)
 {
+	struct sge *s = &adapter->sge;
 	struct page *page;
 	dma_addr_t dma_addr;
 	unsigned int cred = fl->avail;
@@ -608,12 +604,12 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
 	 * If we don't support large pages, drop directly into the small page
 	 * allocation code.
 	 */
-	if (FL_PG_ORDER == 0)
+	if (s->fl_pg_order == 0)
 		goto alloc_small_pages;
 
 	while (n) {
 		page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
-				   FL_PG_ORDER);
+				   s->fl_pg_order);
 		if (unlikely(!page)) {
 			/*
 			 * We've failed inour attempt to allocate a "large
@@ -623,10 +619,10 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
 			fl->large_alloc_failed++;
 			break;
 		}
-		poison_buf(page, PAGE_SIZE << FL_PG_ORDER);
+		poison_buf(page, PAGE_SIZE << s->fl_pg_order);
 
 		dma_addr = dma_map_page(adapter->pdev_dev, page, 0,
-					PAGE_SIZE << FL_PG_ORDER,
+					PAGE_SIZE << s->fl_pg_order,
 					PCI_DMA_FROMDEVICE);
 		if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
 			/*
@@ -637,7 +633,7 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
 			 * because DMA mapping resources are typically
 			 * critical resources once they become scarse.
 			 */
-			__free_pages(page, FL_PG_ORDER);
+			__free_pages(page, s->fl_pg_order);
 			goto out;
 		}
 		dma_addr |= RX_LARGE_BUF;
@@ -693,7 +689,7 @@ out:
 	fl->pend_cred += cred;
 	ring_fl_db(adapter, fl);
 
-	if (unlikely(fl_starving(fl))) {
+	if (unlikely(fl_starving(adapter, fl))) {
 		smp_wmb();
 		set_bit(fl->cntxt_id, adapter->sge.starving_fl);
 	}
@@ -1468,6 +1464,8 @@ static void t4vf_pktgl_free(const struct pkt_gl *gl)
 static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
 		   const struct cpl_rx_pkt *pkt)
 {
+	struct adapter *adapter = rxq->rspq.adapter;
+	struct sge *s = &adapter->sge;
 	int ret;
 	struct sk_buff *skb;
 
@@ -1478,8 +1476,8 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
 		return;
 	}
 
-	copy_frags(skb, gl, PKTSHIFT);
-	skb->len = gl->tot_len - PKTSHIFT;
+	copy_frags(skb, gl, s->pktshift);
+	skb->len = gl->tot_len - s->pktshift;
 	skb->data_len = skb->len;
 	skb->truesize += skb->data_len;
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1516,6 +1514,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
 	bool csum_ok = pkt->csum_calc && !pkt->err_vec &&
 		       (rspq->netdev->features & NETIF_F_RXCSUM);
 	struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
+	struct adapter *adapter = rspq->adapter;
+	struct sge *s = &adapter->sge;
 
 	/*
 	 * If this is a good TCP packet and we have Generic Receive Offload
@@ -1537,7 +1537,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
 		rxq->stats.rx_drops++;
 		return 0;
 	}
-	__skb_pull(skb, PKTSHIFT);
+	__skb_pull(skb, s->pktshift);
 	skb->protocol = eth_type_trans(skb, rspq->netdev);
 	skb_record_rx_queue(skb, rspq->idx);
 	rxq->stats.pkts++;
@@ -1648,6 +1648,8 @@ static inline void rspq_next(struct sge_rspq *rspq)
 static int process_responses(struct sge_rspq *rspq, int budget)
 {
 	struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
+	struct adapter *adapter = rspq->adapter;
+	struct sge *s = &adapter->sge;
 	int budget_left = budget;
 
 	while (likely(budget_left)) {
@@ -1697,7 +1699,7 @@ static int process_responses(struct sge_rspq *rspq, int budget)
 			BUG_ON(frag >= MAX_SKB_FRAGS);
 			BUG_ON(rxq->fl.avail == 0);
 			sdesc = &rxq->fl.sdesc[rxq->fl.cidx];
-			bufsz = get_buf_size(sdesc);
+			bufsz = get_buf_size(adapter, sdesc);
 			fp->page = sdesc->page;
 			fp->offset = rspq->offset;
 			fp->size = min(bufsz, len);
@@ -1726,7 +1728,7 @@ static int process_responses(struct sge_rspq *rspq, int budget)
 			 */
 			ret = rspq->handler(rspq, rspq->cur_desc, &gl);
 			if (likely(ret == 0))
-				rspq->offset += ALIGN(fp->size, FL_ALIGN);
+				rspq->offset += ALIGN(fp->size, s->fl_align);
 			else
 				restore_rx_bufs(&gl, &rxq->fl, frag);
 		} else if (likely(rsp_type == RSP_TYPE_CPL)) {
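
The s->fl_align consumed above is computed in the final hunk of this diff: on T4 it is the PCIe ingress padding boundary alone, while on T5 it is the larger of the padding and packing boundaries, with packing field value 0 specially encoding 16 bytes. A standalone sketch of that derivation, using hypothetical decoded register fields and assumed shift constants rather than the real SGE_CONTROL/SGE_CONTROL2 accessors:

#include <stdio.h>

#define INGPADBOUNDARY_SHIFT	5	/* assumed: boundary = 2^(field+5) */
#define INGPACKBOUNDARY_SHIFT	5	/* assumed: boundary = 2^(field+5) */
#define INGPACKBOUNDARY_16B	0	/* special encoding meaning 16 bytes */

static unsigned int fl_align(int is_t4, unsigned int pad_field,
			     unsigned int pack_field)
{
	unsigned int ingpadboundary = 1u << (pad_field + INGPADBOUNDARY_SHIFT);
	unsigned int ingpackboundary;

	if (is_t4)
		return ingpadboundary;

	/* T5: the packing field value 0 means 16 bytes, not 2^5. */
	ingpackboundary = (pack_field == INGPACKBOUNDARY_16B)
		? 16u
		: 1u << (pack_field + INGPACKBOUNDARY_SHIFT);

	return ingpadboundary > ingpackboundary
		? ingpadboundary : ingpackboundary;
}

int main(void)
{
	/* hypothetical fields: 64-byte padding, 16-byte packing */
	printf("T4 fl_align: %u\n", fl_align(1, 1, 0));	/* 64 */
	printf("T5 fl_align: %u\n", fl_align(0, 1, 0));	/* max(64, 16) = 64 */
	return 0;
}
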
@@ -1963,7 +1965,7 @@ static void sge_rx_timer_cb(unsigned long data)
 		 * schedule napi but the FL is no longer starving.
 		 * No biggie.
 		 */
-		if (fl_starving(fl)) {
+		if (fl_starving(adapter, fl)) {
 			struct sge_eth_rxq *rxq;
 
 			rxq = container_of(fl, struct sge_eth_rxq, fl);
@@ -2047,6 +2049,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
 		       int intr_dest,
 		       struct sge_fl *fl, rspq_handler_t hnd)
 {
+	struct sge *s = &adapter->sge;
 	struct port_info *pi = netdev_priv(dev);
 	struct fw_iq_cmd cmd, rpl;
 	int ret, iqandst, flsz = 0;
@@ -2117,7 +2120,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
 		fl->size = roundup(fl->size, FL_PER_EQ_UNIT);
 		fl->desc = alloc_ring(adapter->pdev_dev, fl->size,
 				      sizeof(__be64), sizeof(struct rx_sw_desc),
-				      &fl->addr, &fl->sdesc, STAT_LEN);
+				      &fl->addr, &fl->sdesc, s->stat_len);
 		if (!fl->desc) {
 			ret = -ENOMEM;
 			goto err;
@@ -2129,7 +2132,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
 		 * free list ring) in Egress Queue Units.
 		 */
 		flsz = (fl->size / FL_PER_EQ_UNIT +
-			STAT_LEN / EQ_UNIT);
+			s->stat_len / EQ_UNIT);
 
 	/*
 	 * Fill in all the relevant firmware Ingress Queue Command
@@ -2217,6 +2220,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
 			   struct net_device *dev, struct netdev_queue *devq,
 			   unsigned int iqid)
 {
+	struct sge *s = &adapter->sge;
 	int ret, nentries;
 	struct fw_eq_eth_cmd cmd, rpl;
 	struct port_info *pi = netdev_priv(dev);
@@ -2225,7 +2229,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
 	 * Calculate the size of the hardware TX Queue (including the Status
 	 * Page on the end of the TX Queue) in units of TX Descriptors.
 	 */
-	nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
+	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
 
 	/*
 	 * Allocate the hardware ring for the TX ring (with space for its
@@ -2234,7 +2238,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
 	txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size,
 				 sizeof(struct tx_desc),
 				 sizeof(struct tx_sw_desc),
-				 &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN);
+				 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len);
 	if (!txq->q.desc)
 		return -ENOMEM;
 
@@ -2307,8 +2311,10 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
  */
 static void free_txq(struct adapter *adapter, struct sge_txq *tq)
 {
+	struct sge *s = &adapter->sge;
+
 	dma_free_coherent(adapter->pdev_dev,
-			  tq->size * sizeof(*tq->desc) + STAT_LEN,
+			  tq->size * sizeof(*tq->desc) + s->stat_len,
 			  tq->desc, tq->phys_addr);
 	tq->cntxt_id = 0;
 	tq->sdesc = NULL;
@@ -2322,6 +2328,7 @@ static void free_txq(struct adapter *adapter, struct sge_txq *tq)
 static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq,
 			 struct sge_fl *fl)
 {
+	struct sge *s = &adapter->sge;
 	unsigned int flid = fl ? fl->cntxt_id : 0xffff;
 
 	t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP,
@@ -2337,7 +2344,7 @@ static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq,
 	if (fl) {
 		free_rx_bufs(adapter, fl, fl->avail);
 		dma_free_coherent(adapter->pdev_dev,
-				  fl->size * sizeof(*fl->desc) + STAT_LEN,
+				  fl->size * sizeof(*fl->desc) + s->stat_len,
 				  fl->desc, fl->addr);
 		kfree(fl->sdesc);
 		fl->sdesc = NULL;
@@ -2423,6 +2430,7 @@ int t4vf_sge_init(struct adapter *adapter)
 	u32 fl0 = sge_params->sge_fl_buffer_size[0];
 	u32 fl1 = sge_params->sge_fl_buffer_size[1];
 	struct sge *s = &adapter->sge;
+	unsigned int ingpadboundary, ingpackboundary;
 
 	/*
 	 * Start by vetting the basic SGE parameters which have been set up by
@@ -2443,12 +2451,48 @@ int t4vf_sge_init(struct adapter *adapter)
 	 * Now translate the adapter parameters into our internal forms.
 	 */
 	if (fl1)
-		FL_PG_ORDER = ilog2(fl1) - PAGE_SHIFT;
-	STAT_LEN = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK)
-		    ? 128 : 64);
-	PKTSHIFT = PKTSHIFT_GET(sge_params->sge_control);
-	FL_ALIGN = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) +
-			 SGE_INGPADBOUNDARY_SHIFT);
+		s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT;
+	s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK)
+		       ? 128 : 64);
+	s->pktshift = PKTSHIFT_GET(sge_params->sge_control);
+
+	/* T4 uses a single control field to specify both the PCIe Padding and
+	 * Packing Boundary.  T5 introduced the ability to specify these
+	 * separately.  The actual Ingress Packet Data alignment boundary
+	 * within Packed Buffer Mode is the maximum of these two
+	 * specifications.  (Note that it makes no real practical sense to
+	 * have the Pading Boudary be larger than the Packing Boundary but you
+	 * could set the chip up that way and, in fact, legacy T4 code would
+	 * end doing this because it would initialize the Padding Boundary and
+	 * leave the Packing Boundary initialized to 0 (16 bytes).)
+	 */
+	ingpadboundary = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) +
+			       X_INGPADBOUNDARY_SHIFT);
+	if (is_t4(adapter->params.chip)) {
+		s->fl_align = ingpadboundary;
+	} else {
+		/* T5 has a different interpretation of one of the PCIe Packing
+		 * Boundary values.
+		 */
+		ingpackboundary = INGPACKBOUNDARY_G(sge_params->sge_control2);
+		if (ingpackboundary == INGPACKBOUNDARY_16B_X)
+			ingpackboundary = 16;
+		else
+			ingpackboundary = 1 << (ingpackboundary +
+						INGPACKBOUNDARY_SHIFT_X);
+
+		s->fl_align = max(ingpadboundary, ingpackboundary);
+	}
+
+	/* A FL with <= fl_starve_thres buffers is starving and a periodic
+	 * timer will attempt to refill it.  This needs to be larger than the
+	 * SGE's Egress Congestion Threshold.  If it isn't, then we can get
+	 * stuck waiting for new packets while the SGE is waiting for us to
+	 * give it more Free List entries.  (Note that the SGE's Egress
+	 * Congestion Threshold is in units of 2 Free List pointers.)
+	 */
+	s->fl_starve_thres
+		= EGRTHRESHOLD_GET(sge_params->sge_congestion_control)*2 + 1;
 
 	/*
 	 * Set up tasklet timers.