path: root/drivers/net/ethernet/chelsio
author	David S. Miller <davem@davemloft.net>	2014-11-14 01:01:12 -0500
committer	David S. Miller <davem@davemloft.net>	2014-11-14 01:01:12 -0500
commit	076ce4482569ea1a2c27b4ca71a309adaf91d398 (patch)
tree	2ae9e42612f35be897f190983fc292d7af781cd2 /drivers/net/ethernet/chelsio
parent	d649a7a81f3b5bacb1d60abd7529894d8234a666 (diff)
parent	b23dc5a7cc6ebc9a0d57351da7a0e8454c9ffea3 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/ethernet/chelsio/cxgb4vf/sge.c
	drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c

sge.c was overlapping two changes, one to use the new __dev_alloc_page()
in net-next, and one to use s->fl_pg_order in net.

ixgbe_phy.c was a set of overlapping whitespace changes.

Signed-off-by: David S. Miller <davem@davemloft.net>
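For reference, the resolution in cxgb4vf/sge.c keeps both sides of the conflict:
the large-page path of refill_fl() calls the new __dev_alloc_pages() interface
from net-next while sizing the allocation with the per-adapter s->fl_pg_order
decoded parameter from net. A minimal sketch of the merged allocation path
(simplified excerpt only; surrounding declarations, error handling and free-list
bookkeeping are omitted, see the full hunk in the diff below):

	/* refill_fl(): merged form of the two overlapping changes
	 * (simplified excerpt, not the complete function)
	 */
	struct sge *s = &adapter->sge;

	page = __dev_alloc_pages(gfp, s->fl_pg_order);	/* net-next: new allocator */
	if (page) {
		poison_buf(page, PAGE_SIZE << s->fl_pg_order);	/* net: decoded order */
		dma_addr = dma_map_page(adapter->pdev_dev, page, 0,
					PAGE_SIZE << s->fl_pg_order,
					PCI_DMA_FROMDEVICE);
	}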
Diffstat (limited to 'drivers/net/ethernet/chelsio')
-rw-r--r--	drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c	31
-rw-r--r--	drivers/net/ethernet/chelsio/cxgb4/sge.c	30
-rw-r--r--	drivers/net/ethernet/chelsio/cxgb4/t4_hw.c	51
-rw-r--r--	drivers/net/ethernet/chelsio/cxgb4/t4_regs.h	10
-rw-r--r--	drivers/net/ethernet/chelsio/cxgb4vf/adapter.h	8
-rw-r--r--	drivers/net/ethernet/chelsio/cxgb4vf/sge.c	136
-rw-r--r--	drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h	2
-rw-r--r--	drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c	28
8 files changed, 230 insertions, 66 deletions
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
index 6fe300e316c3..cca604994003 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
@@ -79,8 +79,9 @@ static void cxgb4_dcb_cleanup_apps(struct net_device *dev)
 		app.protocol = dcb->app_priority[i].protocolid;
 
 		if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE) {
+			app.priority = dcb->app_priority[i].user_prio_map;
 			app.selector = dcb->app_priority[i].sel_field + 1;
-			err = dcb_ieee_setapp(dev, &app);
+			err = dcb_ieee_delapp(dev, &app);
 		} else {
 			app.selector = !!(dcb->app_priority[i].sel_field);
 			err = dcb_setapp(dev, &app);
@@ -122,7 +123,11 @@ void cxgb4_dcb_state_fsm(struct net_device *dev,
 	case CXGB4_DCB_INPUT_FW_ENABLED: {
 		/* we're going to use Firmware DCB */
 		dcb->state = CXGB4_DCB_STATE_FW_INCOMPLETE;
-		dcb->supported = CXGB4_DCBX_FW_SUPPORT;
+		dcb->supported = DCB_CAP_DCBX_LLD_MANAGED;
+		if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE)
+			dcb->supported |= DCB_CAP_DCBX_VER_IEEE;
+		else
+			dcb->supported |= DCB_CAP_DCBX_VER_CEE;
 		break;
 	}
 
@@ -436,14 +441,17 @@ static void cxgb4_getpgtccfg(struct net_device *dev, int tc,
 	*up_tc_map = (1 << tc);
 
 	/* prio_type is link strict */
-	*prio_type = 0x2;
+	if (*pgid != 0xF)
+		*prio_type = 0x2;
 }
 
 static void cxgb4_getpgtccfg_tx(struct net_device *dev, int tc,
 				u8 *prio_type, u8 *pgid, u8 *bw_per,
 				u8 *up_tc_map)
 {
-	return cxgb4_getpgtccfg(dev, tc, prio_type, pgid, bw_per, up_tc_map, 1);
+	/* tc 0 is written at MSB position */
+	return cxgb4_getpgtccfg(dev, (7 - tc), prio_type, pgid, bw_per,
+				up_tc_map, 1);
 }
 
 
@@ -451,7 +459,9 @@ static void cxgb4_getpgtccfg_rx(struct net_device *dev, int tc,
 				u8 *prio_type, u8 *pgid, u8 *bw_per,
 				u8 *up_tc_map)
 {
-	return cxgb4_getpgtccfg(dev, tc, prio_type, pgid, bw_per, up_tc_map, 0);
+	/* tc 0 is written at MSB position */
+	return cxgb4_getpgtccfg(dev, (7 - tc), prio_type, pgid, bw_per,
+				up_tc_map, 0);
 }
 
 static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc,
@@ -461,6 +471,7 @@ static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc,
 	struct fw_port_cmd pcmd;
 	struct port_info *pi = netdev2pinfo(dev);
 	struct adapter *adap = pi->adapter;
+	int fw_tc = 7 - tc;
 	u32 _pgid;
 	int err;
 
@@ -479,8 +490,8 @@ static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc,
 	}
 
 	_pgid = be32_to_cpu(pcmd.u.dcb.pgid.pgid);
-	_pgid &= ~(0xF << (tc * 4));
-	_pgid |= pgid << (tc * 4);
+	_pgid &= ~(0xF << (fw_tc * 4));
+	_pgid |= pgid << (fw_tc * 4);
 	pcmd.u.dcb.pgid.pgid = cpu_to_be32(_pgid);
 
 	INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id);
@@ -593,7 +604,7 @@ static void cxgb4_getpfccfg(struct net_device *dev, int priority, u8 *pfccfg)
 	    priority >= CXGB4_MAX_PRIORITY)
 		*pfccfg = 0;
 	else
-		*pfccfg = (pi->dcb.pfcen >> priority) & 1;
+		*pfccfg = (pi->dcb.pfcen >> (7 - priority)) & 1;
 }
 
 /* Enable/disable Priority Pause Frames for the specified Traffic Class
@@ -618,9 +629,9 @@ static void cxgb4_setpfccfg(struct net_device *dev, int priority, u8 pfccfg)
 	pcmd.u.dcb.pfc.pfcen = pi->dcb.pfcen;
 
 	if (pfccfg)
-		pcmd.u.dcb.pfc.pfcen |= (1 << priority);
+		pcmd.u.dcb.pfc.pfcen |= (1 << (7 - priority));
 	else
-		pcmd.u.dcb.pfc.pfcen &= (~(1 << priority));
+		pcmd.u.dcb.pfc.pfcen &= (~(1 << (7 - priority)));
 
 	err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
 	if (err != FW_PORT_DCB_CFG_SUCCESS) {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 91dbf98036cc..5cc5e19286a1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2914,7 +2914,8 @@ static int t4_sge_init_hard(struct adapter *adap)
 int t4_sge_init(struct adapter *adap)
 {
 	struct sge *s = &adap->sge;
-	u32 sge_control, sge_conm_ctrl;
+	u32 sge_control, sge_control2, sge_conm_ctrl;
+	unsigned int ingpadboundary, ingpackboundary;
 	int ret, egress_threshold;
 
 	/*
@@ -2924,8 +2925,31 @@ int t4_sge_init(struct adapter *adap)
 	sge_control = t4_read_reg(adap, SGE_CONTROL);
 	s->pktshift = PKTSHIFT_GET(sge_control);
 	s->stat_len = (sge_control & EGRSTATUSPAGESIZE_MASK) ? 128 : 64;
-	s->fl_align = 1 << (INGPADBOUNDARY_GET(sge_control) +
-			    X_INGPADBOUNDARY_SHIFT);
+
+	/* T4 uses a single control field to specify both the PCIe Padding and
+	 * Packing Boundary.  T5 introduced the ability to specify these
+	 * separately.  The actual Ingress Packet Data alignment boundary
+	 * within Packed Buffer Mode is the maximum of these two
+	 * specifications.
+	 */
+	ingpadboundary = 1 << (INGPADBOUNDARY_GET(sge_control) +
+			       X_INGPADBOUNDARY_SHIFT);
+	if (is_t4(adap->params.chip)) {
+		s->fl_align = ingpadboundary;
+	} else {
+		/* T5 has a different interpretation of one of the PCIe Packing
+		 * Boundary values.
+		 */
+		sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A);
+		ingpackboundary = INGPACKBOUNDARY_G(sge_control2);
+		if (ingpackboundary == INGPACKBOUNDARY_16B_X)
+			ingpackboundary = 16;
+		else
+			ingpackboundary = 1 << (ingpackboundary +
+						INGPACKBOUNDARY_SHIFT_X);
+
+		s->fl_align = max(ingpadboundary, ingpackboundary);
+	}
 
 	if (adap->flags & USING_SOFT_PARAMS)
 		ret = t4_sge_init_soft(adap);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 2bb4efa7db98..4d32df5041f6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -3130,12 +3130,51 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
 			 HOSTPAGESIZEPF6(sge_hps) |
 			 HOSTPAGESIZEPF7(sge_hps));
 
-	t4_set_reg_field(adap, SGE_CONTROL,
-			 INGPADBOUNDARY_MASK |
-			 EGRSTATUSPAGESIZE_MASK,
-			 INGPADBOUNDARY(fl_align_log - 5) |
-			 EGRSTATUSPAGESIZE(stat_len != 64));
-
+	if (is_t4(adap->params.chip)) {
+		t4_set_reg_field(adap, SGE_CONTROL,
+				 INGPADBOUNDARY_MASK |
+				 EGRSTATUSPAGESIZE_MASK,
+				 INGPADBOUNDARY(fl_align_log - 5) |
+				 EGRSTATUSPAGESIZE(stat_len != 64));
+	} else {
+		/* T5 introduced the separation of the Free List Padding and
+		 * Packing Boundaries.  Thus, we can select a smaller Padding
+		 * Boundary to avoid uselessly chewing up PCIe Link and Memory
+		 * Bandwidth, and use a Packing Boundary which is large enough
+		 * to avoid false sharing between CPUs, etc.
+		 *
+		 * For the PCI Link, the smaller the Padding Boundary the
+		 * better.  For the Memory Controller, a smaller Padding
+		 * Boundary is better until we cross under the Memory Line
+		 * Size (the minimum unit of transfer to/from Memory).  If we
+		 * have a Padding Boundary which is smaller than the Memory
+		 * Line Size, that'll involve a Read-Modify-Write cycle on the
+		 * Memory Controller which is never good.  For T5 the smallest
+		 * Padding Boundary which we can select is 32 bytes which is
+		 * larger than any known Memory Controller Line Size so we'll
+		 * use that.
+		 *
+		 * T5 has a different interpretation of the "0" value for the
+		 * Packing Boundary.  This corresponds to 16 bytes instead of
+		 * the expected 32 bytes.  We never have a Packing Boundary
+		 * less than 32 bytes so we can't use that special value but
+		 * on the other hand, if we wanted 32 bytes, the best we can
+		 * really do is 64 bytes.
+		 */
+		if (fl_align <= 32) {
+			fl_align = 64;
+			fl_align_log = 6;
+		}
+		t4_set_reg_field(adap, SGE_CONTROL,
+				 INGPADBOUNDARY_MASK |
+				 EGRSTATUSPAGESIZE_MASK,
+				 INGPADBOUNDARY(INGPCIEBOUNDARY_32B_X) |
+				 EGRSTATUSPAGESIZE(stat_len != 64));
+		t4_set_reg_field(adap, SGE_CONTROL2_A,
+				 INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
+				 INGPACKBOUNDARY_V(fl_align_log -
+						   INGPACKBOUNDARY_SHIFT_X));
+	}
 	/*
 	 * Adjust various SGE Free List Host Buffer Sizes.
 	 *
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index c8eb7ba225e1..ccdf8a7f4916 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -95,6 +95,7 @@
 #define X_INGPADBOUNDARY_SHIFT 5
 
 #define SGE_CONTROL 0x1008
+#define SGE_CONTROL2_A 0x1124
 #define DCASYSTYPE 0x00080000U
 #define RXPKTCPLMODE_MASK 0x00040000U
 #define RXPKTCPLMODE_SHIFT 18
@@ -106,6 +107,7 @@
 #define PKTSHIFT_SHIFT 10
 #define PKTSHIFT(x) ((x) << PKTSHIFT_SHIFT)
 #define PKTSHIFT_GET(x) (((x) & PKTSHIFT_MASK) >> PKTSHIFT_SHIFT)
+#define INGPCIEBOUNDARY_32B_X 0
 #define INGPCIEBOUNDARY_MASK 0x00000380U
 #define INGPCIEBOUNDARY_SHIFT 7
 #define INGPCIEBOUNDARY(x) ((x) << INGPCIEBOUNDARY_SHIFT)
@@ -114,6 +116,14 @@
 #define INGPADBOUNDARY(x) ((x) << INGPADBOUNDARY_SHIFT)
 #define INGPADBOUNDARY_GET(x) (((x) & INGPADBOUNDARY_MASK) \
 			       >> INGPADBOUNDARY_SHIFT)
+#define INGPACKBOUNDARY_16B_X 0
+#define INGPACKBOUNDARY_SHIFT_X 5
+
+#define INGPACKBOUNDARY_S 16
+#define INGPACKBOUNDARY_M 0x7U
+#define INGPACKBOUNDARY_V(x) ((x) << INGPACKBOUNDARY_S)
+#define INGPACKBOUNDARY_G(x) (((x) >> INGPACKBOUNDARY_S) \
+			      & INGPACKBOUNDARY_M)
 #define EGRPCIEBOUNDARY_MASK 0x0000000eU
 #define EGRPCIEBOUNDARY_SHIFT 1
 #define EGRPCIEBOUNDARY(x) ((x) << EGRPCIEBOUNDARY_SHIFT)
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index 68eaa9c88c7d..3d06e77d7121 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -299,6 +299,14 @@ struct sge {
 	u16 timer_val[SGE_NTIMERS];	/* interrupt holdoff timer array */
 	u8 counter_val[SGE_NCOUNTERS];	/* interrupt RX threshold array */
 
+	/* Decoded Adapter Parameters.
+	 */
+	u32 fl_pg_order;		/* large page allocation size */
+	u32 stat_len;			/* length of status page at ring end */
+	u32 pktshift;			/* padding between CPL & packet data */
+	u32 fl_align;			/* response queue message alignment */
+	u32 fl_starve_thres;		/* Free List starvation threshold */
+
 	/*
 	 * Reverse maps from Absolute Queue IDs to associated queue pointers.
 	 * The absolute Queue IDs are in a compact range which start at a
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index aff6d37f2676..50b1b34bde6d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -51,14 +51,6 @@
 #include "../cxgb4/t4_msg.h"
 
 /*
- * Decoded Adapter Parameters.
- */
-static u32 FL_PG_ORDER;	/* large page allocation size */
-static u32 STAT_LEN;	/* length of status page at ring end */
-static u32 PKTSHIFT;	/* padding between CPL and packet data */
-static u32 FL_ALIGN;	/* response queue message alignment */
-
-/*
  * Constants ...
  */
 enum {
@@ -102,12 +94,6 @@ enum {
 	MAX_TIMER_TX_RECLAIM = 100,
 
 	/*
-	 * An FL with <= FL_STARVE_THRES buffers is starving and a periodic
-	 * timer will attempt to refill it.
-	 */
-	FL_STARVE_THRES = 4,
-
-	/*
 	 * Suspend an Ethernet TX queue with fewer available descriptors than
 	 * this.  We always want to have room for a maximum sized packet:
 	 * inline immediate data + MAX_SKB_FRAGS.  This is the same as
@@ -264,15 +250,19 @@ static inline unsigned int fl_cap(const struct sge_fl *fl)
 
 /**
  *	fl_starving - return whether a Free List is starving.
+ *	@adapter: pointer to the adapter
  *	@fl: the Free List
  *
  *	Tests specified Free List to see whether the number of buffers
  *	available to the hardware has falled below our "starvation"
 *	threshold.
 */
-static inline bool fl_starving(const struct sge_fl *fl)
+static inline bool fl_starving(const struct adapter *adapter,
+			       const struct sge_fl *fl)
 {
-	return fl->avail - fl->pend_cred <= FL_STARVE_THRES;
+	const struct sge *s = &adapter->sge;
+
+	return fl->avail - fl->pend_cred <= s->fl_starve_thres;
 }
 
 /**
@@ -457,13 +447,16 @@ static inline void reclaim_completed_tx(struct adapter *adapter,
 
 /**
 *	get_buf_size - return the size of an RX Free List buffer.
+ *	@adapter: pointer to the associated adapter
 *	@sdesc: pointer to the software buffer descriptor
 */
-static inline int get_buf_size(const struct rx_sw_desc *sdesc)
+static inline int get_buf_size(const struct adapter *adapter,
+			       const struct rx_sw_desc *sdesc)
 {
-	return FL_PG_ORDER > 0 && (sdesc->dma_addr & RX_LARGE_BUF)
-		? (PAGE_SIZE << FL_PG_ORDER)
-		: PAGE_SIZE;
+	const struct sge *s = &adapter->sge;
+
+	return (s->fl_pg_order > 0 && (sdesc->dma_addr & RX_LARGE_BUF)
+		? (PAGE_SIZE << s->fl_pg_order) : PAGE_SIZE);
 }
 
 /**
@@ -483,7 +476,8 @@ static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n)
 
 		if (is_buf_mapped(sdesc))
 			dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
-				       get_buf_size(sdesc), PCI_DMA_FROMDEVICE);
+				       get_buf_size(adapter, sdesc),
+				       PCI_DMA_FROMDEVICE);
 		put_page(sdesc->page);
 		sdesc->page = NULL;
 		if (++fl->cidx == fl->size)
@@ -511,7 +505,8 @@ static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl)
 
 	if (is_buf_mapped(sdesc))
 		dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
-			       get_buf_size(sdesc), PCI_DMA_FROMDEVICE);
+			       get_buf_size(adapter, sdesc),
+			       PCI_DMA_FROMDEVICE);
 	sdesc->page = NULL;
 	if (++fl->cidx == fl->size)
 		fl->cidx = 0;
@@ -589,6 +584,7 @@ static inline void poison_buf(struct page *page, size_t sz)
 static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
 			      int n, gfp_t gfp)
 {
+	struct sge *s = &adapter->sge;
 	struct page *page;
 	dma_addr_t dma_addr;
 	unsigned int cred = fl->avail;
@@ -610,11 +606,11 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
 	 * If we don't support large pages, drop directly into the small page
 	 * allocation code.
 	 */
-	if (FL_PG_ORDER == 0)
+	if (s->fl_pg_order == 0)
 		goto alloc_small_pages;
 
 	while (n) {
-		page = __dev_alloc_pages(gfp, FL_PG_ORDER);
+		page = __dev_alloc_pages(gfp, s->fl_pg_order);
 		if (unlikely(!page)) {
 			/*
 			 * We've failed inour attempt to allocate a "large
@@ -624,10 +620,10 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
 			fl->large_alloc_failed++;
 			break;
 		}
-		poison_buf(page, PAGE_SIZE << FL_PG_ORDER);
+		poison_buf(page, PAGE_SIZE << s->fl_pg_order);
 
 		dma_addr = dma_map_page(adapter->pdev_dev, page, 0,
-					PAGE_SIZE << FL_PG_ORDER,
+					PAGE_SIZE << s->fl_pg_order,
 					PCI_DMA_FROMDEVICE);
 		if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
 			/*
@@ -638,7 +634,7 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
 			 * because DMA mapping resources are typically
 			 * critical resources once they become scarse.
 			 */
-			__free_pages(page, FL_PG_ORDER);
+			__free_pages(page, s->fl_pg_order);
 			goto out;
 		}
 		dma_addr |= RX_LARGE_BUF;
@@ -694,7 +690,7 @@ out:
 	fl->pend_cred += cred;
 	ring_fl_db(adapter, fl);
 
-	if (unlikely(fl_starving(fl))) {
+	if (unlikely(fl_starving(adapter, fl))) {
 		smp_wmb();
 		set_bit(fl->cntxt_id, adapter->sge.starving_fl);
 	}
@@ -1469,6 +1465,8 @@ static void t4vf_pktgl_free(const struct pkt_gl *gl)
 static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
 		   const struct cpl_rx_pkt *pkt)
 {
+	struct adapter *adapter = rxq->rspq.adapter;
+	struct sge *s = &adapter->sge;
 	int ret;
 	struct sk_buff *skb;
 
@@ -1479,8 +1477,8 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
 		return;
 	}
 
-	copy_frags(skb, gl, PKTSHIFT);
-	skb->len = gl->tot_len - PKTSHIFT;
+	copy_frags(skb, gl, s->pktshift);
+	skb->len = gl->tot_len - s->pktshift;
 	skb->data_len = skb->len;
 	skb->truesize += skb->data_len;
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1517,6 +1515,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
 	bool csum_ok = pkt->csum_calc && !pkt->err_vec &&
 		       (rspq->netdev->features & NETIF_F_RXCSUM);
 	struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
+	struct adapter *adapter = rspq->adapter;
+	struct sge *s = &adapter->sge;
 
 	/*
 	 * If this is a good TCP packet and we have Generic Receive Offload
@@ -1538,7 +1538,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
 		rxq->stats.rx_drops++;
 		return 0;
 	}
-	__skb_pull(skb, PKTSHIFT);
+	__skb_pull(skb, s->pktshift);
 	skb->protocol = eth_type_trans(skb, rspq->netdev);
 	skb_record_rx_queue(skb, rspq->idx);
 	rxq->stats.pkts++;
@@ -1649,6 +1649,8 @@ static inline void rspq_next(struct sge_rspq *rspq)
 static int process_responses(struct sge_rspq *rspq, int budget)
 {
 	struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
+	struct adapter *adapter = rspq->adapter;
+	struct sge *s = &adapter->sge;
 	int budget_left = budget;
 
 	while (likely(budget_left)) {
@@ -1698,7 +1700,7 @@ static int process_responses(struct sge_rspq *rspq, int budget)
 			BUG_ON(frag >= MAX_SKB_FRAGS);
 			BUG_ON(rxq->fl.avail == 0);
 			sdesc = &rxq->fl.sdesc[rxq->fl.cidx];
-			bufsz = get_buf_size(sdesc);
+			bufsz = get_buf_size(adapter, sdesc);
 			fp->page = sdesc->page;
 			fp->offset = rspq->offset;
 			fp->size = min(bufsz, len);
@@ -1727,7 +1729,7 @@ static int process_responses(struct sge_rspq *rspq, int budget)
 			 */
 			ret = rspq->handler(rspq, rspq->cur_desc, &gl);
 			if (likely(ret == 0))
-				rspq->offset += ALIGN(fp->size, FL_ALIGN);
+				rspq->offset += ALIGN(fp->size, s->fl_align);
 			else
 				restore_rx_bufs(&gl, &rxq->fl, frag);
 		} else if (likely(rsp_type == RSP_TYPE_CPL)) {
@@ -1964,7 +1966,7 @@ static void sge_rx_timer_cb(unsigned long data)
 			 * schedule napi but the FL is no longer starving.
 			 * No biggie.
 			 */
-			if (fl_starving(fl)) {
+			if (fl_starving(adapter, fl)) {
 				struct sge_eth_rxq *rxq;
 
 				rxq = container_of(fl, struct sge_eth_rxq, fl);
@@ -2048,6 +2050,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
 		       int intr_dest,
 		       struct sge_fl *fl, rspq_handler_t hnd)
 {
+	struct sge *s = &adapter->sge;
 	struct port_info *pi = netdev_priv(dev);
 	struct fw_iq_cmd cmd, rpl;
 	int ret, iqandst, flsz = 0;
@@ -2118,7 +2121,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
 		fl->size = roundup(fl->size, FL_PER_EQ_UNIT);
 		fl->desc = alloc_ring(adapter->pdev_dev, fl->size,
 				      sizeof(__be64), sizeof(struct rx_sw_desc),
-				      &fl->addr, &fl->sdesc, STAT_LEN);
+				      &fl->addr, &fl->sdesc, s->stat_len);
 		if (!fl->desc) {
 			ret = -ENOMEM;
 			goto err;
@@ -2130,7 +2133,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
 		 * free list ring) in Egress Queue Units.
 		 */
 		flsz = (fl->size / FL_PER_EQ_UNIT +
-			STAT_LEN / EQ_UNIT);
+			s->stat_len / EQ_UNIT);
 
 	/*
 	 * Fill in all the relevant firmware Ingress Queue Command
@@ -2218,6 +2221,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
 			   struct net_device *dev, struct netdev_queue *devq,
 			   unsigned int iqid)
 {
+	struct sge *s = &adapter->sge;
 	int ret, nentries;
 	struct fw_eq_eth_cmd cmd, rpl;
 	struct port_info *pi = netdev_priv(dev);
@@ -2226,7 +2230,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
 	 * Calculate the size of the hardware TX Queue (including the Status
 	 * Page on the end of the TX Queue) in units of TX Descriptors.
 	 */
-	nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
+	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
 
 	/*
 	 * Allocate the hardware ring for the TX ring (with space for its
@@ -2235,7 +2239,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
 	txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size,
 				 sizeof(struct tx_desc),
 				 sizeof(struct tx_sw_desc),
-				 &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN);
+				 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len);
 	if (!txq->q.desc)
 		return -ENOMEM;
 
@@ -2308,8 +2312,10 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
 */
 static void free_txq(struct adapter *adapter, struct sge_txq *tq)
 {
+	struct sge *s = &adapter->sge;
+
 	dma_free_coherent(adapter->pdev_dev,
-			  tq->size * sizeof(*tq->desc) + STAT_LEN,
+			  tq->size * sizeof(*tq->desc) + s->stat_len,
 			  tq->desc, tq->phys_addr);
 	tq->cntxt_id = 0;
 	tq->sdesc = NULL;
@@ -2323,6 +2329,7 @@ static void free_txq(struct adapter *adapter, struct sge_txq *tq)
 static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq,
 			 struct sge_fl *fl)
 {
+	struct sge *s = &adapter->sge;
 	unsigned int flid = fl ? fl->cntxt_id : 0xffff;
 
 	t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP,
@@ -2338,7 +2345,7 @@ static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq,
 	if (fl) {
 		free_rx_bufs(adapter, fl, fl->avail);
 		dma_free_coherent(adapter->pdev_dev,
-				  fl->size * sizeof(*fl->desc) + STAT_LEN,
+				  fl->size * sizeof(*fl->desc) + s->stat_len,
 				  fl->desc, fl->addr);
 		kfree(fl->sdesc);
 		fl->sdesc = NULL;
@@ -2424,6 +2431,7 @@ int t4vf_sge_init(struct adapter *adapter)
 	u32 fl0 = sge_params->sge_fl_buffer_size[0];
 	u32 fl1 = sge_params->sge_fl_buffer_size[1];
 	struct sge *s = &adapter->sge;
+	unsigned int ingpadboundary, ingpackboundary;
 
 	/*
 	 * Start by vetting the basic SGE parameters which have been set up by
@@ -2444,12 +2452,48 @@ int t4vf_sge_init(struct adapter *adapter)
 	 * Now translate the adapter parameters into our internal forms.
 	 */
 	if (fl1)
-		FL_PG_ORDER = ilog2(fl1) - PAGE_SHIFT;
-	STAT_LEN = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK)
+		s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT;
+	s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK)
 		       ? 128 : 64);
-	PKTSHIFT = PKTSHIFT_GET(sge_params->sge_control);
-	FL_ALIGN = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) +
-			 SGE_INGPADBOUNDARY_SHIFT);
+	s->pktshift = PKTSHIFT_GET(sge_params->sge_control);
+
+	/* T4 uses a single control field to specify both the PCIe Padding and
+	 * Packing Boundary.  T5 introduced the ability to specify these
+	 * separately.  The actual Ingress Packet Data alignment boundary
+	 * within Packed Buffer Mode is the maximum of these two
+	 * specifications.  (Note that it makes no real practical sense to
+	 * have the Pading Boudary be larger than the Packing Boundary but you
+	 * could set the chip up that way and, in fact, legacy T4 code would
+	 * end doing this because it would initialize the Padding Boundary and
+	 * leave the Packing Boundary initialized to 0 (16 bytes).)
+	 */
+	ingpadboundary = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) +
+			       X_INGPADBOUNDARY_SHIFT);
+	if (is_t4(adapter->params.chip)) {
+		s->fl_align = ingpadboundary;
+	} else {
+		/* T5 has a different interpretation of one of the PCIe Packing
+		 * Boundary values.
+		 */
+		ingpackboundary = INGPACKBOUNDARY_G(sge_params->sge_control2);
+		if (ingpackboundary == INGPACKBOUNDARY_16B_X)
+			ingpackboundary = 16;
+		else
+			ingpackboundary = 1 << (ingpackboundary +
+						INGPACKBOUNDARY_SHIFT_X);
+
+		s->fl_align = max(ingpadboundary, ingpackboundary);
+	}
+
+	/* A FL with <= fl_starve_thres buffers is starving and a periodic
+	 * timer will attempt to refill it.  This needs to be larger than the
+	 * SGE's Egress Congestion Threshold.  If it isn't, then we can get
+	 * stuck waiting for new packets while the SGE is waiting for us to
+	 * give it more Free List entries.  (Note that the SGE's Egress
+	 * Congestion Threshold is in units of 2 Free List pointers.)
+	 */
+	s->fl_starve_thres
+		= EGRTHRESHOLD_GET(sge_params->sge_congestion_control)*2 + 1;
 
 	/*
 	 * Set up tasklet timers.
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index 2cfa4396b003..a608c6657d63 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -134,11 +134,13 @@ struct dev_params {
 */
 struct sge_params {
 	u32 sge_control;		/* padding, boundaries, lengths, etc. */
+	u32 sge_control2;		/* T5: more of the same */
 	u32 sge_host_page_size;		/* RDMA page sizes */
 	u32 sge_queues_per_page;	/* RDMA queues/page */
 	u32 sge_user_mode_limits;	/* limits for BAR2 user mode accesses */
 	u32 sge_fl_buffer_size[16];	/* free list buffer sizes */
 	u32 sge_ingress_rx_threshold;	/* RX counter interrupt threshold[4] */
+	u32 sge_congestion_control;	/* congestion thresholds, etc. */
 	u32 sge_timer_value_0_and_1;	/* interrupt coalescing timer values */
 	u32 sge_timer_value_2_and_3;
 	u32 sge_timer_value_4_and_5;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 570b895ae06f..fae0c95e1a6b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -468,12 +468,38 @@ int t4vf_get_sge_params(struct adapter *adapter)
 	sge_params->sge_timer_value_2_and_3 = vals[5];
 	sge_params->sge_timer_value_4_and_5 = vals[6];
 
+	/* T4 uses a single control field to specify both the PCIe Padding and
+	 * Packing Boundary.  T5 introduced the ability to specify these
+	 * separately with the Padding Boundary in SGE_CONTROL and and Packing
+	 * Boundary in SGE_CONTROL2.  So for T5 and later we need to grab
+	 * SGE_CONTROL in order to determine how ingress packet data will be
+	 * laid out in Packed Buffer Mode.  Unfortunately, older versions of
+	 * the firmware won't let us retrieve SGE_CONTROL2 so if we get a
+	 * failure grabbing it we throw an error since we can't figure out the
+	 * right value.
+	 */
+	if (!is_t4(adapter->params.chip)) {
+		params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+			     FW_PARAMS_PARAM_XYZ(SGE_CONTROL2_A));
+		v = t4vf_query_params(adapter, 1, params, vals);
+		if (v != FW_SUCCESS) {
+			dev_err(adapter->pdev_dev,
+				"Unable to get SGE Control2; "
+				"probably old firmware.\n");
+			return v;
+		}
+		sge_params->sge_control2 = vals[0];
+	}
+
 	params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
 		     FW_PARAMS_PARAM_XYZ(SGE_INGRESS_RX_THRESHOLD));
-	v = t4vf_query_params(adapter, 1, params, vals);
+	params[1] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+		     FW_PARAMS_PARAM_XYZ(SGE_CONM_CTRL));
+	v = t4vf_query_params(adapter, 2, params, vals);
 	if (v)
 		return v;
 	sge_params->sge_ingress_rx_threshold = vals[0];
+	sge_params->sge_congestion_control = vals[1];
 
 	return 0;
 }