author		Ariel Elior <ariele@broadcom.com>	2013-01-01 00:22:37 -0500
committer	David S. Miller <davem@davemloft.net>	2013-01-02 04:45:07 -0500
commit		8db573baa5cf1f65ed94039ac3100ea5a94260de (patch)
tree		e1e87d6ff08fe89806015884f2040f58d85cc09e /drivers
parent		67c431a5f2f3e0dda511509ed5773346839c07c0 (diff)

bnx2x: Support of PF driver of a VF setup_q request

Upon receiving a 'setup_q' request from the VF over the VF <-> PF channel
the PF driver will open a corresponding queue in the device. The PF driver
configures the queue with the appropriate mac address, vlan configuration,
etc. from the VF.

Signed-off-by: Ariel Elior <ariele@broadcom.com>
Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/ethernet/broadcom/bnx2x/bnx2x.h		|    1
-rw-r--r--	drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c		|   18
-rw-r--r--	drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c	|  906
-rw-r--r--	drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h	|  177
-rw-r--r--	drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c	|  146
5 files changed, 1071 insertions(+), 177 deletions(-)
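In outline, the PF side works like this: the mailbox dispatcher recognizes the new CHANNEL_TLV_SETUP_Q request type and routes it to a handler that first bounds-checks the VF's requested queue index before constructing the queue. The toy program below models that shape only — the harness is invented for illustration; just the TLV name and the bounds-check message mirror the patch:

#include <stdio.h>

/* Toy model of the PF mailbox dispatch; not driver code. */
enum channel_tlv { CHANNEL_TLV_ACQUIRE, CHANNEL_TLV_INIT, CHANNEL_TLV_SETUP_Q };

/* mirrors the vf_qid bounds check in bnx2x_vf_mbx_setup_q() below */
static int handle_setup_q(int vf_qid, int rxq_count)
{
	if (vf_qid >= rxq_count) {
		printf("vf_qid %d invalid, max queue count is %d\n",
		       vf_qid, rxq_count);
		return -1;	/* stands in for the -EINVAL + mbx response path */
	}
	printf("PF opens queue %d on behalf of the VF\n", vf_qid);
	return 0;
}

int main(void)
{
	enum channel_tlv req = CHANNEL_TLV_SETUP_Q;

	if (req == CHANNEL_TLV_SETUP_Q) {
		handle_setup_q(1, 4);	/* accepted */
		handle_setup_q(7, 4);	/* rejected */
	}
	return 0;
}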
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index d096e3e85287..fbe8be31f673 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -969,6 +969,7 @@ extern struct workqueue_struct *bnx2x_wq;
 #define BNX2X_MAX_NUM_OF_VFS	64
 #define BNX2X_VF_CID_WND	0
 #define BNX2X_CIDS_PER_VF	(1 << BNX2X_VF_CID_WND)
+#define BNX2X_CLIENTS_PER_VF	1
 #define BNX2X_FIRST_VF_CID	256
 #define BNX2X_VF_CIDS	(BNX2X_MAX_NUM_OF_VFS * BNX2X_CIDS_PER_VF)
 #define BNX2X_VF_ID_INVALID	0xFF
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 022c491cd0fb..cdb073a6297e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2029,7 +2029,7 @@ static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
 
 static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
 {
-	int num_groups;
+	int num_groups, vf_headroom = 0;
 	int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
 
 	/* number of queues for statistics is number of eth queues + FCoE */
@@ -2042,18 +2042,26 @@ static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
 	 */
 	bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
 
+	/* vf stats appear in the request list, but their data is allocated by
+	 * the VFs themselves. We don't include them in the bp->fw_stats_num as
+	 * it is used to determine where to place the vf stats queries in the
+	 * request struct
+	 */
+	if (IS_SRIOV(bp))
+		vf_headroom = bp->vfdb->sriov.nr_virtfn * BNX2X_CLIENTS_PER_VF;
+
 	/* Request is built from stats_query_header and an array of
 	 * stats_query_cmd_group each of which contains
 	 * STATS_QUERY_CMD_COUNT rules. The real number or requests is
 	 * configured in the stats_query_header.
 	 */
 	num_groups =
-		(((bp->fw_stats_num) / STATS_QUERY_CMD_COUNT) +
-		 (((bp->fw_stats_num) % STATS_QUERY_CMD_COUNT) ?
+		(((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
+		 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
 		 1 : 0));
 
-	DP(BNX2X_MSG_SP, "stats fw_stats_num %d, num_groups %d\n",
-	   bp->fw_stats_num, num_groups);
+	DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
+	   bp->fw_stats_num, vf_headroom, num_groups);
 	bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
 			      num_groups * sizeof(struct stats_query_cmd_group);
 
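The group arithmetic in this hunk is plain ceiling division over the PF's own queries plus the VF headroom. A standalone sanity check of that computation (the per-group rule count and the example values below are assumed for illustration, not taken from the driver headers):

#include <stdio.h>

#define STATS_QUERY_CMD_COUNT 16	/* assumed rules-per-group value */

/* same shape as the num_groups expression in bnx2x_alloc_fw_stats_mem() */
static int num_groups(int fw_stats_num, int vf_headroom)
{
	int total = fw_stats_num + vf_headroom;

	return total / STATS_QUERY_CMD_COUNT +
	       (total % STATS_QUERY_CMD_COUNT ? 1 : 0);
}

int main(void)
{
	/* e.g. 10 PF queries + 8 VFs x 1 client each = 18 queries -> 2 groups */
	printf("num_groups = %d\n", num_groups(10, 8));
	return 0;
}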
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 71e1c6fb205d..5b47b0849a58 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -100,10 +100,233 @@ static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
 	mmiowb();
 	barrier();
 }
+/* VFOP - VF slow-path operation support */
+
+/* VFOP operations states */
+enum bnx2x_vfop_qctor_state {
+	BNX2X_VFOP_QCTOR_INIT,
+	BNX2X_VFOP_QCTOR_SETUP,
+	BNX2X_VFOP_QCTOR_INT_EN
+};
+
+enum bnx2x_vfop_vlan_mac_state {
+	BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
+	BNX2X_VFOP_VLAN_MAC_CLEAR,
+	BNX2X_VFOP_VLAN_MAC_CHK_DONE,
+	BNX2X_VFOP_MAC_CONFIG_LIST,
+	BNX2X_VFOP_VLAN_CONFIG_LIST,
+	BNX2X_VFOP_VLAN_CONFIG_LIST_0
+};
+
+enum bnx2x_vfop_qsetup_state {
+	BNX2X_VFOP_QSETUP_CTOR,
+	BNX2X_VFOP_QSETUP_VLAN0,
+	BNX2X_VFOP_QSETUP_DONE
+};
+
+#define bnx2x_vfop_reset_wq(vf)	atomic_set(&vf->op_in_progress, 0)
+
+void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
+			      struct bnx2x_queue_init_params *init_params,
+			      struct bnx2x_queue_setup_params *setup_params,
+			      u16 q_idx, u16 sb_idx)
+{
+	DP(BNX2X_MSG_IOV,
+	   "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d",
+	   vf->abs_vfid,
+	   q_idx,
+	   sb_idx,
+	   init_params->tx.sb_cq_index,
+	   init_params->tx.hc_rate,
+	   setup_params->flags,
+	   setup_params->txq_params.traffic_type);
+}
 
-static int bnx2x_ari_enabled(struct pci_dev *dev)
+void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
+			      struct bnx2x_queue_init_params *init_params,
+			      struct bnx2x_queue_setup_params *setup_params,
+			      u16 q_idx, u16 sb_idx)
 {
-	return dev->bus->self && dev->bus->self->ari_enabled;
+	struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params;
+
+	DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n"
+	   "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n",
+	   vf->abs_vfid,
+	   q_idx,
+	   sb_idx,
+	   init_params->rx.sb_cq_index,
+	   init_params->rx.hc_rate,
+	   setup_params->gen_params.mtu,
+	   rxq_params->buf_sz,
+	   rxq_params->sge_buf_sz,
+	   rxq_params->max_sges_pkt,
+	   rxq_params->tpa_agg_sz,
+	   setup_params->flags,
+	   rxq_params->drop_flags,
+	   rxq_params->cache_line_log);
+}
+
+void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
+			   struct bnx2x_virtf *vf,
+			   struct bnx2x_vf_queue *q,
+			   struct bnx2x_vfop_qctor_params *p,
+			   unsigned long q_type)
+{
+	struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
+	struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup;
+
+	/* INIT */
+
+	/* Enable host coalescing in the transition to INIT state */
+	if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags))
+		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags);
+
+	if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags))
+		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags);
+
+	/* FW SB ID */
+	init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
+	init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
+
+	/* context */
+	init_p->cxts[0] = q->cxt;
+
+	/* SETUP */
+
+	/* Setup-op general parameters */
+	setup_p->gen_params.spcl_id = vf->sp_cl_id;
+	setup_p->gen_params.stat_id = vfq_stat_id(vf, q);
+
+	/* Setup-op pause params:
+	 * Nothing to do, the pause thresholds are set by default to 0 which
+	 * effectively turns off the feature for this queue. We don't want
+	 * one queue (VF) to interfering with another queue (another VF)
+	 */
+	if (vf->cfg_flags & VF_CFG_FW_FC)
+		BNX2X_ERR("No support for pause to VFs (abs_vfid: %d)\n",
+			  vf->abs_vfid);
+	/* Setup-op flags:
+	 * collect statistics, zero statistics, local-switching, security,
+	 * OV for Flex10, RSS and MCAST for leading
+	 */
+	if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags))
+		__set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags);
+
+	/* for VFs, enable tx switching, bd coherency, and mac address
+	 * anti-spoofing
+	 */
+	__set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags);
+	__set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
+	__set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);
+
+	if (vfq_is_leading(q)) {
+		__set_bit(BNX2X_Q_FLG_LEADING_RSS, &setup_p->flags);
+		__set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags);
+	}
+
+	/* Setup-op rx parameters */
+	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
+		struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;
+
+		rxq_p->cl_qzone_id = vfq_qzone_id(vf, q);
+		rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx);
+		rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid);
+
+		if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags))
+			rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES;
+	}
+
+	/* Setup-op tx parameters */
+	if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) {
+		setup_p->txq_params.tss_leading_cl_id = vf->leading_rss;
+		setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
+	}
+}
+
+/* VFOP queue construction */
+static void bnx2x_vfop_qctor(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+	struct bnx2x_vfop_args_qctor *args = &vfop->args.qctor;
+	struct bnx2x_queue_state_params *q_params = &vfop->op_p->qctor.qstate;
+	enum bnx2x_vfop_qctor_state state = vfop->state;
+
+	bnx2x_vfop_reset_wq(vf);
+
+	if (vfop->rc < 0)
+		goto op_err;
+
+	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+
+	switch (state) {
+	case BNX2X_VFOP_QCTOR_INIT:
+
+		/* has this queue already been opened? */
+		if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
+		    BNX2X_Q_LOGICAL_STATE_ACTIVE) {
+			DP(BNX2X_MSG_IOV,
+			   "Entered qctor but queue was already up. Aborting gracefully\n");
+			goto op_done;
+		}
+
+		/* next state */
+		vfop->state = BNX2X_VFOP_QCTOR_SETUP;
+
+		q_params->cmd = BNX2X_Q_CMD_INIT;
+		vfop->rc = bnx2x_queue_state_change(bp, q_params);
+
+		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+
+	case BNX2X_VFOP_QCTOR_SETUP:
+		/* next state */
+		vfop->state = BNX2X_VFOP_QCTOR_INT_EN;
+
+		/* copy pre-prepared setup params to the queue-state params */
+		vfop->op_p->qctor.qstate.params.setup =
+			vfop->op_p->qctor.prep_qsetup;
+
+		q_params->cmd = BNX2X_Q_CMD_SETUP;
+		vfop->rc = bnx2x_queue_state_change(bp, q_params);
+
+		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+
+	case BNX2X_VFOP_QCTOR_INT_EN:
+
+		/* enable interrupts */
+		bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, args->sb_idx),
+				    USTORM_ID, 0, IGU_INT_ENABLE, 0);
+		goto op_done;
+	default:
+		bnx2x_vfop_default(state);
+	}
+op_err:
+	BNX2X_ERR("QCTOR[%d:%d] error: cmd %d, rc %d\n",
+		  vf->abs_vfid, args->qid, q_params->cmd, vfop->rc);
+op_done:
+	bnx2x_vfop_end(bp, vf, vfop);
+op_pending:
+	return;
+}
+
+static int bnx2x_vfop_qctor_cmd(struct bnx2x *bp,
+				struct bnx2x_virtf *vf,
+				struct bnx2x_vfop_cmd *cmd,
+				int qid)
+{
+	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+
+	if (vfop) {
+		vf->op_params.qctor.qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
+
+		vfop->args.qctor.qid = qid;
+		vfop->args.qctor.sb_idx = bnx2x_vfq(vf, qid, sb_idx);
+
+		bnx2x_vfop_opset(BNX2X_VFOP_QCTOR_INIT,
+				 bnx2x_vfop_qctor, cmd->done);
+		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qctor,
+					     cmd->block);
+	}
+	return -ENOMEM;
 }
 
 static void
@@ -117,225 +340,342 @@ bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
 	}
 }
 
-static void
-bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
+/* VFOP MAC/VLAN helpers */
+static inline void bnx2x_vfop_credit(struct bnx2x *bp,
+				     struct bnx2x_vfop *vfop,
+				     struct bnx2x_vlan_mac_obj *obj)
 {
-	int sb_id;
-	u32 val;
-	u8 fid;
+	struct bnx2x_vfop_args_filters *args = &vfop->args.filters;
 
-	/* IGU in normal mode - read CAM */
-	for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
-		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
-		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
-			continue;
-		fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
-		if (!(fid & IGU_FID_ENCODE_IS_PF))
-			bnx2x_vf_set_igu_info(bp, sb_id,
-					      (fid & IGU_FID_VF_NUM_MASK));
+	/* update credit only if there is no error
+	 * and a valid credit counter
+	 */
+	if (!vfop->rc && args->credit) {
+		int cnt = 0;
+		struct list_head *pos;
 
-		DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
-		   ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
-		   ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
-		   (fid & IGU_FID_VF_NUM_MASK)), sb_id,
-		   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
+		list_for_each(pos, &obj->head)
+			cnt++;
+
+		atomic_set(args->credit, cnt);
 	}
 }
 
-static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
+static int bnx2x_vfop_set_user_req(struct bnx2x *bp,
+				   struct bnx2x_vfop_filter *pos,
+				   struct bnx2x_vlan_mac_data *user_req)
 {
-	if (bp->vfdb) {
-		kfree(bp->vfdb->vfqs);
-		kfree(bp->vfdb->vfs);
-		kfree(bp->vfdb);
+	user_req->cmd = pos->add ? BNX2X_VLAN_MAC_ADD :
+		BNX2X_VLAN_MAC_DEL;
+
+	switch (pos->type) {
+	case BNX2X_VFOP_FILTER_MAC:
+		memcpy(user_req->u.mac.mac, pos->mac, ETH_ALEN);
+		break;
+	case BNX2X_VFOP_FILTER_VLAN:
+		user_req->u.vlan.vlan = pos->vid;
+		break;
+	default:
+		BNX2X_ERR("Invalid filter type, skipping\n");
+		return 1;
 	}
-	bp->vfdb = NULL;
+	return 0;
 }
 
-static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
+static int
+bnx2x_vfop_config_vlan0(struct bnx2x *bp,
+			struct bnx2x_vlan_mac_ramrod_params *vlan_mac,
+			bool add)
 {
-	int pos;
-	struct pci_dev *dev = bp->pdev;
-
-	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
-	if (!pos) {
-		BNX2X_ERR("failed to find SRIOV capability in device\n");
-		return -ENODEV;
-	}
+	int rc;
 
-	iov->pos = pos;
-	DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
-	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
-	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
-	pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
-	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
-	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
-	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
-	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
-	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
+	vlan_mac->user_req.cmd = add ? BNX2X_VLAN_MAC_ADD :
+				       BNX2X_VLAN_MAC_DEL;
+	vlan_mac->user_req.u.vlan.vlan = 0;
 
-	return 0;
+	rc = bnx2x_config_vlan_mac(bp, vlan_mac);
+	if (rc == -EEXIST)
+		rc = 0;
+	return rc;
 }
 
-static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
+static int bnx2x_vfop_config_list(struct bnx2x *bp,
+				  struct bnx2x_vfop_filters *filters,
+				  struct bnx2x_vlan_mac_ramrod_params *vlan_mac)
 {
-	u32 val;
-
-	/* read the SRIOV capability structure
-	 * The fields can be read via configuration read or
-	 * directly from the device (starting at offset PCICFG_OFFSET)
-	 */
-	if (bnx2x_sriov_pci_cfg_info(bp, iov))
-		return -ENODEV;
+	struct bnx2x_vfop_filter *pos, *tmp;
+	struct list_head rollback_list, *filters_list = &filters->head;
+	struct bnx2x_vlan_mac_data *user_req = &vlan_mac->user_req;
+	int rc = 0, cnt = 0;
 
-	/* get the number of SRIOV bars */
-	iov->nres = 0;
+	INIT_LIST_HEAD(&rollback_list);
 
-	/* read the first_vfid */
-	val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
-	iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
-			       * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));
+	list_for_each_entry_safe(pos, tmp, filters_list, link) {
+		if (bnx2x_vfop_set_user_req(bp, pos, user_req))
+			continue;
 
-	DP(BNX2X_MSG_IOV,
-	   "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
-	   BP_FUNC(bp),
-	   iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
-	   iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);
+		rc = bnx2x_config_vlan_mac(bp, vlan_mac);
+		if (rc >= 0) {
+			cnt += pos->add ? 1 : -1;
+			list_del(&pos->link);
+			list_add(&pos->link, &rollback_list);
+			rc = 0;
+		} else if (rc == -EEXIST) {
+			rc = 0;
+		} else {
+			BNX2X_ERR("Failed to add a new vlan_mac command\n");
+			break;
+		}
+	}
 
-	return 0;
+	/* rollback if error or too many rules added */
+	if (rc || cnt > filters->add_cnt) {
+		BNX2X_ERR("error or too many rules added. Performing rollback\n");
+		list_for_each_entry_safe(pos, tmp, &rollback_list, link) {
+			pos->add = !pos->add; /* reverse op */
+			bnx2x_vfop_set_user_req(bp, pos, user_req);
+			bnx2x_config_vlan_mac(bp, vlan_mac);
+			list_del(&pos->link);
+		}
+		cnt = 0;
+		if (!rc)
+			rc = -EINVAL;
+	}
+	filters->add_cnt = cnt;
+	return rc;
 }
 
-static u8 bnx2x_iov_get_max_queue_count(struct bnx2x *bp)
+/* VFOP set VLAN/MAC */
+static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
 {
-	int i;
-	u8 queue_count = 0;
+	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+	struct bnx2x_vlan_mac_ramrod_params *vlan_mac = &vfop->op_p->vlan_mac;
+	struct bnx2x_vlan_mac_obj *obj = vlan_mac->vlan_mac_obj;
+	struct bnx2x_vfop_filters *filters = vfop->args.filters.multi_filter;
 
-	if (IS_SRIOV(bp))
-		for_each_vf(bp, i)
-			queue_count += bnx2x_vf(bp, i, alloc_resc.num_sbs);
+	enum bnx2x_vfop_vlan_mac_state state = vfop->state;
 
-	return queue_count;
-}
+	if (vfop->rc < 0)
+		goto op_err;
 
-/* must be called after PF bars are mapped */
-int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
-		       int num_vfs_param)
-{
-	int err, i, qcount;
-	struct bnx2x_sriov *iov;
-	struct pci_dev *dev = bp->pdev;
+	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
 
-	bp->vfdb = NULL;
+	bnx2x_vfop_reset_wq(vf);
 
-	/* verify sriov capability is present in configuration space */
-	if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV)) {
-		DP(BNX2X_MSG_IOV, "no sriov - capability not found\n");
-		return 0;
-	}
+	switch (state) {
+	case BNX2X_VFOP_VLAN_MAC_CLEAR:
+		/* next state */
+		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
 
-	/* verify is pf */
-	if (IS_VF(bp))
-		return 0;
+		/* do delete */
+		vfop->rc = obj->delete_all(bp, obj,
+					   &vlan_mac->user_req.vlan_mac_flags,
+					   &vlan_mac->ramrod_flags);
 
-	/* verify chip revision */
-	if (CHIP_IS_E1x(bp))
-		return 0;
+		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
 
-	/* check if SRIOV support is turned off */
-	if (!num_vfs_param)
-		return 0;
+	case BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE:
+		/* next state */
+		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
 
-	/* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
-	if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
-		BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
-			  BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
-		return 0;
-	}
+		/* do config */
+		vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
+		if (vfop->rc == -EEXIST)
+			vfop->rc = 0;
 
-	/* SRIOV can be enabled only with MSIX */
-	if (int_mode_param == BNX2X_INT_MODE_MSI ||
-	    int_mode_param == BNX2X_INT_MODE_INTX) {
-		BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
-		return 0;
-	}
+		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
 
-	/* verify ari is enabled */
-	if (!bnx2x_ari_enabled(bp->pdev)) {
-		BNX2X_ERR("ARI not supported, SRIOV can not be enabled\n");
-		return 0;
-	}
+	case BNX2X_VFOP_VLAN_MAC_CHK_DONE:
+		vfop->rc = !!obj->raw.check_pending(&obj->raw);
+		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
 
-	/* verify igu is in normal mode */
-	if (CHIP_INT_MODE_IS_BC(bp)) {
-		BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n");
-		return 0;
-	}
+	case BNX2X_VFOP_MAC_CONFIG_LIST:
+		/* next state */
+		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
 
-	/* allocate the vfs database */
-	bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
-	if (!bp->vfdb) {
-		BNX2X_ERR("failed to allocate vf database\n");
-		err = -ENOMEM;
-		goto failed;
-	}
+		/* do list config */
+		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
+		if (vfop->rc)
+			goto op_err;
 
-	/* get the sriov info - Linux already collected all the pertinent
-	 * information, however the sriov structure is for the private use
-	 * of the pci module. Also we want this information regardless
-	 * of the hyper-visor.
-	 */
-	iov = &(bp->vfdb->sriov);
-	err = bnx2x_sriov_info(bp, iov);
-	if (err)
-		goto failed;
+		set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
+		vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
+		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
 
-	/* SR-IOV capability was enabled but there are no VFs*/
-	if (iov->total == 0)
-		goto failed;
+	case BNX2X_VFOP_VLAN_CONFIG_LIST:
+		/* next state */
+		vfop->state = BNX2X_VFOP_VLAN_CONFIG_LIST_0;
 
-	/* calcuate the actual number of VFs */
-	iov->nr_virtfn = min_t(u16, iov->total, (u16)num_vfs_param);
+		/* remove vlan0 - could be no-op */
+		vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, false);
+		if (vfop->rc)
+			goto op_err;
 
-	/* allcate the vf array */
-	bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
-				BNX2X_NR_VIRTFN(bp), GFP_KERNEL);
-	if (!bp->vfdb->vfs) {
-		BNX2X_ERR("failed to allocate vf array\n");
-		err = -ENOMEM;
-		goto failed;
+		/* Do vlan list config. if this operation fails we try to
+		 * restore vlan0 to keep the queue is working order
+		 */
+		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
+		if (!vfop->rc) {
+			set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
+			vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
+		}
+		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); /* fall-through */
+
+	case BNX2X_VFOP_VLAN_CONFIG_LIST_0:
+		/* next state */
+		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
+
+		if (list_empty(&obj->head))
+			/* add vlan0 */
+			vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, true);
+		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+
+	default:
+		bnx2x_vfop_default(state);
 	}
+op_err:
+	BNX2X_ERR("VLAN-MAC error: rc %d\n", vfop->rc);
+op_done:
+	kfree(filters);
+	bnx2x_vfop_credit(bp, vfop, obj);
+	bnx2x_vfop_end(bp, vf, vfop);
+op_pending:
+	return;
+}
 
-	/* Initial VF init - index and abs_vfid - nr_virtfn must be set */
-	for_each_vf(bp, i) {
-		bnx2x_vf(bp, i, index) = i;
-		bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
-		bnx2x_vf(bp, i, state) = VF_FREE;
-		INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head));
-		mutex_init(&bnx2x_vf(bp, i, op_mutex));
-		bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
+struct bnx2x_vfop_vlan_mac_flags {
+	bool drv_only;
+	bool dont_consume;
+	bool single_cmd;
+	bool add;
+};
+
+static void
+bnx2x_vfop_vlan_mac_prep_ramrod(struct bnx2x_vlan_mac_ramrod_params *ramrod,
+				struct bnx2x_vfop_vlan_mac_flags *flags)
+{
+	struct bnx2x_vlan_mac_data *ureq = &ramrod->user_req;
+
+	memset(ramrod, 0, sizeof(*ramrod));
+
+	/* ramrod flags */
+	if (flags->drv_only)
+		set_bit(RAMROD_DRV_CLR_ONLY, &ramrod->ramrod_flags);
+	if (flags->single_cmd)
+		set_bit(RAMROD_EXEC, &ramrod->ramrod_flags);
+
+	/* mac_vlan flags */
+	if (flags->dont_consume)
+		set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT, &ureq->vlan_mac_flags);
+
+	/* cmd */
+	ureq->cmd = flags->add ? BNX2X_VLAN_MAC_ADD : BNX2X_VLAN_MAC_DEL;
+}
+
+int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
+			    struct bnx2x_virtf *vf,
+			    struct bnx2x_vfop_cmd *cmd,
+			    int qid, u16 vid, bool add)
+{
+	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+
+	if (vfop) {
+		struct bnx2x_vfop_args_filters filters = {
+			.multi_filter = NULL, /* single command */
+			.credit = &bnx2x_vfq(vf, qid, vlan_count),
+		};
+		struct bnx2x_vfop_vlan_mac_flags flags = {
+			.drv_only = false,
+			.dont_consume = (filters.credit != NULL),
+			.single_cmd = true,
+			.add = add,
+		};
+		struct bnx2x_vlan_mac_ramrod_params *ramrod =
+			&vf->op_params.vlan_mac;
+
+		/* set ramrod params */
+		bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
+		ramrod->user_req.u.vlan.vlan = vid;
+
+		/* set object */
+		ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
+
+		/* set extra args */
+		vfop->args.filters = filters;
+
+		bnx2x_vfop_opset(BNX2X_VFOP_VLAN_MAC_CONFIG_SINGLE,
+				 bnx2x_vfop_vlan_mac, cmd->done);
+		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_vlan_mac,
+					     cmd->block);
 	}
+	return -ENOMEM;
+}
 
-	/* re-read the IGU CAM for VFs - index and abs_vfid must be set */
-	bnx2x_get_vf_igu_cam_info(bp);
+/* VFOP queue setup (queue constructor + set vlan 0) */
+static void bnx2x_vfop_qsetup(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+	struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+	int qid = vfop->args.qctor.qid;
+	enum bnx2x_vfop_qsetup_state state = vfop->state;
+	struct bnx2x_vfop_cmd cmd = {
+		.done = bnx2x_vfop_qsetup,
+		.block = false,
+	};
+
+	if (vfop->rc < 0)
+		goto op_err;
+
+	DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+
+	switch (state) {
+	case BNX2X_VFOP_QSETUP_CTOR:
+		/* init the queue ctor command */
+		vfop->state = BNX2X_VFOP_QSETUP_VLAN0;
+		vfop->rc = bnx2x_vfop_qctor_cmd(bp, vf, &cmd, qid);
+		if (vfop->rc)
+			goto op_err;
+		return;
 
-	/* get the total queue count and allocate the global queue arrays */
-	qcount = bnx2x_iov_get_max_queue_count(bp);
+	case BNX2X_VFOP_QSETUP_VLAN0:
+		/* skip if non-leading or FPGA/EMU*/
+		if (qid)
+			goto op_done;
 
-	/* allocate the queue arrays for all VFs */
-	bp->vfdb->vfqs = kzalloc(qcount * sizeof(struct bnx2x_vf_queue),
-				 GFP_KERNEL);
-	if (!bp->vfdb->vfqs) {
-		BNX2X_ERR("failed to allocate vf queue array\n");
-		err = -ENOMEM;
-		goto failed;
+		/* init the queue set-vlan command (for vlan 0) */
+		vfop->state = BNX2X_VFOP_QSETUP_DONE;
+		vfop->rc = bnx2x_vfop_vlan_set_cmd(bp, vf, &cmd, qid, 0, true);
+		if (vfop->rc)
+			goto op_err;
+		return;
+op_err:
+	BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc);
+op_done:
+	case BNX2X_VFOP_QSETUP_DONE:
+		bnx2x_vfop_end(bp, vf, vfop);
+		return;
+	default:
+		bnx2x_vfop_default(state);
 	}
+}
 
-	return 0;
-failed:
-	DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
-	__bnx2x_iov_free_vfdb(bp);
-	return err;
+int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp,
+			  struct bnx2x_virtf *vf,
+			  struct bnx2x_vfop_cmd *cmd,
+			  int qid)
+{
+	struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+
+	if (vfop) {
+		vfop->args.qctor.qid = qid;
+
+		bnx2x_vfop_opset(BNX2X_VFOP_QSETUP_CTOR,
+				 bnx2x_vfop_qsetup, cmd->done);
+		return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qsetup,
+					     cmd->block);
+	}
+	return -ENOMEM;
 }
+
 /* VF enable primitives
  * when pretend is required the caller is responsible
  * for calling pretend prior to calling these routines
@@ -610,6 +950,228 @@ static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
 	}
 }
 
+static int bnx2x_ari_enabled(struct pci_dev *dev)
+{
+	return dev->bus->self && dev->bus->self->ari_enabled;
+}
+
+static void
+bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
+{
+	int sb_id;
+	u32 val;
+	u8 fid;
+
+	/* IGU in normal mode - read CAM */
+	for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
+		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
+		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
+			continue;
+		fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
+		if (!(fid & IGU_FID_ENCODE_IS_PF))
+			bnx2x_vf_set_igu_info(bp, sb_id,
+					      (fid & IGU_FID_VF_NUM_MASK));
+
+		DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
+		   ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
+		   ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
+		   (fid & IGU_FID_VF_NUM_MASK)), sb_id,
+		   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
+	}
+}
+
+static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
+{
+	if (bp->vfdb) {
+		kfree(bp->vfdb->vfqs);
+		kfree(bp->vfdb->vfs);
+		kfree(bp->vfdb);
+	}
+	bp->vfdb = NULL;
+}
+
+static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
+{
+	int pos;
+	struct pci_dev *dev = bp->pdev;
+
+	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
+	if (!pos) {
+		BNX2X_ERR("failed to find SRIOV capability in device\n");
+		return -ENODEV;
+	}
+
+	iov->pos = pos;
+	DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
+	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
+	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
+	pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
+	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
+	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
+	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
+	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
+	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
+
+	return 0;
+}
+
+static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
+{
+	u32 val;
+
+	/* read the SRIOV capability structure
+	 * The fields can be read via configuration read or
+	 * directly from the device (starting at offset PCICFG_OFFSET)
+	 */
+	if (bnx2x_sriov_pci_cfg_info(bp, iov))
+		return -ENODEV;
+
+	/* get the number of SRIOV bars */
+	iov->nres = 0;
+
+	/* read the first_vfid */
+	val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
+	iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
+			       * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));
+
+	DP(BNX2X_MSG_IOV,
+	   "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
+	   BP_FUNC(bp),
+	   iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
+	   iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);
+
+	return 0;
+}
+
+static u8 bnx2x_iov_get_max_queue_count(struct bnx2x *bp)
+{
+	int i;
+	u8 queue_count = 0;
+
+	if (IS_SRIOV(bp))
+		for_each_vf(bp, i)
+			queue_count += bnx2x_vf(bp, i, alloc_resc.num_sbs);
+
+	return queue_count;
+}
+
+/* must be called after PF bars are mapped */
+int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
+		       int num_vfs_param)
+{
+	int err, i, qcount;
+	struct bnx2x_sriov *iov;
+	struct pci_dev *dev = bp->pdev;
+
+	bp->vfdb = NULL;
+
+	/* verify is pf */
+	if (IS_VF(bp))
+		return 0;
+
+	/* verify sriov capability is present in configuration space */
+	if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV))
+		return 0;
+
+	/* verify chip revision */
+	if (CHIP_IS_E1x(bp))
+		return 0;
+
+	/* check if SRIOV support is turned off */
+	if (!num_vfs_param)
+		return 0;
+
+	/* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
+	if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
+		BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
+			  BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
+		return 0;
+	}
+
+	/* SRIOV can be enabled only with MSIX */
+	if (int_mode_param == BNX2X_INT_MODE_MSI ||
+	    int_mode_param == BNX2X_INT_MODE_INTX)
+		BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
+
+	err = -EIO;
+	/* verify ari is enabled */
+	if (!bnx2x_ari_enabled(bp->pdev)) {
+		BNX2X_ERR("ARI not supported, SRIOV can not be enabled\n");
+		return err;
+	}
+
+	/* verify igu is in normal mode */
+	if (CHIP_INT_MODE_IS_BC(bp)) {
+		BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n");
+		return err;
+	}
+
+	/* allocate the vfs database */
+	bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
+	if (!bp->vfdb) {
+		BNX2X_ERR("failed to allocate vf database\n");
+		err = -ENOMEM;
+		goto failed;
+	}
+
+	/* get the sriov info - Linux already collected all the pertinent
+	 * information, however the sriov structure is for the private use
+	 * of the pci module. Also we want this information regardless
+	 * of the hyper-visor.
+	 */
+	iov = &(bp->vfdb->sriov);
+	err = bnx2x_sriov_info(bp, iov);
+	if (err)
+		goto failed;
+
+	/* SR-IOV capability was enabled but there are no VFs*/
+	if (iov->total == 0)
+		goto failed;
+
+	/* calculate the actual number of VFs */
+	iov->nr_virtfn = min_t(u16, iov->total, (u16)num_vfs_param);
+
+	/* allocate the vf array */
+	bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
+				BNX2X_NR_VIRTFN(bp), GFP_KERNEL);
+	if (!bp->vfdb->vfs) {
+		BNX2X_ERR("failed to allocate vf array\n");
+		err = -ENOMEM;
+		goto failed;
+	}
+
+	/* Initial VF init - index and abs_vfid - nr_virtfn must be set */
+	for_each_vf(bp, i) {
+		bnx2x_vf(bp, i, index) = i;
+		bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
+		bnx2x_vf(bp, i, state) = VF_FREE;
+		INIT_LIST_HEAD(&bnx2x_vf(bp, i, op_list_head));
+		mutex_init(&bnx2x_vf(bp, i, op_mutex));
+		bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
+	}
+
+	/* re-read the IGU CAM for VFs - index and abs_vfid must be set */
+	bnx2x_get_vf_igu_cam_info(bp);
+
+	/* get the total queue count and allocate the global queue arrays */
+	qcount = bnx2x_iov_get_max_queue_count(bp);
+
+	/* allocate the queue arrays for all VFs */
+	bp->vfdb->vfqs = kzalloc(qcount * sizeof(struct bnx2x_vf_queue),
+				 GFP_KERNEL);
+	if (!bp->vfdb->vfqs) {
+		BNX2X_ERR("failed to allocate vf queue array\n");
+		err = -ENOMEM;
+		goto failed;
+	}
+
+	return 0;
+failed:
+	DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
+	__bnx2x_iov_free_vfdb(bp);
+	return err;
+}
+
 void bnx2x_iov_remove_one(struct bnx2x *bp)
 {
 	/* if SRIOV is not enabled there's nothing to do */
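All of the VFOP handlers added in this file share one pattern: the handler is re-entered on each ramrod completion, consumes the current vfop->state, issues the next ramrod and advances the state until it reaches op_done or op_err. A minimal standalone model of that re-entrant state machine, using the queue-constructor states as the example (an illustration of the pattern only, not driver code):

#include <stdio.h>

enum qctor_state { QCTOR_INIT, QCTOR_SETUP, QCTOR_INT_EN, QCTOR_DONE };

/* one re-entry of the handler: act on the current state, return the next */
static enum qctor_state qctor_step(enum qctor_state state)
{
	switch (state) {
	case QCTOR_INIT:	/* queue INIT ramrod goes out here */
		return QCTOR_SETUP;
	case QCTOR_SETUP:	/* queue SETUP ramrod goes out here */
		return QCTOR_INT_EN;
	case QCTOR_INT_EN:	/* IGU ack enables the VF's interrupt */
		return QCTOR_DONE;
	default:
		return QCTOR_DONE;
	}
}

int main(void)
{
	enum qctor_state state = QCTOR_INIT;

	/* the driver re-enters on each ramrod completion; we just loop */
	while (state != QCTOR_DONE) {
		printf("entering state %d\n", state);
		state = qctor_step(state);
	}
	return 0;
}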
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index c3d27b5a713a..49d452e91174 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -26,6 +26,8 @@
  * The VF array is indexed by the relative vfid.
  */
 #define BNX2X_VF_MAX_QUEUES		16
+#define BNX2X_VF_MAX_TPA_AGG_QUEUES	8
+
 struct bnx2x_sriov {
 	u32 first_vf_in_pf;
 
@@ -91,6 +93,11 @@ struct bnx2x_virtf;
 /* VFOP definitions */
 typedef void (*vfop_handler_t)(struct bnx2x *bp, struct bnx2x_virtf *vf);
 
+struct bnx2x_vfop_cmd {
+	vfop_handler_t done;
+	bool block;
+};
+
 /* VFOP queue filters command additional arguments */
 struct bnx2x_vfop_filter {
 	struct list_head link;
@@ -405,6 +412,11 @@ static u8 vfq_cl_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
 	return vf->igu_base_id + q->index;
 }
 
+static inline u8 vfq_stat_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
+{
+	return vfq_cl_id(vf, q);
+}
+
 static inline u8 vfq_qzone_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
 {
 	return vfq_cl_id(vf, q);
@@ -435,6 +447,45 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
 /* init */
 int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
 		  dma_addr_t *sb_map);
+
+/* VFOP generic helpers */
+#define bnx2x_vfop_default(state) do {				\
+		BNX2X_ERR("Bad state %d\n", (state));		\
+		vfop->rc = -EINVAL;				\
+		goto op_err;					\
+	} while (0)
+
+enum {
+	VFOP_DONE,
+	VFOP_CONT,
+	VFOP_VERIFY_PEND,
+};
+
+#define bnx2x_vfop_finalize(vf, rc, next) do {				\
+		if ((rc) < 0)						\
+			goto op_err;					\
+		else if ((rc) > 0)					\
+			goto op_pending;				\
+		else if ((next) == VFOP_DONE)				\
+			goto op_done;					\
+		else if ((next) == VFOP_VERIFY_PEND)			\
+			BNX2X_ERR("expected pending\n");		\
+		else {							\
+			DP(BNX2X_MSG_IOV, "no ramrod. scheduling\n");	\
+			atomic_set(&vf->op_in_progress, 1);		\
+			queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);	\
+			return;						\
+		}							\
+	} while (0)
+
+#define bnx2x_vfop_opset(first_state, trans_hndlr, done_hndlr)		\
+	do {								\
+		vfop->state = first_state;				\
+		vfop->op_p = &vf->op_params;				\
+		vfop->transition = trans_hndlr;				\
+		vfop->done = done_hndlr;				\
+	} while (0)
+
 static inline struct bnx2x_vfop *bnx2x_vfop_cur(struct bnx2x *bp,
 						struct bnx2x_virtf *vf)
 {
@@ -443,6 +494,132 @@ static inline struct bnx2x_vfop *bnx2x_vfop_cur(struct bnx2x *bp,
 	return list_first_entry(&vf->op_list_head, struct bnx2x_vfop, link);
 }
 
+static inline struct bnx2x_vfop *bnx2x_vfop_add(struct bnx2x *bp,
+						struct bnx2x_virtf *vf)
+{
+	struct bnx2x_vfop *vfop = kzalloc(sizeof(*vfop), GFP_KERNEL);
+
+	WARN(!mutex_is_locked(&vf->op_mutex), "about to access vf op linked list but mutex was not locked!");
+	if (vfop) {
+		INIT_LIST_HEAD(&vfop->link);
+		list_add(&vfop->link, &vf->op_list_head);
+	}
+	return vfop;
+}
+
+static inline void bnx2x_vfop_end(struct bnx2x *bp, struct bnx2x_virtf *vf,
+				  struct bnx2x_vfop *vfop)
+{
+	/* rc < 0 - error, otherwise set to 0 */
+	DP(BNX2X_MSG_IOV, "rc was %d\n", vfop->rc);
+	if (vfop->rc >= 0)
+		vfop->rc = 0;
+	DP(BNX2X_MSG_IOV, "rc is now %d\n", vfop->rc);
+
+	/* unlink the current op context and propagate error code
+	 * must be done before invoking the 'done()' handler
+	 */
+	WARN(!mutex_is_locked(&vf->op_mutex),
+	     "about to access vf op linked list but mutex was not locked!");
+	list_del(&vfop->link);
+
+	if (list_empty(&vf->op_list_head)) {
+		DP(BNX2X_MSG_IOV, "list was empty %d\n", vfop->rc);
+		vf->op_rc = vfop->rc;
+		DP(BNX2X_MSG_IOV, "copying rc vf->op_rc %d, vfop->rc %d\n",
+		   vf->op_rc, vfop->rc);
+	} else {
+		struct bnx2x_vfop *cur_vfop;
+
+		DP(BNX2X_MSG_IOV, "list not empty %d\n", vfop->rc);
+		cur_vfop = bnx2x_vfop_cur(bp, vf);
+		cur_vfop->rc = vfop->rc;
+		DP(BNX2X_MSG_IOV, "copying rc vf->op_rc %d, vfop->rc %d\n",
+		   vf->op_rc, vfop->rc);
+	}
+
+	/* invoke done handler */
+	if (vfop->done) {
+		DP(BNX2X_MSG_IOV, "calling done handler\n");
+		vfop->done(bp, vf);
+	}
+
+	DP(BNX2X_MSG_IOV, "done handler complete. vf->op_rc %d, vfop->rc %d\n",
+	   vf->op_rc, vfop->rc);
+
+	/* if this is the last nested op reset the wait_blocking flag
+	 * to release any blocking wrappers, only after 'done()' is invoked
+	 */
+	if (list_empty(&vf->op_list_head)) {
+		DP(BNX2X_MSG_IOV, "list was empty after done %d\n", vfop->rc);
+		vf->op_wait_blocking = false;
+	}
+
+	kfree(vfop);
+}
+
+static inline int bnx2x_vfop_wait_blocking(struct bnx2x *bp,
+					   struct bnx2x_virtf *vf)
+{
+	/* can take a while if any port is running */
+	int cnt = 5000;
+
+	might_sleep();
+	while (cnt--) {
+		if (vf->op_wait_blocking == false) {
+#ifdef BNX2X_STOP_ON_ERROR
+			DP(BNX2X_MSG_IOV, "exit (cnt %d)\n", 5000 - cnt);
+#endif
+			return 0;
+		}
+		usleep_range(1000, 2000);
+
+		if (bp->panic)
+			return -EIO;
+	}
+
+	/* timeout! */
+#ifdef BNX2X_STOP_ON_ERROR
+	bnx2x_panic();
+#endif
+
+	return -EBUSY;
+}
+
+static inline int bnx2x_vfop_transition(struct bnx2x *bp,
+					struct bnx2x_virtf *vf,
+					vfop_handler_t transition,
+					bool block)
+{
+	if (block)
+		vf->op_wait_blocking = true;
+	transition(bp, vf);
+	if (block)
+		return bnx2x_vfop_wait_blocking(bp, vf);
+	return 0;
+}
+
+/* VFOP queue construction helpers */
+void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
+			      struct bnx2x_queue_init_params *init_params,
+			      struct bnx2x_queue_setup_params *setup_params,
+			      u16 q_idx, u16 sb_idx);
+
+void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
+			      struct bnx2x_queue_init_params *init_params,
+			      struct bnx2x_queue_setup_params *setup_params,
+			      u16 q_idx, u16 sb_idx);
+
+void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
+			   struct bnx2x_virtf *vf,
+			   struct bnx2x_vf_queue *q,
+			   struct bnx2x_vfop_qctor_params *p,
+			   unsigned long q_type);
+int bnx2x_vfop_qsetup_cmd(struct bnx2x *bp,
+			  struct bnx2x_virtf *vf,
+			  struct bnx2x_vfop_cmd *cmd,
+			  int qid);
+
 int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid);
 u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf);
 /* VF FLR helpers */
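Taken together, these helpers give callers a uniform way to launch an operation: allocate a vfop with bnx2x_vfop_add(), seed it with bnx2x_vfop_opset(), then enter the state machine through bnx2x_vfop_transition(), either blocking or completing asynchronously through the 'done' callback. The fragment below is only a usage sketch; it mirrors how bnx2x_vf_mbx_setup_q() in the vfpf.c diff that follows drives the queue-setup op, and adds nothing new:

struct bnx2x_vfop_cmd cmd = {
	.done  = bnx2x_vf_mbx_resp,	/* respond over the VF<->PF channel */
	.block = false,			/* completion-driven, no busy wait */
};

/* kicks BNX2X_VFOP_QSETUP_CTOR -> ..._VLAN0 -> ..._DONE */
vf->op_rc = bnx2x_vfop_qsetup_cmd(bp, vf, &cmd, q->index);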
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 7aa0e4f5346a..6605567e4b0c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -370,6 +370,149 @@ static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
 	bnx2x_vf_mbx_resp(bp, vf);
 }
 
+/* convert MBX queue-flags to standard SP queue-flags */
+static void bnx2x_vf_mbx_set_q_flags(u32 mbx_q_flags,
+				     unsigned long *sp_q_flags)
+{
+	if (mbx_q_flags & VFPF_QUEUE_FLG_TPA)
+		__set_bit(BNX2X_Q_FLG_TPA, sp_q_flags);
+	if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_IPV6)
+		__set_bit(BNX2X_Q_FLG_TPA_IPV6, sp_q_flags);
+	if (mbx_q_flags & VFPF_QUEUE_FLG_TPA_GRO)
+		__set_bit(BNX2X_Q_FLG_TPA_GRO, sp_q_flags);
+	if (mbx_q_flags & VFPF_QUEUE_FLG_STATS)
+		__set_bit(BNX2X_Q_FLG_STATS, sp_q_flags);
+	if (mbx_q_flags & VFPF_QUEUE_FLG_OV)
+		__set_bit(BNX2X_Q_FLG_OV, sp_q_flags);
+	if (mbx_q_flags & VFPF_QUEUE_FLG_VLAN)
+		__set_bit(BNX2X_Q_FLG_VLAN, sp_q_flags);
+	if (mbx_q_flags & VFPF_QUEUE_FLG_COS)
+		__set_bit(BNX2X_Q_FLG_COS, sp_q_flags);
+	if (mbx_q_flags & VFPF_QUEUE_FLG_HC)
+		__set_bit(BNX2X_Q_FLG_HC, sp_q_flags);
+	if (mbx_q_flags & VFPF_QUEUE_FLG_DHC)
+		__set_bit(BNX2X_Q_FLG_DHC, sp_q_flags);
+}
+
+static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
+				 struct bnx2x_vf_mbx *mbx)
+{
+	struct vfpf_setup_q_tlv *setup_q = &mbx->msg->req.setup_q;
+	struct bnx2x_vfop_cmd cmd = {
+		.done = bnx2x_vf_mbx_resp,
+		.block = false,
+	};
+
+	/* verify vf_qid */
+	if (setup_q->vf_qid >= vf_rxq_count(vf)) {
+		BNX2X_ERR("vf_qid %d invalid, max queue count is %d\n",
+			  setup_q->vf_qid, vf_rxq_count(vf));
+		vf->op_rc = -EINVAL;
+		goto response;
+	}
+
+	/* tx queues must be setup alongside rx queues thus if the rx queue
+	 * is not marked as valid there's nothing to do.
+	 */
+	if (setup_q->param_valid & (VFPF_RXQ_VALID|VFPF_TXQ_VALID)) {
+		struct bnx2x_vf_queue *q = vfq_get(vf, setup_q->vf_qid);
+		unsigned long q_type = 0;
+
+		struct bnx2x_queue_init_params *init_p;
+		struct bnx2x_queue_setup_params *setup_p;
+
+		/* reinit the VF operation context */
+		memset(&vf->op_params.qctor, 0 , sizeof(vf->op_params.qctor));
+		setup_p = &vf->op_params.qctor.prep_qsetup;
+		init_p = &vf->op_params.qctor.qstate.params.init;
+
+		/* activate immediately */
+		__set_bit(BNX2X_Q_FLG_ACTIVE, &setup_p->flags);
+
+		if (setup_q->param_valid & VFPF_TXQ_VALID) {
+			struct bnx2x_txq_setup_params *txq_params =
+				&setup_p->txq_params;
+
+			__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
+
+			/* save sb resource index */
+			q->sb_idx = setup_q->txq.vf_sb;
+
+			/* tx init */
+			init_p->tx.hc_rate = setup_q->txq.hc_rate;
+			init_p->tx.sb_cq_index = setup_q->txq.sb_index;
+
+			bnx2x_vf_mbx_set_q_flags(setup_q->txq.flags,
+						 &init_p->tx.flags);
+
+			/* tx setup - flags */
+			bnx2x_vf_mbx_set_q_flags(setup_q->txq.flags,
+						 &setup_p->flags);
+
+			/* tx setup - general, nothing */
+
+			/* tx setup - tx */
+			txq_params->dscr_map = setup_q->txq.txq_addr;
+			txq_params->sb_cq_index = setup_q->txq.sb_index;
+			txq_params->traffic_type = setup_q->txq.traffic_type;
+
+			bnx2x_vfop_qctor_dump_tx(bp, vf, init_p, setup_p,
+						 q->index, q->sb_idx);
+		}
+
+		if (setup_q->param_valid & VFPF_RXQ_VALID) {
+			struct bnx2x_rxq_setup_params *rxq_params =
+				&setup_p->rxq_params;
+
+			__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
+
+			/* Note: there is no support for different SBs
+			 * for TX and RX
+			 */
+			q->sb_idx = setup_q->rxq.vf_sb;
+
+			/* rx init */
+			init_p->rx.hc_rate = setup_q->rxq.hc_rate;
+			init_p->rx.sb_cq_index = setup_q->rxq.sb_index;
+			bnx2x_vf_mbx_set_q_flags(setup_q->rxq.flags,
+						 &init_p->rx.flags);
+
+			/* rx setup - flags */
+			bnx2x_vf_mbx_set_q_flags(setup_q->rxq.flags,
+						 &setup_p->flags);
+
+			/* rx setup - general */
+			setup_p->gen_params.mtu = setup_q->rxq.mtu;
+
+			/* rx setup - rx */
+			rxq_params->drop_flags = setup_q->rxq.drop_flags;
+			rxq_params->dscr_map = setup_q->rxq.rxq_addr;
+			rxq_params->sge_map = setup_q->rxq.sge_addr;
+			rxq_params->rcq_map = setup_q->rxq.rcq_addr;
+			rxq_params->rcq_np_map = setup_q->rxq.rcq_np_addr;
+			rxq_params->buf_sz = setup_q->rxq.buf_sz;
+			rxq_params->tpa_agg_sz = setup_q->rxq.tpa_agg_sz;
+			rxq_params->max_sges_pkt = setup_q->rxq.max_sge_pkt;
+			rxq_params->sge_buf_sz = setup_q->rxq.sge_buf_sz;
+			rxq_params->cache_line_log =
+				setup_q->rxq.cache_line_log;
+			rxq_params->sb_cq_index = setup_q->rxq.sb_index;
+
+			bnx2x_vfop_qctor_dump_rx(bp, vf, init_p, setup_p,
+						 q->index, q->sb_idx);
+		}
+		/* complete the preparations */
+		bnx2x_vfop_qctor_prep(bp, vf, q, &vf->op_params.qctor, q_type);
+
+		vf->op_rc = bnx2x_vfop_qsetup_cmd(bp, vf, &cmd, q->index);
+		if (vf->op_rc)
+			goto response;
+		return;
+	}
+response:
+	bnx2x_vf_mbx_resp(bp, vf);
+}
+
 /* dispatch request */
 static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
 				 struct bnx2x_vf_mbx *mbx)
@@ -391,6 +534,9 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
 		case CHANNEL_TLV_INIT:
 			bnx2x_vf_mbx_init_vf(bp, vf, mbx);
 			break;
+		case CHANNEL_TLV_SETUP_Q:
+			bnx2x_vf_mbx_setup_q(bp, vf, mbx);
+			break;
 		}
 	} else {
 		/* unknown TLV - this may belong to a VF driver from the future