aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/ethernet/intel/ice/ice_lib.c
diff options
context:
space:
mode:
authorAnirudh Venkataramanan <anirudh.venkataramanan@intel.com>2018-09-19 20:23:07 -0400
committerJeff Kirsher <jeffrey.t.kirsher@intel.com>2018-10-01 15:49:21 -0400
commit28c2a64573881082222fd30f382af966b32d1f99 (patch)
tree6d3ceddf6c7a8d666e2b50158716931a57c17119 /drivers/net/ethernet/intel/ice/ice_lib.c
parent5153a18e57ff3f7ef8bc76d31a968116e7f1963d (diff)
ice: Move common functions out of ice_main.c part 4/7
This patch continues the code move out of ice_main.c. The following top level functions (and related dependency functions) were moved to ice_lib.c: ice_vsi_alloc_rings ice_vsi_set_rss_params ice_vsi_set_num_qs ice_get_free_slot ice_vsi_init ice_vsi_clear_rings ice_vsi_alloc_arrays Signed-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com> Tested-by: Andrew Bowers <andrewx.bowers@intel.com> Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_lib.c')
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c414
1 files changed, 414 insertions, 0 deletions
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 474ce5828bd4..df20d68c92ab 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -226,6 +226,102 @@ static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena)
226} 226}
227 227
228/** 228/**
229 * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI
230 * @vsi: VSI pointer
231 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
232 *
233 * On error: returns error code (negative)
234 * On success: returns 0
235 */
236int ice_vsi_alloc_arrays(struct ice_vsi *vsi, bool alloc_qvectors)
237{
238 struct ice_pf *pf = vsi->back;
239
240 /* allocate memory for both Tx and Rx ring pointers */
241 vsi->tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
242 sizeof(struct ice_ring *), GFP_KERNEL);
243 if (!vsi->tx_rings)
244 goto err_txrings;
245
246 vsi->rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
247 sizeof(struct ice_ring *), GFP_KERNEL);
248 if (!vsi->rx_rings)
249 goto err_rxrings;
250
251 if (alloc_qvectors) {
252 /* allocate memory for q_vector pointers */
253 vsi->q_vectors = devm_kcalloc(&pf->pdev->dev,
254 vsi->num_q_vectors,
255 sizeof(struct ice_q_vector *),
256 GFP_KERNEL);
257 if (!vsi->q_vectors)
258 goto err_vectors;
259 }
260
261 return 0;
262
263err_vectors:
264 devm_kfree(&pf->pdev->dev, vsi->rx_rings);
265err_rxrings:
266 devm_kfree(&pf->pdev->dev, vsi->tx_rings);
267err_txrings:
268 return -ENOMEM;
269}
270
271/**
272 * ice_vsi_set_num_qs - Set num queues, descriptors and vectors for a VSI
273 * @vsi: the VSI being configured
274 *
275 * Return 0 on success and a negative value on error
276 */
277void ice_vsi_set_num_qs(struct ice_vsi *vsi)
278{
279 struct ice_pf *pf = vsi->back;
280
281 switch (vsi->type) {
282 case ICE_VSI_PF:
283 vsi->alloc_txq = pf->num_lan_tx;
284 vsi->alloc_rxq = pf->num_lan_rx;
285 vsi->num_desc = ALIGN(ICE_DFLT_NUM_DESC, ICE_REQ_DESC_MULTIPLE);
286 vsi->num_q_vectors = max_t(int, pf->num_lan_rx, pf->num_lan_tx);
287 break;
288 default:
289 dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
290 vsi->type);
291 break;
292 }
293}
294
295/**
296 * ice_get_free_slot - get the next non-NULL location index in array
297 * @array: array to search
298 * @size: size of the array
299 * @curr: last known occupied index to be used as a search hint
300 *
301 * void * is being used to keep the functionality generic. This lets us use this
302 * function on any array of pointers.
303 */
304int ice_get_free_slot(void *array, int size, int curr)
305{
306 int **tmp_array = (int **)array;
307 int next;
308
309 if (curr < (size - 1) && !tmp_array[curr + 1]) {
310 next = curr + 1;
311 } else {
312 int i = 0;
313
314 while ((i < size) && (tmp_array[i]))
315 i++;
316 if (i == size)
317 next = ICE_NO_VSI;
318 else
319 next = i;
320 }
321 return next;
322}
323
324/**
229 * ice_vsi_delete - delete a VSI from the switch 325 * ice_vsi_delete - delete a VSI from the switch
230 * @vsi: pointer to VSI being removed 326 * @vsi: pointer to VSI being removed
231 */ 327 */
@@ -287,6 +383,324 @@ void ice_vsi_put_qs(struct ice_vsi *vsi)
287} 383}
288 384
289/** 385/**
386 * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type
387 * @vsi: the VSI being configured
388 */
389void ice_vsi_set_rss_params(struct ice_vsi *vsi)
390{
391 struct ice_hw_common_caps *cap;
392 struct ice_pf *pf = vsi->back;
393
394 if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
395 vsi->rss_size = 1;
396 return;
397 }
398
399 cap = &pf->hw.func_caps.common_cap;
400 switch (vsi->type) {
401 case ICE_VSI_PF:
402 /* PF VSI will inherit RSS instance of PF */
403 vsi->rss_table_size = cap->rss_table_size;
404 vsi->rss_size = min_t(int, num_online_cpus(),
405 BIT(cap->rss_table_entry_width));
406 vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
407 break;
408 default:
409 dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n",
410 vsi->type);
411 break;
412 }
413}
414
415/**
416 * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI
417 * @ctxt: the VSI context being set
418 *
419 * This initializes a default VSI context for all sections except the Queues.
420 */
421static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
422{
423 u32 table = 0;
424
425 memset(&ctxt->info, 0, sizeof(ctxt->info));
426 /* VSI's should be allocated from shared pool */
427 ctxt->alloc_from_pool = true;
428 /* Src pruning enabled by default */
429 ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
430 /* Traffic from VSI can be sent to LAN */
431 ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
432 /* By default bits 3 and 4 in vlan_flags are 0's which results in legacy
433 * behavior (show VLAN, DEI, and UP) in descriptor. Also, allow all
434 * packets untagged/tagged.
435 */
436 ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL &
437 ICE_AQ_VSI_VLAN_MODE_M) >>
438 ICE_AQ_VSI_VLAN_MODE_S);
439 /* Have 1:1 UP mapping for both ingress/egress tables */
440 table |= ICE_UP_TABLE_TRANSLATE(0, 0);
441 table |= ICE_UP_TABLE_TRANSLATE(1, 1);
442 table |= ICE_UP_TABLE_TRANSLATE(2, 2);
443 table |= ICE_UP_TABLE_TRANSLATE(3, 3);
444 table |= ICE_UP_TABLE_TRANSLATE(4, 4);
445 table |= ICE_UP_TABLE_TRANSLATE(5, 5);
446 table |= ICE_UP_TABLE_TRANSLATE(6, 6);
447 table |= ICE_UP_TABLE_TRANSLATE(7, 7);
448 ctxt->info.ingress_table = cpu_to_le32(table);
449 ctxt->info.egress_table = cpu_to_le32(table);
450 /* Have 1:1 UP mapping for outer to inner UP table */
451 ctxt->info.outer_up_table = cpu_to_le32(table);
452 /* No Outer tag support outer_tag_flags remains to zero */
453}
454
455/**
456 * ice_vsi_setup_q_map - Setup a VSI queue map
457 * @vsi: the VSI being configured
458 * @ctxt: VSI context structure
459 */
460static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
461{
462 u16 offset = 0, qmap = 0, numq_tc;
463 u16 pow = 0, max_rss = 0, qcount;
464 u16 qcount_tx = vsi->alloc_txq;
465 u16 qcount_rx = vsi->alloc_rxq;
466 bool ena_tc0 = false;
467 int i;
468
469 /* at least TC0 should be enabled by default */
470 if (vsi->tc_cfg.numtc) {
471 if (!(vsi->tc_cfg.ena_tc & BIT(0)))
472 ena_tc0 = true;
473 } else {
474 ena_tc0 = true;
475 }
476
477 if (ena_tc0) {
478 vsi->tc_cfg.numtc++;
479 vsi->tc_cfg.ena_tc |= 1;
480 }
481
482 numq_tc = qcount_rx / vsi->tc_cfg.numtc;
483
484 /* TC mapping is a function of the number of Rx queues assigned to the
485 * VSI for each traffic class and the offset of these queues.
486 * The first 10 bits are for queue offset for TC0, next 4 bits for no:of
487 * queues allocated to TC0. No:of queues is a power-of-2.
488 *
489 * If TC is not enabled, the queue offset is set to 0, and allocate one
490 * queue, this way, traffic for the given TC will be sent to the default
491 * queue.
492 *
493 * Setup number and offset of Rx queues for all TCs for the VSI
494 */
495
496 /* qcount will change if RSS is enabled */
497 if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) {
498 if (vsi->type == ICE_VSI_PF)
499 max_rss = ICE_MAX_LG_RSS_QS;
500 else
501 max_rss = ICE_MAX_SMALL_RSS_QS;
502
503 qcount = min_t(int, numq_tc, max_rss);
504 qcount = min_t(int, qcount, vsi->rss_size);
505 } else {
506 qcount = numq_tc;
507 }
508
509 /* find the (rounded up) power-of-2 of qcount */
510 pow = order_base_2(qcount);
511
512 for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
513 if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
514 /* TC is not enabled */
515 vsi->tc_cfg.tc_info[i].qoffset = 0;
516 vsi->tc_cfg.tc_info[i].qcount = 1;
517 ctxt->info.tc_mapping[i] = 0;
518 continue;
519 }
520
521 /* TC is enabled */
522 vsi->tc_cfg.tc_info[i].qoffset = offset;
523 vsi->tc_cfg.tc_info[i].qcount = qcount;
524
525 qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
526 ICE_AQ_VSI_TC_Q_OFFSET_M) |
527 ((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
528 ICE_AQ_VSI_TC_Q_NUM_M);
529 offset += qcount;
530 ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
531 }
532
533 vsi->num_txq = qcount_tx;
534 vsi->num_rxq = offset;
535
536 /* Rx queue mapping */
537 ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
538 /* q_mapping buffer holds the info for the first queue allocated for
539 * this VSI in the PF space and also the number of queues associated
540 * with this VSI.
541 */
542 ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
543 ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);
544}
545
546/**
547 * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI
548 * @ctxt: the VSI context being set
549 * @vsi: the VSI being configured
550 */
551static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
552{
553 u8 lut_type, hash_type;
554
555 switch (vsi->type) {
556 case ICE_VSI_PF:
557 /* PF VSI will inherit RSS instance of PF */
558 lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
559 hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
560 break;
561 default:
562 dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
563 vsi->type);
564 return;
565 }
566
567 ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
568 ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
569 ((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) &
570 ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
571}
572
573/**
574 * ice_vsi_init - Create and initialize a VSI
575 * @vsi: the VSI being configured
576 *
577 * This initializes a VSI context depending on the VSI type to be added and
578 * passes it down to the add_vsi aq command to create a new VSI.
579 */
580int ice_vsi_init(struct ice_vsi *vsi)
581{
582 struct ice_vsi_ctx ctxt = { 0 };
583 struct ice_pf *pf = vsi->back;
584 struct ice_hw *hw = &pf->hw;
585 int ret = 0;
586
587 switch (vsi->type) {
588 case ICE_VSI_PF:
589 ctxt.flags = ICE_AQ_VSI_TYPE_PF;
590 break;
591 default:
592 return -ENODEV;
593 }
594
595 ice_set_dflt_vsi_ctx(&ctxt);
596 /* if the switch is in VEB mode, allow VSI loopback */
597 if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB)
598 ctxt.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
599
600 /* Set LUT type and HASH type if RSS is enabled */
601 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
602 ice_set_rss_vsi_ctx(&ctxt, vsi);
603
604 ctxt.info.sw_id = vsi->port_info->sw_id;
605 ice_vsi_setup_q_map(vsi, &ctxt);
606
607 ret = ice_add_vsi(hw, vsi->idx, &ctxt, NULL);
608 if (ret) {
609 dev_err(&pf->pdev->dev,
610 "Add VSI failed, err %d\n", ret);
611 return -EIO;
612 }
613
614 /* keep context for update VSI operations */
615 vsi->info = ctxt.info;
616
617 /* record VSI number returned */
618 vsi->vsi_num = ctxt.vsi_num;
619
620 return ret;
621}
622
623/**
624 * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI
625 * @vsi: the VSI having rings deallocated
626 */
627void ice_vsi_clear_rings(struct ice_vsi *vsi)
628{
629 int i;
630
631 if (vsi->tx_rings) {
632 for (i = 0; i < vsi->alloc_txq; i++) {
633 if (vsi->tx_rings[i]) {
634 kfree_rcu(vsi->tx_rings[i], rcu);
635 vsi->tx_rings[i] = NULL;
636 }
637 }
638 }
639 if (vsi->rx_rings) {
640 for (i = 0; i < vsi->alloc_rxq; i++) {
641 if (vsi->rx_rings[i]) {
642 kfree_rcu(vsi->rx_rings[i], rcu);
643 vsi->rx_rings[i] = NULL;
644 }
645 }
646 }
647}
648
649/**
650 * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI
651 * @vsi: VSI which is having rings allocated
652 */
653int ice_vsi_alloc_rings(struct ice_vsi *vsi)
654{
655 struct ice_pf *pf = vsi->back;
656 int i;
657
658 /* Allocate tx_rings */
659 for (i = 0; i < vsi->alloc_txq; i++) {
660 struct ice_ring *ring;
661
662 /* allocate with kzalloc(), free with kfree_rcu() */
663 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
664
665 if (!ring)
666 goto err_out;
667
668 ring->q_index = i;
669 ring->reg_idx = vsi->txq_map[i];
670 ring->ring_active = false;
671 ring->vsi = vsi;
672 ring->dev = &pf->pdev->dev;
673 ring->count = vsi->num_desc;
674 vsi->tx_rings[i] = ring;
675 }
676
677 /* Allocate rx_rings */
678 for (i = 0; i < vsi->alloc_rxq; i++) {
679 struct ice_ring *ring;
680
681 /* allocate with kzalloc(), free with kfree_rcu() */
682 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
683 if (!ring)
684 goto err_out;
685
686 ring->q_index = i;
687 ring->reg_idx = vsi->rxq_map[i];
688 ring->ring_active = false;
689 ring->vsi = vsi;
690 ring->netdev = vsi->netdev;
691 ring->dev = &pf->pdev->dev;
692 ring->count = vsi->num_desc;
693 vsi->rx_rings[i] = ring;
694 }
695
696 return 0;
697
698err_out:
699 ice_vsi_clear_rings(vsi);
700 return -ENOMEM;
701}
702
703/**
290 * ice_add_mac_to_list - Add a mac address filter entry to the list 704 * ice_add_mac_to_list - Add a mac address filter entry to the list
291 * @vsi: the VSI to be forwarded to 705 * @vsi: the VSI to be forwarded to
292 * @add_list: pointer to the list which contains MAC filter entries 706 * @add_list: pointer to the list which contains MAC filter entries