summaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorAchal Verma <achalv@nvidia.com>2021-06-25 03:57:41 -0400
committermobile promotions <svcmobile_promotions@nvidia.com>2021-08-13 16:11:13 -0400
commita17fd4c3052cb2af717bc17d4451e6562a7a6ee4 (patch)
tree07c539d0a75a5bb3301dce09ba2a52a6c02682c0 /drivers
parent35fe1101fc0bf0e423936d1adb40d9bb28eeee74 (diff)
EQOS: Enable DMA selection with filtering.
This change allows configuring filtering such that a specified DMA channel is selected for packets which match the filtering criteria. Bug 200743454 Change-Id: If990de0b499c36466258ce7f7cd79e1abe717537 Signed-off-by: Achal Verma <achalv@nvidia.com> Reviewed-on: https://git-master.nvidia.com/r/c/linux-nvidia/+/2575326 Tested-by: mobile promotions <svcmobile_promotions@nvidia.com> Reviewed-by: svc-mobile-coverity <svc-mobile-coverity@nvidia.com> Reviewed-by: Phoenix Jung <pjung@nvidia.com> Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com> GVS: Gerrit_Virtual_Submit
Diffstat (limited to 'drivers')
-rw-r--r--drivers/net/ethernet/nvidia/eqos/Makefile4
-rw-r--r--drivers/net/ethernet/nvidia/eqos/dev.c141
-rw-r--r--drivers/net/ethernet/nvidia/eqos/drv.c364
-rw-r--r--drivers/net/ethernet/nvidia/eqos/init.c33
-rw-r--r--drivers/net/ethernet/nvidia/eqos/yapphdr.h21
-rw-r--r--drivers/net/ethernet/nvidia/eqos/yheader.h22
-rw-r--r--drivers/net/ethernet/nvidia/eqos/yregacc.h16
7 files changed, 521 insertions, 80 deletions
diff --git a/drivers/net/ethernet/nvidia/eqos/Makefile b/drivers/net/ethernet/nvidia/eqos/Makefile
index a954463fb..10d3f55d3 100644
--- a/drivers/net/ethernet/nvidia/eqos/Makefile
+++ b/drivers/net/ethernet/nvidia/eqos/Makefile
@@ -1,4 +1,4 @@
1# Copyright (c) 2015, NVIDIA CORPORATION. All rights reserved. 1# Copyright (c) 2015-2021, NVIDIA CORPORATION. All rights reserved.
2# 2#
3# This program is free software; you can redistribute it and/or modify it 3# This program is free software; you can redistribute it and/or modify it
4# under the terms and conditions of the GNU General Public License, 4# under the terms and conditions of the GNU General Public License,
@@ -15,7 +15,7 @@ ccflags-y += -DHWA_NV_1650337
15ccflags-y += -DHWA_FPGA_ONLY 15ccflags-y += -DHWA_FPGA_ONLY
16 16
17## default build settings. 17## default build settings.
18ccflags-y += -DCONFIG_PTPSUPPORT_OBJ 18ccflags-y += -DCONFIG_PTPSUPPORT_OBJ -DFILTER_DEBUGFS
19 19
20## enable this only for MODS verification 20## enable this only for MODS verification
21ifeq ($(CONFIG_DISABLE_EQOS_CTRL_TRISTATE),y) 21ifeq ($(CONFIG_DISABLE_EQOS_CTRL_TRISTATE),y)
diff --git a/drivers/net/ethernet/nvidia/eqos/dev.c b/drivers/net/ethernet/nvidia/eqos/dev.c
index fd05f2dbf..e8eb921e9 100644
--- a/drivers/net/ethernet/nvidia/eqos/dev.c
+++ b/drivers/net/ethernet/nvidia/eqos/dev.c
@@ -429,6 +429,79 @@ static INT update_l4_sa_port_no(INT filter_no, USHORT port_no)
429 return Y_SUCCESS; 429 return Y_SUCCESS;
430} 430}
431 431
432/**
433 * @brief eqos_set_l3mask - update l3 filter address mask
434 *
435 * @note
436 * Algorithm:
437 * This sequence is used to configure L3((IPv4/IPv6)
438 * filter address mask for address matching.
439 *
440 * @param[in] value
441 * @param[in] l3_mask
442 *
443 * @note
444 * API Group:
445 * - Initialization: Yes
446 * - Run time: Yes
447 * - De-initialization: No
448 *
449 *@return updated UINT value
450 */
451
452static inline UINT eqos_set_l3mask(UINT value, USHORT l3_mask, BOOL is_dst)
453{
454 UINT t_val = value;
455
456 if (is_dst)
457 t_val |= ((l3_mask <<
458 MAC_L3L4_CTR_L3HDBM0_SHIFT) &
459 MAC_L3L4_CTR_L3HDBM0);
460 else
461 t_val |= ((l3_mask <<
462 MAC_L3L4_CTR_L3HSBM0_SHIFT) &
463 MAC_L3L4_CTR_L3HSBM0);
464
465 return t_val;
466}
467
468/**
469 * @brief eqos_set_dcs - update dma routing register
470 *
471 * @note
472 * Algorithm:
473 * This sequence is used to configure L3((IPv4/IPv6)
474 * filters for address matching.
475 *
476 * @param[in] value
477 * @param[in] dma_routing_enable
478 * @param[in] dma_chan
479 *
480 * @note
481 * API Group:
482 * - Initialization: Yes
483 * - Run time: Yes
484 * - De-initialization: No
485 *
486 *@return updated UINT value
487 */
488
489static inline UINT eqos_set_dcs(UINT value,
490 UINT dma_routing_enable,
491 UINT dma_chan)
492{
493 UINT t_val = value;
494
495 t_val |= ((dma_routing_enable <<
496 MAC_L3L4_CTR_DMCHEN0_SHIFT) &
497 MAC_L3L4_CTR_DMCHEN0);
498 t_val |= ((dma_chan <<
499 MAC_L3L4_CTR_DMCHN0_SHIFT) &
500 MAC_L3L4_CTR_DMCHN0);
501
502 return t_val;
503}
504
432/*! 505/*!
433* \brief This sequence is used to configure L4(TCP/UDP) filters for 506* \brief This sequence is used to configure L4(TCP/UDP) filters for
434* SA and DA Port Number matching 507* SA and DA Port Number matching
@@ -441,12 +514,12 @@ static INT update_l4_sa_port_no(INT filter_no, USHORT port_no)
441* \retval -1 Failure 514* \retval -1 Failure
442*/ 515*/
443 516
444static INT config_l4_filters(INT filter_no, 517static INT config_l4_filters(INT filter_no, INT enb_dis,
445 INT enb_dis, 518 INT tcp_udp_match, INT src_dst_port_match,
446 INT tcp_udp_match, 519 INT perfect_inverse_match, INT dma_routing_enable,
447 INT src_dst_port_match, INT perfect_inverse_match) 520 USHORT dma_channel)
448{ 521{
449 522 UINT val = 0U;
450 MAC_L3L4CR_L4PEN0_WR(filter_no, tcp_udp_match); 523 MAC_L3L4CR_L4PEN0_WR(filter_no, tcp_udp_match);
451 524
452 if (src_dst_port_match == 0) { 525 if (src_dst_port_match == 0) {
@@ -454,6 +527,10 @@ static INT config_l4_filters(INT filter_no,
454 /* Enable L4 filters for SOURCE Port No matching */ 527 /* Enable L4 filters for SOURCE Port No matching */
455 MAC_L3L4CR_L4SPM0_WR(filter_no, 0x1); 528 MAC_L3L4CR_L4SPM0_WR(filter_no, 0x1);
456 MAC_L3L4CR_L4SPIM0_WR(filter_no, perfect_inverse_match); 529 MAC_L3L4CR_L4SPIM0_WR(filter_no, perfect_inverse_match);
530 MAC_L3L4CR_RD(filter_no, val);
531 val = eqos_set_dcs(val, dma_routing_enable,
532 dma_channel);
533 MAC_L3L4CR_WR(filter_no, val);
457 } else { 534 } else {
458 /* Disable L4 filters for SOURCE Port No matching */ 535 /* Disable L4 filters for SOURCE Port No matching */
459 MAC_L3L4CR_L4SPM0_WR(filter_no, 0x0); 536 MAC_L3L4CR_L4SPM0_WR(filter_no, 0x0);
@@ -464,6 +541,10 @@ static INT config_l4_filters(INT filter_no,
464 /* Enable L4 filters for DESTINATION port No matching */ 541 /* Enable L4 filters for DESTINATION port No matching */
465 MAC_L3L4CR_L4DPM0_WR(filter_no, 0x1); 542 MAC_L3L4CR_L4DPM0_WR(filter_no, 0x1);
466 MAC_L3L4CR_L4DPIM0_WR(filter_no, perfect_inverse_match); 543 MAC_L3L4CR_L4DPIM0_WR(filter_no, perfect_inverse_match);
544 MAC_L3L4CR_RD(filter_no, val);
545 val = eqos_set_dcs(val, dma_routing_enable,
546 dma_channel);
547 MAC_L3L4CR_WR(filter_no, val);
467 } else { 548 } else {
468 /* Disable L4 filters for DESTINATION port No matching */ 549 /* Disable L4 filters for DESTINATION port No matching */
469 MAC_L3L4CR_L4DPM0_WR(filter_no, 0x0); 550 MAC_L3L4CR_L4DPM0_WR(filter_no, 0x0);
@@ -548,8 +629,13 @@ static INT update_ip4_addr0(INT filter_no, UCHAR addr[])
548static INT config_l3_filters(INT filter_no, 629static INT config_l3_filters(INT filter_no,
549 INT enb_dis, 630 INT enb_dis,
550 INT ipv4_ipv6_match, 631 INT ipv4_ipv6_match,
551 INT src_dst_addr_match, INT perfect_inverse_match) 632 INT src_dst_addr_match,
633 INT perfect_inverse_match,
634 INT dma_routing_enable,
635 USHORT dma_channel,
636 USHORT l3_mask)
552{ 637{
638 UINT val = 0U;
553 MAC_L3L4CR_L3PEN0_WR(filter_no, ipv4_ipv6_match); 639 MAC_L3L4CR_L3PEN0_WR(filter_no, ipv4_ipv6_match);
554 640
555 /* For IPv6 either SA/DA can be checked, not both */ 641 /* For IPv6 either SA/DA can be checked, not both */
@@ -559,16 +645,24 @@ static INT config_l3_filters(INT filter_no,
559 /* Enable L3 filters for IPv6 SOURCE addr matching */ 645 /* Enable L3 filters for IPv6 SOURCE addr matching */
560 MAC_L3L4CR_L3SAM0_WR(filter_no, 0x1); 646 MAC_L3L4CR_L3SAM0_WR(filter_no, 0x1);
561 MAC_L3L4CR_L3SAIM0_WR(filter_no, 647 MAC_L3L4CR_L3SAIM0_WR(filter_no,
562 perfect_inverse_match); 648 perfect_inverse_match);
563 MAC_L3L4CR_L3DAM0_WR(filter_no, 0x0); 649 MAC_L3L4CR_L3DAM0_WR(filter_no, 0x0);
564 MAC_L3L4CR_L3DAIM0_WR(filter_no, 0x0); 650 MAC_L3L4CR_L3DAIM0_WR(filter_no, 0x0);
651 MAC_L3L4CR_RD(filter_no, val);
652 val = eqos_set_dcs(val, dma_routing_enable,
653 dma_channel);
654 MAC_L3L4CR_WR(filter_no, val);
565 } else { 655 } else {
566 /* Enable L3 filters for IPv6 DESTINATION addr matching */ 656 /* Enable L3 filters for IPv6 DESTINATION addr matching */
567 MAC_L3L4CR_L3SAM0_WR(filter_no, 0x0); 657 MAC_L3L4CR_L3SAM0_WR(filter_no, 0x0);
568 MAC_L3L4CR_L3SAIM0_WR(filter_no, 0x0); 658 MAC_L3L4CR_L3SAIM0_WR(filter_no, 0x0);
569 MAC_L3L4CR_L3DAM0_WR(filter_no, 0x1); 659 MAC_L3L4CR_L3DAM0_WR(filter_no, 0x1);
570 MAC_L3L4CR_L3DAIM0_WR(filter_no, 660 MAC_L3L4CR_L3DAIM0_WR(filter_no,
571 perfect_inverse_match); 661 perfect_inverse_match);
662 MAC_L3L4CR_RD(filter_no, val);
663 val = eqos_set_dcs(val, dma_routing_enable,
664 dma_channel);
665 MAC_L3L4CR_WR(filter_no, val);
572 } 666 }
573 } else { 667 } else {
574 /* Disable L3 filters for IPv6 SOURCE/DESTINATION addr matching */ 668 /* Disable L3 filters for IPv6 SOURCE/DESTINATION addr matching */
@@ -584,7 +678,14 @@ static INT config_l3_filters(INT filter_no,
584 /* Enable L3 filters for IPv4 SOURCE addr matching */ 678 /* Enable L3 filters for IPv4 SOURCE addr matching */
585 MAC_L3L4CR_L3SAM0_WR(filter_no, 0x1); 679 MAC_L3L4CR_L3SAM0_WR(filter_no, 0x1);
586 MAC_L3L4CR_L3SAIM0_WR(filter_no, 680 MAC_L3L4CR_L3SAIM0_WR(filter_no,
587 perfect_inverse_match); 681 perfect_inverse_match);
682 MAC_L3L4CR_L3HSBM0_WR(filter_no, l3_mask);
683 MAC_L3L4CR_RD(filter_no, val);
684 val = eqos_set_dcs(val, dma_routing_enable,
685 dma_channel);
686 val = eqos_set_l3mask(val, l3_mask,
687 src_dst_addr_match);
688 MAC_L3L4CR_WR(filter_no, val);
588 } else { 689 } else {
589 /* Disable L3 filters for IPv4 SOURCE addr matching */ 690 /* Disable L3 filters for IPv4 SOURCE addr matching */
590 MAC_L3L4CR_L3SAM0_WR(filter_no, 0x0); 691 MAC_L3L4CR_L3SAM0_WR(filter_no, 0x0);
@@ -596,6 +697,12 @@ static INT config_l3_filters(INT filter_no,
596 MAC_L3L4CR_L3DAM0_WR(filter_no, 0x1); 697 MAC_L3L4CR_L3DAM0_WR(filter_no, 0x1);
597 MAC_L3L4CR_L3DAIM0_WR(filter_no, 698 MAC_L3L4CR_L3DAIM0_WR(filter_no,
598 perfect_inverse_match); 699 perfect_inverse_match);
700 MAC_L3L4CR_RD(filter_no, val);
701 val = eqos_set_dcs(val, dma_routing_enable,
702 dma_channel);
703 val = eqos_set_l3mask(val, l3_mask,
704 src_dst_addr_match);
705 MAC_L3L4CR_WR(filter_no, val);
599 } else { 706 } else {
600 /* Disable L3 filters for IPv4 DESTINATION addr matching */ 707 /* Disable L3 filters for IPv4 DESTINATION addr matching */
601 MAC_L3L4CR_L3DAM0_WR(filter_no, 0x0); 708 MAC_L3L4CR_L3DAM0_WR(filter_no, 0x0);
@@ -3848,7 +3955,8 @@ static INT configure_mac(struct eqos_prv_data *pdata)
3848 3955
3849static INT eqos_yinit(struct eqos_prv_data *pdata) 3956static INT eqos_yinit(struct eqos_prv_data *pdata)
3850{ 3957{
3851 UINT qinx; 3958 struct eqos_cfg *dt_cfg = &pdata->dt_cfg;
3959 UINT qinx, val = 0U;
3852 int i, j; 3960 int i, j;
3853 3961
3854 pr_debug("-->eqos_yinit\n"); 3962 pr_debug("-->eqos_yinit\n");
@@ -3859,8 +3967,17 @@ static INT eqos_yinit(struct eqos_prv_data *pdata)
3859 for (qinx = 0; qinx < EQOS_TX_QUEUE_CNT; qinx++) { 3967 for (qinx = 0; qinx < EQOS_TX_QUEUE_CNT; qinx++) {
3860 configure_mtl_queue(qinx, pdata); 3968 configure_mtl_queue(qinx, pdata);
3861 } 3969 }
3862 /* Mapping MTL Rx queue and DMA Rx channel */ 3970 /* Mapping MTL Rx queue and DMA Rx channel
3863 MTL_RQDCM0R_WR(0x3020100); 3971 * dt entry queue_dma_map defines 0 for static
3972 * 1 for dynamic queue to dma mapping.
3973 * For static case queue n is mapped to dma channel n
3974 */
3975 val = ((dt_cfg->q_dma_map[3] ? BIT(28) : (3 << 24)) | \
3976 (dt_cfg->q_dma_map[2] ? BIT(20) : (2 << 16)) | \
3977 (dt_cfg->q_dma_map[1] ? BIT(12) : (1 << 8)) | \
3978 (dt_cfg->q_dma_map[0] ? BIT(4) : (0)));
3979
3980 MTL_RQDCM0R_WR(val);
3864 3981
3865 i = (VIRT_INTR_CH_CRTL_RX_WR_MASK | VIRT_INTR_CH_CRTL_TX_WR_MASK); 3982 i = (VIRT_INTR_CH_CRTL_RX_WR_MASK | VIRT_INTR_CH_CRTL_TX_WR_MASK);
3866 for (j = 0; j < pdata->num_chans; j++) { 3983 for (j = 0; j < pdata->num_chans; j++) {
diff --git a/drivers/net/ethernet/nvidia/eqos/drv.c b/drivers/net/ethernet/nvidia/eqos/drv.c
index 7e2d80c75..7c7bb2605 100644
--- a/drivers/net/ethernet/nvidia/eqos/drv.c
+++ b/drivers/net/ethernet/nvidia/eqos/drv.c
@@ -47,6 +47,9 @@
47#include <linux/gpio.h> 47#include <linux/gpio.h>
48#include <linux/time.h> 48#include <linux/time.h>
49#include <linux/platform/tegra/ptp-notifier.h> 49#include <linux/platform/tegra/ptp-notifier.h>
50#ifdef FILTER_DEBUGFS
51#include <linux/debugfs.h>
52#endif
50#include <linux/reset.h> 53#include <linux/reset.h>
51#include "yheader.h" 54#include "yheader.h"
52#include "yapphdr.h" 55#include "yapphdr.h"
@@ -2604,6 +2607,152 @@ static int eqos_config_l3_l4_filtering(struct net_device *dev,
2604 return ret; 2607 return ret;
2605} 2608}
2606 2609
2610#ifdef FILTER_DEBUGFS
/* Per-filter debugfs bookkeeping. The eqos_l3_l4_filter payload is
 * allocated immediately after this header and accessed as (fdata + 1)
 * (see __eqos_debugfs_filter / __eqos_debugfs_show).
 */
struct fdata {
	struct dentry *entry;	/* debugfs file exposing this filter */
	int index;		/* filter number */
	int type;		/* 0 = L3 filter, 1 = L4 filter */
	size_t size;		/* payload size; NOTE(review): never assigned in visible code — confirm intent */
	char name[64];		/* debugfs file name */
	struct list_head list;	/* linked on pdata->d_head */
};
2619
2620static int __eqos_debugfs_show(struct seq_file *file, void *unused)
2621{
2622 struct fdata *data = file->private;
2623 struct eqos_l3_l4_filter *f = (struct eqos_l3_l4_filter *)(data + 1);
2624
2625 switch (data->type) {
2626 case 0:
2627 seq_printf(file, "Layer 3 filter\n");
2628 seq_printf(file,
2629 "..filter_no = %d\n..enable? = %s\n..src/dst = %s\n..type = %s\n"
2630 "..ip = %d.%d.%d.%d\n..mask = %d\n..dma routing? = %s\n..dma = %d\n",
2631 f->filter_no, f->filter_enb_dis ? "YES" : "NO",
2632 f->src_dst_addr_match == 0 ? "SOURCE" : "DESTINATION",
2633 f->perfect_inverse_match == 0 ? "PERFECT" : "INVERSE",
2634 f->ip4_addr[0], f->ip4_addr[1], f->ip4_addr[2], f->ip4_addr[3],
2635 f->l3_mask, f->dma_routing_enable ? "YES" : "NO",
2636 f->dma_channel);
2637 break;
2638
2639 case 1:
2640 seq_printf(file, "Layer 4 filter\n");
2641 seq_printf(file,
2642 "..filter_no = %d\n..enable? = %s\n..src/dst = %s\n..type = %s\n"
2643 "..port = %d\n..dma routing? = %s\n..dma = %d\n",
2644 f->filter_no, f->filter_enb_dis ? "YES" : "NO",
2645 f->src_dst_addr_match == 0 ? "SOURCE" : "DESTINATION",
2646 f->perfect_inverse_match == 0 ? "PERFECT" : "INVERSE",
2647 f->port_no, f->dma_routing_enable ? "YES" : "NO",
2648 f->dma_channel);
2649 break;
2650
2651 default:
2652 break;
2653 }
2654
2655 return 0;
2656}
2657
/* Open handler: bind the seq_file show callback to the per-filter
 * fdata stashed in the inode's private pointer at creation time.
 */
static int __eqos_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, __eqos_debugfs_show, inode->i_private);
}
2662
/* Read-only single-record seq_file operations for the filter debugfs files. */
static const struct file_operations __eqos_filter_fops = {
	.open = __eqos_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
2669
2670static int __eqos_debugfs_filter(struct eqos_prv_data *pdata, int index,
2671 int type, void *data, size_t sz)
2672{
2673 struct fdata *pos, *new;
2674
2675 list_for_each_entry(pos, &pdata->d_head, list) {
2676 if (pos->type == type && pos->index == index) {
2677 debugfs_remove_recursive(pos->entry);
2678 list_del_init(&pos->list);
2679 devm_kfree(&pdata->pdev->dev, pos);
2680 break;
2681 }
2682 }
2683 new = devm_kzalloc(&pdata->pdev->dev, sizeof *new + sz, GFP_KERNEL);
2684 if (!new)
2685 return -ENOMEM;
2686 new->type = type;
2687 new->index = index;
2688 memcpy(new + 1, data, sz);
2689 snprintf(new->name, sizeof(new->name), "filter_%02d", index);
2690 new->entry = debugfs_create_file(new->name, 0440, pdata->d_root, new,
2691 &__eqos_filter_fops);
2692 INIT_LIST_HEAD(&new->list);
2693 list_add(&new->list, &pdata->d_head);
2694 return 0;
2695}
2696#endif
2697
/* Validate and program one IPv4 (L3) filter into the MAC.
 *
 * Checks HW capability (filter count, DMA channel range), enables the
 * global L3/L4 filter block on first use, then writes the filter control
 * fields and the source or destination IPv4 address registers.
 *
 * Returns 0 on success, EQOS_NO_HW_SUPPORT if the HW cannot honor the
 * request.
 */
static int __eqos_config_ip4_filters(struct eqos_prv_data *pdata,
				struct eqos_l3_l4_filter *l3_filter)
{
	struct hw_if_struct *hw_if = &(pdata->hw_if);
	int ret = 0;

	if (pdata->hw_feat.l3l4_filter_num == 0)
		return EQOS_NO_HW_SUPPORT;

	if ((l3_filter->filter_no + 1) > pdata->hw_feat.l3l4_filter_num) {
		pr_err("%d filter is not supported in the HW\n",
		       l3_filter->filter_no);
		return EQOS_NO_HW_SUPPORT;
	}

	/* NOTE(review): if rx_ch_cnt is a channel *count* (valid channels
	 * 0..rx_ch_cnt-1) this check should arguably be ">=" — confirm the
	 * semantics of hw_feat.rx_ch_cnt.
	 */
	if ((l3_filter->dma_routing_enable == EQOS_DMA_FILTER_ENABLE) &&
	    (l3_filter->dma_channel > (pdata->hw_feat.rx_ch_cnt))) {
		pr_err("%u dma channel is not supported in the HW\n",
		       l3_filter->dma_channel);
		return EQOS_NO_HW_SUPPORT;
	}


	/* Enable the global L3/L4 filter block once, on first filter. */
	if (!pdata->l3_l4_filter) {
		hw_if->config_l3_l4_filter_enable(1);
		pdata->l3_l4_filter = 1;
	}

	/* configure the L3 filters (third arg 0 selects IPv4 mode) */
	hw_if->config_l3_filters(l3_filter->filter_no,
				 l3_filter->filter_enb_dis, 0,
				 l3_filter->src_dst_addr_match,
				 l3_filter->perfect_inverse_match,
				 l3_filter->dma_routing_enable,
				 l3_filter->dma_channel,
				 l3_filter->l3_mask);

	/* addr0 registers hold the SOURCE address, addr1 the DESTINATION */
	if (!l3_filter->src_dst_addr_match)
		hw_if->update_ip4_addr0(l3_filter->filter_no,
					l3_filter->ip4_addr);
	else
		hw_if->update_ip4_addr1(l3_filter->filter_no,
					l3_filter->ip4_addr);
#ifdef FILTER_DEBUGFS
	/* Mirror the applied configuration into debugfs (type 0 = L3). */
	__eqos_debugfs_filter(pdata, l3_filter->filter_no, 0, l3_filter,
			      sizeof(*l3_filter));
#endif

	DBGPR_FILTER
	    ("Successfully %s IPv4 %s %s addressing filtering on %d filter\n",
	     (l3_filter->filter_enb_dis ? "ENABLED" : "DISABLED"),
	     (l3_filter->perfect_inverse_match ? "INVERSE" : "PERFECT"),
	     (l3_filter->src_dst_addr_match ? "DESTINATION" : "SOURCE"),
	     l3_filter->filter_no);

	return ret;
}
2755
2607/*! 2756/*!
2608 * \details This function is invoked by ioctl function when user issues an 2757 * \details This function is invoked by ioctl function when user issues an
2609 * ioctl command to configure L3(IPv4) filtering. This function does following, 2758 * ioctl command to configure L3(IPv4) filtering. This function does following,
@@ -2623,7 +2772,6 @@ static int eqos_config_ip4_filters(struct net_device *dev,
2623 struct ifr_data_struct *req) 2772 struct ifr_data_struct *req)
2624{ 2773{
2625 struct eqos_prv_data *pdata = netdev_priv(dev); 2774 struct eqos_prv_data *pdata = netdev_priv(dev);
2626 struct hw_if_struct *hw_if = &(pdata->hw_if);
2627 struct eqos_l3_l4_filter *u_l3_filter = 2775 struct eqos_l3_l4_filter *u_l3_filter =
2628 (struct eqos_l3_l4_filter *)req->ptr; 2776 (struct eqos_l3_l4_filter *)req->ptr;
2629 struct eqos_l3_l4_filter l_l3_filter; 2777 struct eqos_l3_l4_filter l_l3_filter;
@@ -2638,36 +2786,7 @@ static int eqos_config_ip4_filters(struct net_device *dev,
2638 sizeof(struct eqos_l3_l4_filter))) 2786 sizeof(struct eqos_l3_l4_filter)))
2639 return -EFAULT; 2787 return -EFAULT;
2640 2788
2641 if ((l_l3_filter.filter_no + 1) > pdata->hw_feat.l3l4_filter_num) { 2789 ret = __eqos_config_ip4_filters(pdata, &l_l3_filter);
2642 pr_err("%d filter is not supported in the HW\n",
2643 l_l3_filter.filter_no);
2644 return EQOS_NO_HW_SUPPORT;
2645 }
2646
2647 if (!pdata->l3_l4_filter) {
2648 hw_if->config_l3_l4_filter_enable(1);
2649 pdata->l3_l4_filter = 1;
2650 }
2651
2652 /* configure the L3 filters */
2653 hw_if->config_l3_filters(l_l3_filter.filter_no,
2654 l_l3_filter.filter_enb_dis, 0,
2655 l_l3_filter.src_dst_addr_match,
2656 l_l3_filter.perfect_inverse_match);
2657
2658 if (!l_l3_filter.src_dst_addr_match)
2659 hw_if->update_ip4_addr0(l_l3_filter.filter_no,
2660 l_l3_filter.ip4_addr);
2661 else
2662 hw_if->update_ip4_addr1(l_l3_filter.filter_no,
2663 l_l3_filter.ip4_addr);
2664
2665 DBGPR_FILTER
2666 ("Successfully %s IPv4 %s %s addressing filtering on %d filter\n",
2667 (l_l3_filter.filter_enb_dis ? "ENABLED" : "DISABLED"),
2668 (l_l3_filter.perfect_inverse_match ? "INVERSE" : "PERFECT"),
2669 (l_l3_filter.src_dst_addr_match ? "DESTINATION" : "SOURCE"),
2670 l_l3_filter.filter_no);
2671 2790
2672 DBGPR_FILTER("<--eqos_config_ip4_filters\n"); 2791 DBGPR_FILTER("<--eqos_config_ip4_filters\n");
2673 2792
@@ -2723,7 +2842,10 @@ static int eqos_config_ip6_filters(struct net_device *dev,
2723 hw_if->config_l3_filters(l_l3_filter.filter_no, 2842 hw_if->config_l3_filters(l_l3_filter.filter_no,
2724 l_l3_filter.filter_enb_dis, 1, 2843 l_l3_filter.filter_enb_dis, 1,
2725 l_l3_filter.src_dst_addr_match, 2844 l_l3_filter.src_dst_addr_match,
2726 l_l3_filter.perfect_inverse_match); 2845 l_l3_filter.perfect_inverse_match,
2846 l_l3_filter.dma_routing_enable,
2847 l_l3_filter.dma_channel,
2848 l_l3_filter.l3_mask);
2727 2849
2728 hw_if->update_ip6_addr(l_l3_filter.filter_no, l_l3_filter.ip6_addr); 2850 hw_if->update_ip6_addr(l_l3_filter.filter_no, l_l3_filter.ip6_addr);
2729 2851
@@ -2739,6 +2861,65 @@ static int eqos_config_ip6_filters(struct net_device *dev,
2739 return ret; 2861 return ret;
2740} 2862}
2741 2863
/* Validate and program one TCP/UDP (L4) port filter into the MAC.
 *
 * @tcp_udp: 0 = TCP, 1 = UDP (forwarded to the HW layer as-is).
 *
 * Checks HW capability (filter count, DMA channel range), enables the
 * global L3/L4 filter block on first use, then writes the filter control
 * fields and the source or destination port register.
 *
 * Returns 0 on success, EQOS_NO_HW_SUPPORT if the HW cannot honor the
 * request.
 */
static int __eqos_config_tcp_udp_filters(struct eqos_prv_data *pdata,
		struct eqos_l3_l4_filter *l4_filter, int tcp_udp)

{
	struct hw_if_struct *hw_if = &(pdata->hw_if);
	int ret = 0;

	if (pdata->hw_feat.l3l4_filter_num == 0)
		return EQOS_NO_HW_SUPPORT;

	if ((l4_filter->filter_no + 1) > pdata->hw_feat.l3l4_filter_num) {
		pr_err("%d filter is not supported in the HW\n",
		       l4_filter->filter_no);
		return EQOS_NO_HW_SUPPORT;
	}

	/* NOTE(review): same ">" vs ">=" question as the L3 path — confirm
	 * whether rx_ch_cnt is a count or a maximum channel index.
	 */
	if ((l4_filter->dma_routing_enable == EQOS_DMA_FILTER_ENABLE) &&
	    (l4_filter->dma_channel > (pdata->hw_feat.rx_ch_cnt))) {
		pr_err("%u dma channel is not supported in the HW\n",
		       l4_filter->dma_channel);
		return EQOS_NO_HW_SUPPORT;
	}

	/* Enable the global L3/L4 filter block once, on first filter. */
	if (!pdata->l3_l4_filter) {
		hw_if->config_l3_l4_filter_enable(1);
		pdata->l3_l4_filter = 1;
	}

	/* configure the L4 filters */
	hw_if->config_l4_filters(l4_filter->filter_no,
				 l4_filter->filter_enb_dis,
				 tcp_udp,
				 l4_filter->src_dst_addr_match,
				 l4_filter->perfect_inverse_match,
				 l4_filter->dma_routing_enable,
				 l4_filter->dma_channel);

	/* src_dst_addr_match: 0 = SOURCE port, non-zero = DESTINATION port */
	if (l4_filter->src_dst_addr_match)
		hw_if->update_l4_da_port_no(l4_filter->filter_no,
					    l4_filter->port_no);
	else
		hw_if->update_l4_sa_port_no(l4_filter->filter_no,
					    l4_filter->port_no);

#ifdef FILTER_DEBUGFS
	/* Mirror the applied configuration into debugfs (type 1 = L4). */
	__eqos_debugfs_filter(pdata, l4_filter->filter_no, 1, l4_filter,
			      sizeof(*l4_filter));
#endif
	DBGPR_FILTER
	    ("Successfully %s %s %s %s Port number filtering on %d filter\n",
	     (l4_filter->filter_enb_dis ? "ENABLED" : "DISABLED"),
	     (tcp_udp ? "UDP" : "TCP"),
	     (l4_filter->perfect_inverse_match ? "INVERSE" : "PERFECT"),
	     (l4_filter->src_dst_addr_match ? "DESTINATION" : "SOURCE"),
	     l4_filter->filter_no);

	return ret;
}
2922
2742/*! 2923/*!
2743 * \details This function is invoked by ioctl function when user issues an 2924 * \details This function is invoked by ioctl function when user issues an
2744 * ioctl command to configure L4(TCP/UDP) filtering. This function does following, 2925 * ioctl command to configure L4(TCP/UDP) filtering. This function does following,
@@ -2786,27 +2967,7 @@ static int eqos_config_tcp_udp_filters(struct net_device *dev,
2786 pdata->l3_l4_filter = 1; 2967 pdata->l3_l4_filter = 1;
2787 } 2968 }
2788 2969
2789 /* configure the L4 filters */ 2970 ret = __eqos_config_tcp_udp_filters(pdata, &l_l4_filter, tcp_udp);
2790 hw_if->config_l4_filters(l_l4_filter.filter_no,
2791 l_l4_filter.filter_enb_dis,
2792 tcp_udp,
2793 l_l4_filter.src_dst_addr_match,
2794 l_l4_filter.perfect_inverse_match);
2795
2796 if (l_l4_filter.src_dst_addr_match)
2797 hw_if->update_l4_da_port_no(l_l4_filter.filter_no,
2798 l_l4_filter.port_no);
2799 else
2800 hw_if->update_l4_sa_port_no(l_l4_filter.filter_no,
2801 l_l4_filter.port_no);
2802
2803 DBGPR_FILTER
2804 ("Successfully %s %s %s %s Port number filtering on %d filter\n",
2805 (l_l4_filter.filter_enb_dis ? "ENABLED" : "DISABLED"),
2806 (tcp_udp ? "UDP" : "TCP"),
2807 (l_l4_filter.perfect_inverse_match ? "INVERSE" : "PERFECT"),
2808 (l_l4_filter.src_dst_addr_match ? "DESTINATION" : "SOURCE"),
2809 l_l4_filter.filter_no);
2810 2971
2811 DBGPR_FILTER("<--eqos_config_tcp_udp_filters\n"); 2972 DBGPR_FILTER("<--eqos_config_tcp_udp_filters\n");
2812 2973
@@ -5430,6 +5591,101 @@ void eqos_stop_dev(struct eqos_prv_data *pdata)
5430 pr_debug("<--%s()\n", __func__); 5591 pr_debug("<--%s()\n", __func__);
5431} 5592}
5432 5593
/* Parse the "nvidia,filters" device-tree property and apply each entry as
 * an L3 (IPv4) or L4 (TCP/UDP) HW filter at device start. Malformed or
 * unsupported entries are logged and skipped; a missing property is not
 * an error.
 */
void get_configure_l3v4_filter(struct eqos_prv_data *pdata)
{
	struct eqos_l3_l4_filter l3_l4_filter;
	int i, filter_nums;
	u32 filters[EQOS_MAX_L3_L4_FILTER][3] = { {0} };
	struct device_node *pnode = pdata->pdev->dev.of_node;

	dev_info(&pdata->dev->dev, "%s -->", __func__);

	/* nvidia,filters = < filter_conf IP_addr Port_num >
	 * filter conf bits :
	 * 2 -> src/dst
	 * 3 -> enable/disable
	 * 4 -> perfect/inverse
	 * 5 -> dma_routing_enable
	 * 8:6 -> dma_channel
	 * 9 -> ipv4/ipv6
	 * 10 -> tcp/udp
	 * 15:11 -> Mask Value for L3 filter address
	 * holds mask for L3 filter, 0 -> 31
	 * 0: No bits are masked
	 * 1: LSb[0] is masked
	 * 2: LSb[1:0] are masked
	 * ....
	 * 31: LSb[30:0] are masked, leaving only bit 31
	 *
	 * 26:24 -> L3/L4
	 * 30:28 -> filter number
	 */

	/* Returns the number of u32 cells read, or a negative errno;
	 * -EINVAL means the property simply does not exist.
	 */
	filter_nums = of_property_read_variable_u32_array(pnode,
			"nvidia,filters", (u32 *)filters, 3,
			EQOS_MAX_L3_L4_FILTER * 3);

	if (filter_nums < 0 || ((filter_nums % 3) != 0)) {
		if (filter_nums != -EINVAL)
			dev_err(&pdata->pdev->dev,
				"%s: nvidia,filters read failed\n", __func__);
		return;
	}

	/* Convert cell count to number of 3-cell filter tuples. */
	filter_nums /= 3;

	for (i = 0; i < filter_nums; i += 1) {
		u32 cur_filter = filters[i][0];
		u8 type = (u8)((cur_filter >> 24) & 0x7);

		/* Bit 9 set selects IPv6, which this path cannot program. */
		if (cur_filter & (1U << 9)) {
			dev_err(&pdata->pdev->dev, "Filter %d failed," \
				" IPV6 not supported\n", ((cur_filter >> 28) & 0x7));
			continue;
		}

		memset(&l3_l4_filter, 0, sizeof(l3_l4_filter));
		l3_l4_filter.filter_no = ((cur_filter >> 28) & 0x7);
		l3_l4_filter.filter_enb_dis = ((cur_filter >> 3) & 0x1);
		l3_l4_filter.src_dst_addr_match = ((cur_filter >> 2) & 0x1);
		l3_l4_filter.perfect_inverse_match = ((cur_filter >> 4) & 0x1);
		l3_l4_filter.dma_routing_enable = ((cur_filter >> 5) & 0x1);
		l3_l4_filter.dma_channel = ((cur_filter >> 6) & 0x7);

		/* L3 Filter */
		if (!type) {
			/* NOTE(review): byte-wise extraction of the u32 IP
			 * cell is endianness-dependent; indices [3]..[0]
			 * yield a.b.c.d only on little-endian — confirm.
			 */
			l3_l4_filter.ip4_addr[0] = ((u8 *)&filters[i][1])[3];
			l3_l4_filter.ip4_addr[1] = ((u8 *)&filters[i][1])[2];
			l3_l4_filter.ip4_addr[2] = ((u8 *)&filters[i][1])[1];
			l3_l4_filter.ip4_addr[3] = ((u8 *)&filters[i][1])[0];
			l3_l4_filter.l3_mask = ((cur_filter >> 11) & 0x1f);

			if (__eqos_config_ip4_filters(pdata, &l3_l4_filter))
				dev_err(&pdata->pdev->dev,
					"%s: Failed to configure filter %d\n",
					__func__, l3_l4_filter.filter_no);

		/* L4 Filter */
		} else if (type == 1) {
			int is_udp = ((cur_filter >> 10) & 0x1);

			l3_l4_filter.port_no = filters[i][2];

			if (__eqos_config_tcp_udp_filters(pdata, &l3_l4_filter,
							  is_udp))
				dev_err(&pdata->pdev->dev,
					"%s: Failed to configure filter %d\n",
					__func__, l3_l4_filter.filter_no);

		/* Unsupported filter type */
		} else {
			dev_err(&pdata->pdev->dev,
				"%s: Wrong filter type %u for filter %d\n",
				__func__, type, l3_l4_filter.filter_no);
		}
	}
}
5688
5433void eqos_start_dev(struct eqos_prv_data *pdata) 5689void eqos_start_dev(struct eqos_prv_data *pdata)
5434{ 5690{
5435 struct hw_if_struct *hw_if = &pdata->hw_if; 5691 struct hw_if_struct *hw_if = &pdata->hw_if;
@@ -5455,6 +5711,8 @@ void eqos_start_dev(struct eqos_prv_data *pdata)
5455 /* initializes MAC and DMA */ 5711 /* initializes MAC and DMA */
5456 hw_if->init(pdata); 5712 hw_if->init(pdata);
5457 5713
5714 get_configure_l3v4_filter(pdata);
5715
5458 MAC_1US_TIC_WR(pdata->csr_clock_speed - 1); 5716 MAC_1US_TIC_WR(pdata->csr_clock_speed - 1);
5459 5717
5460 if (pdata->hw_feat.pcs_sel) 5718 if (pdata->hw_feat.pcs_sel)
diff --git a/drivers/net/ethernet/nvidia/eqos/init.c b/drivers/net/ethernet/nvidia/eqos/init.c
index b4c7e61f7..596d1da16 100644
--- a/drivers/net/ethernet/nvidia/eqos/init.c
+++ b/drivers/net/ethernet/nvidia/eqos/init.c
@@ -62,6 +62,9 @@
62#include <linux/workqueue.h> 62#include <linux/workqueue.h>
63#include <linux/tegra_prod.h> 63#include <linux/tegra_prod.h>
64#include <linux/of_net.h> 64#include <linux/of_net.h>
65#ifdef FILTER_DEBUGFS
66#include <linux/debugfs.h>
67#endif
65 68
66#define LP_SUPPORTED 0 69#define LP_SUPPORTED 0
67static const struct of_device_id eqos_of_match[] = { 70static const struct of_device_id eqos_of_match[] = {
@@ -182,8 +185,9 @@ void get_dt_u32_array(struct eqos_prv_data *pdata, char *pdt_prop, u32 *pval,
182 ret = of_property_read_u32_array(pnode, pdt_prop, pval, num_entries); 185 ret = of_property_read_u32_array(pnode, pdt_prop, pval, num_entries);
183 186
184 if (ret < 0) { 187 if (ret < 0) {
185 pr_err("%s(): \"%s\" read failed %d. Using default\n", 188 if (ret != -EINVAL)
186 __func__, pdt_prop, ret); 189 pr_err("%s(): \"%s\" read failed %d. Using default\n",
190 __func__, pdt_prop, ret);
187 for (i = 0; i < num_entries; i++) 191 for (i = 0; i < num_entries; i++)
188 pval[i] = val_def; 192 pval[i] = val_def;
189 } 193 }
@@ -1137,6 +1141,13 @@ int eqos_probe(struct platform_device *pdev)
1137 pdata->pdev = pdev; 1141 pdata->pdev = pdev;
1138 1142
1139 pdata->dev = ndev; 1143 pdata->dev = ndev;
1144#ifdef FILTER_DEBUGFS
1145 pdata->d_root = debugfs_create_dir(dev_name(&pdev->dev), NULL);
1146 if (IS_ERR_OR_NULL(pdata->d_root))
1147 dev_warn(&pdev->dev, "debugfs_create_dir failed: %ld\n",
1148 PTR_ERR(pdata->d_root));
1149 INIT_LIST_HEAD(&pdata->d_head);
1150#endif
1140 1151
1141 for (i = 0; i < num_chans; i++) 1152 for (i = 0; i < num_chans; i++)
1142 spin_lock_init(&pdata->chan_irq_lock[i]); 1153 spin_lock_init(&pdata->chan_irq_lock[i]);
@@ -1233,12 +1244,10 @@ int eqos_probe(struct platform_device *pdev)
1233 ret = hw_if->pad_calibrate(pdata); 1244 ret = hw_if->pad_calibrate(pdata);
1234 if (ret < 0) 1245 if (ret < 0)
1235 goto err_out_pad_calibrate_failed; 1246 goto err_out_pad_calibrate_failed;
1236
1237#ifdef EQOS_CONFIG_DEBUGFS 1247#ifdef EQOS_CONFIG_DEBUGFS
1238 /* to give prv data to debugfs */ 1248 /* To give prv data to debugfs */
1239 eqos_get_pdata(pdata); 1249 eqos_get_pdata(pdata);
1240#endif 1250#endif
1241
1242 ndev->irq = irq; 1251 ndev->irq = irq;
1243 pdata->common_irq = irq; 1252 pdata->common_irq = irq;
1244 1253
@@ -1291,6 +1300,8 @@ int eqos_probe(struct platform_device *pdev)
1291 RXQ_CTRL_DEFAULT, RXQ_CTRL_MAX, 4); 1300 RXQ_CTRL_DEFAULT, RXQ_CTRL_MAX, 4);
1292 get_dt_u32_array(pdata, "nvidia,queue_prio", pdt_cfg->q_prio, 1301 get_dt_u32_array(pdata, "nvidia,queue_prio", pdt_cfg->q_prio,
1293 QUEUE_PRIO_DEFAULT, QUEUE_PRIO_MAX, 4); 1302 QUEUE_PRIO_DEFAULT, QUEUE_PRIO_MAX, 4);
1303 get_dt_u32_array(pdata, "nvidia,queue_dma_map", pdt_cfg->q_dma_map,
1304 STATIC_Q_DMA_MAP, DYNAMIC_Q_DMA_MAP, 4);
1294 get_dt_u32(pdata, "nvidia,iso_bw", &pdt_cfg->iso_bw, ISO_BW_DEFAULT, 1305 get_dt_u32(pdata, "nvidia,iso_bw", &pdt_cfg->iso_bw, ISO_BW_DEFAULT,
1295 ISO_BW_DEFAULT); 1306 ISO_BW_DEFAULT);
1296 get_dt_u32(pdata, "nvidia,eth_iso_enable", &pdt_cfg->eth_iso_enable, 0, 1307 get_dt_u32(pdata, "nvidia,eth_iso_enable", &pdt_cfg->eth_iso_enable, 0,
@@ -1300,7 +1311,7 @@ int eqos_probe(struct platform_device *pdev)
1300 &pdt_cfg->slot_intvl_val, 1311 &pdt_cfg->slot_intvl_val,
1301 SLOT_INTVL_DEFAULT, SLOT_INTVL_MAX); 1312 SLOT_INTVL_DEFAULT, SLOT_INTVL_MAX);
1302 pdata->dt_cfg.phy_apd_mode = of_property_read_bool(node, 1313 pdata->dt_cfg.phy_apd_mode = of_property_read_bool(node,
1303 "nvidia,brcm_phy_apd_mode"); 1314 "nvidia,brcm_phy_apd_mode");
1304 eqos_get_slot_num_check_queues(pdata, pdt_cfg->slot_num_check); 1315 eqos_get_slot_num_check_queues(pdata, pdt_cfg->slot_num_check);
1305 1316
1306#ifndef DISABLE_TRISTATE 1317#ifndef DISABLE_TRISTATE
@@ -1452,7 +1463,7 @@ int eqos_probe(struct platform_device *pdev)
1452 dev_err(&pdata->pdev->dev, 1463 dev_err(&pdata->pdev->dev,
1453 "Failed to register attributes: %d\n", ret); 1464 "Failed to register attributes: %d\n", ret);
1454 goto err_sysfs_create_failed; 1465 goto err_sysfs_create_failed;
1455 } 1466 }
1456 } 1467 }
1457 1468
1458 spin_lock_init(&pdata->lock); 1469 spin_lock_init(&pdata->lock);
@@ -1554,6 +1565,10 @@ int eqos_probe(struct platform_device *pdev)
1554 if (!tegra_platform_is_unit_fpga()) 1565 if (!tegra_platform_is_unit_fpga())
1555 eqos_regulator_deinit(pdata); 1566 eqos_regulator_deinit(pdata);
1556 err_out_regulator_en_failed: 1567 err_out_regulator_en_failed:
1568#ifdef FILTER_DEBUGFS
1569 if (!IS_ERR_OR_NULL(pdata->d_root))
1570 debugfs_remove_recursive(pdata->d_root);
1571#endif
1557 free_netdev(ndev); 1572 free_netdev(ndev);
1558 platform_set_drvdata(pdev, NULL); 1573 platform_set_drvdata(pdev, NULL);
1559 1574
@@ -1594,6 +1609,10 @@ int eqos_remove(struct platform_device *pdev)
1594 1609
1595 ndev = platform_get_drvdata(pdev); 1610 ndev = platform_get_drvdata(pdev);
1596 pdata = netdev_priv(ndev); 1611 pdata = netdev_priv(ndev);
1612#ifdef FILTER_DEBUGFS
1613 if (!IS_ERR_OR_NULL(pdata->d_root))
1614 debugfs_remove_recursive(pdata->d_root);
1615#endif
1597 pdt_cfg = (struct eqos_cfg *)&pdata->dt_cfg; 1616 pdt_cfg = (struct eqos_cfg *)&pdata->dt_cfg;
1598 desc_if = &(pdata->desc_if); 1617 desc_if = &(pdata->desc_if);
1599 1618
diff --git a/drivers/net/ethernet/nvidia/eqos/yapphdr.h b/drivers/net/ethernet/nvidia/eqos/yapphdr.h
index 4b25f3518..6b0b29b94 100644
--- a/drivers/net/ethernet/nvidia/eqos/yapphdr.h
+++ b/drivers/net/ethernet/nvidia/eqos/yapphdr.h
@@ -30,7 +30,7 @@
30 * ========================================================================= 30 * =========================================================================
31 */ 31 */
32/* 32/*
33 * Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved. 33 * Copyright (c) 2015-2021, NVIDIA CORPORATION. All rights reserved.
34 * 34 *
35 * This program is free software; you can redistribute it and/or modify it 35 * This program is free software; you can redistribute it and/or modify it
36 * under the terms and conditions of the GNU General Public License, 36 * under the terms and conditions of the GNU General Public License,
@@ -212,6 +212,10 @@
212#define EQOS_L3_L4_FILTER_DISABLE 0x0 212#define EQOS_L3_L4_FILTER_DISABLE 0x0
213#define EQOS_L3_L4_FILTER_ENABLE 0x1 213#define EQOS_L3_L4_FILTER_ENABLE 0x1
214 214
215/* DMA Routing operations */
216#define EQOS_DMA_FILTER_DISABLE 0x0
217#define EQOS_DMA_FILTER_ENABLE 0x1
218
215/* Loopback mode */ 219/* Loopback mode */
216#define EQOS_MAC_LOOPBACK_DISABLE 0x0 220#define EQOS_MAC_LOOPBACK_DISABLE 0x0
217#define EQOS_MAC_LOOPBACK_ENABLE 0x1 221#define EQOS_MAC_LOOPBACK_ENABLE 0x1
@@ -331,6 +335,21 @@ struct eqos_l3_l4_filter {
331 335
332 /* TCP/UDP src/dst port number */ 336 /* TCP/UDP src/dst port number */
333 unsigned short port_no; 337 unsigned short port_no;
338
339 /* 0 - disable and 1 - enable */
340 int dma_routing_enable;
341
342 /* To hold DMA channel */
343 unsigned short dma_channel;
344
345 /* holds mask for L3 filter, 0 -> 31
346 * 0: No bits are masked
347 * 1: LSb[0] is masked
348 * 2: LSb[1:0] are masked
349 * ....
351 * 31: LSb[0:30] are masked, leaving only bit 31
351 */
352 unsigned short l3_mask;
334}; 353};
335 354
336struct eqos_vlan_filter { 355struct eqos_vlan_filter {
diff --git a/drivers/net/ethernet/nvidia/eqos/yheader.h b/drivers/net/ethernet/nvidia/eqos/yheader.h
index f6e1794b2..1c14162e3 100644
--- a/drivers/net/ethernet/nvidia/eqos/yheader.h
+++ b/drivers/net/ethernet/nvidia/eqos/yheader.h
@@ -584,6 +584,9 @@
584/* Hash Table Reg count */ 584/* Hash Table Reg count */
585#define EQOS_HTR_CNT (pdata->max_hash_table_size/32) 585#define EQOS_HTR_CNT (pdata->max_hash_table_size/32)
586 586
587/* MAX L3/L4 filters support */
588#define EQOS_MAX_L3_L4_FILTER 8U
589
587/* For handling VLAN filtering */ 590/* For handling VLAN filtering */
588#define EQOS_VLAN_PERFECT_FILTERING 0 591#define EQOS_VLAN_PERFECT_FILTERING 0
589#define EQOS_VLAN_HASH_FILTERING 1 592#define EQOS_VLAN_HASH_FILTERING 1
@@ -903,13 +906,16 @@ struct hw_if_struct {
903 INT(*config_mac_pkt_filter_reg)(UCHAR, UCHAR, UCHAR, UCHAR, UCHAR); 906 INT(*config_mac_pkt_filter_reg)(UCHAR, UCHAR, UCHAR, UCHAR, UCHAR);
904 INT(*config_l3_l4_filter_enable)(INT); 907 INT(*config_l3_l4_filter_enable)(INT);
905 INT(*config_l3_filters)(INT filter_no, INT enb_dis, INT ipv4_ipv6_match, 908 INT(*config_l3_filters)(INT filter_no, INT enb_dis, INT ipv4_ipv6_match,
906 INT src_dst_addr_match, INT perfect_inverse_match); 909 INT src_dst_addr_match, INT perfect_inverse_match,
910 INT dma_routing_enable, USHORT dma_channel,
911 USHORT l3_mask);
907 INT(*update_ip4_addr0)(INT filter_no, UCHAR addr[]); 912 INT(*update_ip4_addr0)(INT filter_no, UCHAR addr[]);
908 INT(*update_ip4_addr1)(INT filter_no, UCHAR addr[]); 913 INT(*update_ip4_addr1)(INT filter_no, UCHAR addr[]);
909 INT(*update_ip6_addr)(INT filter_no, USHORT addr[]); 914 INT(*update_ip6_addr)(INT filter_no, USHORT addr[]);
910 INT(*config_l4_filters)(INT filter_no, INT enb_dis, 915 INT(*config_l4_filters)(INT filter_no, INT enb_dis, INT tcp_udp_match,
911 INT tcp_udp_match, INT src_dst_port_match, 916 INT src_dst_port_match,
912 INT perfect_inverse_match); 917 INT perfect_inverse_match,
918 INT dma_routing_enable, USHORT dma_channel);
913 INT(*update_l4_sa_port_no)(INT filter_no, USHORT port_no); 919 INT(*update_l4_sa_port_no)(INT filter_no, USHORT port_no);
914 INT(*update_l4_da_port_no)(INT filter_no, USHORT port_no); 920 INT(*update_l4_da_port_no)(INT filter_no, USHORT port_no);
915 921
@@ -1342,6 +1348,8 @@ typedef enum {
1342} pause_frames_e; 1348} pause_frames_e;
1343#define PAUSE_FRAMES_DEFAULT PAUSE_FRAMES_ENABLED 1349#define PAUSE_FRAMES_DEFAULT PAUSE_FRAMES_ENABLED
1344 1350
1351#define STATIC_Q_DMA_MAP 0
1352#define DYNAMIC_Q_DMA_MAP 1
1345#define QUEUE_PRIO_DEFAULT 0 1353#define QUEUE_PRIO_DEFAULT 0
1346#define QUEUE_PRIO_MAX 7 1354#define QUEUE_PRIO_MAX 7
1347#define CHAN_NAPI_QUOTA_DEFAULT 64 1355#define CHAN_NAPI_QUOTA_DEFAULT 64
@@ -1352,6 +1360,7 @@ typedef enum {
1352struct eqos_cfg { 1360struct eqos_cfg {
1353 bool use_multi_q; /* 0=single queue, jumbo frames enabled */ 1361 bool use_multi_q; /* 0=single queue, jumbo frames enabled */
1354 rxq_ctrl_e rxq_ctrl[MAX_CHANS]; 1362 rxq_ctrl_e rxq_ctrl[MAX_CHANS];
1363 uint q_dma_map[MAX_CHANS];
1355 uint q_prio[MAX_CHANS]; 1364 uint q_prio[MAX_CHANS];
1356 uint chan_napi_quota[MAX_CHANS]; 1365 uint chan_napi_quota[MAX_CHANS];
1357 uint slot_num_check[MAX_CHANS]; 1366 uint slot_num_check[MAX_CHANS];
@@ -1587,6 +1596,11 @@ struct eqos_prv_data {
1587 /** Reserve SKB pointer and DMA */ 1596 /** Reserve SKB pointer and DMA */
1588 struct sk_buff *resv_skb; 1597 struct sk_buff *resv_skb;
1589 dma_addr_t resv_dma; 1598 dma_addr_t resv_dma;
1599 /* debugfs */
1600#ifdef FILTER_DEBUGFS
1601 struct dentry *d_root;
1602 struct list_head d_head;
1603#endif
1590}; 1604};
1591 1605
1592typedef enum { 1606typedef enum {
diff --git a/drivers/net/ethernet/nvidia/eqos/yregacc.h b/drivers/net/ethernet/nvidia/eqos/yregacc.h
index 0cbe363dd..18dfb14fe 100644
--- a/drivers/net/ethernet/nvidia/eqos/yregacc.h
+++ b/drivers/net/ethernet/nvidia/eqos/yregacc.h
@@ -30,7 +30,7 @@
30 * ========================================================================= 30 * =========================================================================
31 */ 31 */
32/* 32/*
33 * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. 33 * Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
34 * 34 *
35 * This program is free software; you can redistribute it and/or modify it 35 * This program is free software; you can redistribute it and/or modify it
36 * under the terms and conditions of the GNU General Public License, 36 * under the terms and conditions of the GNU General Public License,
@@ -37646,6 +37646,20 @@ extern ULONG eqos_base_addr;
37646#define MAC_ISR_LPI_LPOS 5 37646#define MAC_ISR_LPI_LPOS 5
37647#define MAC_ISR_LPI_HPOS 5 37647#define MAC_ISR_LPI_HPOS 5
37648 37648
37649#define MAC_L3L4_CTR_DMCHEN0 BIT(28)
37650#define MAC_L3L4_CTR_DMCHEN0_SHIFT 28
37651#define MAC_L3L4_CTR_DMCHN0 (BIT(24) | BIT(25) | \
37652 BIT(26) | BIT(27))
37653#define MAC_L3L4_CTR_DMCHN0_SHIFT 24
37654
37655#define MAC_L3L4_CTR_L3HDBM0 (BIT(11) | BIT(12) | \
37656 BIT(13) | BIT(14) | BIT(15))
37657#define MAC_L3L4_CTR_L3HDBM0_SHIFT 11
37658
37659#define MAC_L3L4_CTR_L3HSBM0 (BIT(6) | BIT(7) | \
37660 BIT(8) | BIT(9) | BIT(10))
37661#define MAC_L3L4_CTR_L3HSBM0_SHIFT 6
37662
37649#define GET_VALUE(data, lbit, hbit) ((data >> lbit) & (~(~0<<(hbit-lbit+1)))) 37663#define GET_VALUE(data, lbit, hbit) ((data >> lbit) & (~(~0<<(hbit-lbit+1))))
37650 37664
37651#define GET_INDEXED_VALUE(data, lbit, hbit, index)\ 37665#define GET_INDEXED_VALUE(data, lbit, hbit, index)\