author		Sunil Goutham <sgoutham@marvell.com>	2018-10-22 13:55:49 -0400
committer	David S. Miller <davem@davemloft.net>	2018-10-22 23:15:37 -0400
commit		b279bbb3314e114609983b02c4253aff6b4be976 (patch)
tree		1dcecd0b02696d5ee639a17ef360a9d279bfc942
parent		a3e7121c704470e2c33a4b91bb218205947cd531 (diff)
octeontx2-af: NIX Tx scheduler queue config support
This patch adds support for a PF/VF driver to configure NIX transmit scheduler queues via mbox. Since a PF/VF doesn't know the absolute HW index of the NIXLF attached to it, AF traps the register config and overwrites it with the correct NIXLF index. HW supports shaping, colouring and policing of packets with these multilevel traffic scheduler queues.

Instead of introducing different mbox message formats for different configurations and making both the AF and PF/VF driver implementations cumbersome, access to the scheduler queues' CSRs is provided via mbox. AF checks whether the sender PF/VF has the corresponding queue allocated or not and then dumps the config to HW.

With a single mbox msg, 20 registers can be configured.

Signed-off-by: Sunil Goutham <sgoutham@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
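As a rough illustration of the mbox flow described above, the sketch below shows how a PF/VF driver might fill a NIX_TXSCHQ_CFG request for one SMQ. This is a minimal sketch, not part of this patch: fill_smq_config() is a hypothetical helper, and allocating/sending the mbox message is driver specific and omitted. It relies only on struct nix_txschq_config (added in mbox.h below) and the NIX_AF_SMQX_CFG()/NIX_AF_MDQX_PARENT() register macros this driver already defines.

/* Minimal sketch (assumption, not from this patch): populate a
 * NIX_TXSCHQ_CFG request for one SMQ.  Assumes the driver's mbox.h,
 * rvu_struct.h and rvu_reg.h are included and that "req" points at a
 * zeroed struct nix_txschq_config inside an already allocated mbox msg.
 */
static void fill_smq_config(struct nix_txschq_config *req,
			    u16 smq, u16 tl4_parent)
{
	req->lvl = NIX_TXSCH_LVL_SMQ;
	req->num_regs = 2;

	/* SMQ config register: AF overwrites bits 24..30 with the HW
	 * NIXLF index, so the PF/VF need not know its absolute NIXLF.
	 */
	req->reg[0] = NIX_AF_SMQX_CFG(smq);
	req->regval[0] = 0;	/* shaping/length fields omitted here */

	/* Attach the MDQ/SMQ to its TL4 parent; AF validates that the
	 * parent index (regval bits 16..24) belongs to this PF/VF.
	 */
	req->reg[1] = NIX_AF_MDQX_PARENT(smq);
	req->regval[1] = (u64)tl4_parent << 16;
}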
-rw-r--r--	drivers/net/ethernet/marvell/octeontx2/af/Makefile	|   3
-rw-r--r--	drivers/net/ethernet/marvell/octeontx2/af/mbox.h	|  15
-rw-r--r--	drivers/net/ethernet/marvell/octeontx2/af/rvu.h		|  11
-rw-r--r--	drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c	| 104
-rw-r--r--	drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c	|  71
5 files changed, 199 insertions, 5 deletions
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
index 45b108f8f955..264cbd7bc1ea 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
@@ -7,4 +7,5 @@ obj-$(CONFIG_OCTEONTX2_MBOX) += octeontx2_mbox.o
 obj-$(CONFIG_OCTEONTX2_AF) += octeontx2_af.o
 
 octeontx2_mbox-y := mbox.o
-octeontx2_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o
+octeontx2_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
+		  rvu_reg.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 282e556576ab..f2e0743d2a2e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -154,7 +154,8 @@ M(NIX_LF_FREE, 0x8001, msg_req, msg_rsp) \
 M(NIX_AQ_ENQ,		0x8002, nix_aq_enq_req, nix_aq_enq_rsp)	\
 M(NIX_HWCTX_DISABLE,	0x8003, hwctx_disable_req, msg_rsp)	\
 M(NIX_TXSCH_ALLOC,	0x8004, nix_txsch_alloc_req, nix_txsch_alloc_rsp) \
-M(NIX_TXSCH_FREE,	0x8005, nix_txsch_free_req, msg_rsp)
+M(NIX_TXSCH_FREE,	0x8005, nix_txsch_free_req, msg_rsp) \
+M(NIX_TXSCHQ_CFG,	0x8006, nix_txschq_config, msg_rsp)
 
 /* Messages initiated by AF (range 0xC00 - 0xDFF) */
 #define MBOX_UP_CGX_MESSAGES \
@@ -448,4 +449,16 @@ struct nix_txsch_free_req {
 	u16 schq;
 };
 
+struct nix_txschq_config {
+	struct mbox_msghdr hdr;
+	u8 lvl; /* SMQ/MDQ/TL4/TL3/TL2/TL1 */
+#define TXSCHQ_IDX_SHIFT	16
+#define TXSCHQ_IDX_MASK		(BIT_ULL(10) - 1)
+#define TXSCHQ_IDX(reg, shift)	(((reg) >> (shift)) & TXSCHQ_IDX_MASK)
+	u8 num_regs;
+#define MAX_REGS_PER_MBOX_MSG	20
+	u64 reg[MAX_REGS_PER_MBOX_MSG];
+	u64 regval[MAX_REGS_PER_MBOX_MSG];
+};
+
 #endif /* MBOX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index c402eba82784..4b15552655e7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -195,6 +195,14 @@ int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf);
 int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc);
 int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero);
 
+/* RVU HW reg validation */
+enum regmap_block {
+	TXSCHQ_HWREGMAP = 0,
+	MAX_HWREGMAP,
+};
+
+bool rvu_check_valid_reg(int regmap, int regblk, u64 reg);
+
 /* NPA/NIX AQ APIs */
 int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
 		 int qsize, int inst_size, int res_size);
@@ -277,4 +285,7 @@ int rvu_mbox_handler_NIX_TXSCH_ALLOC(struct rvu *rvu,
 int rvu_mbox_handler_NIX_TXSCH_FREE(struct rvu *rvu,
 				    struct nix_txsch_free_req *req,
 				    struct msg_rsp *rsp);
+int rvu_mbox_handler_NIX_TXSCHQ_CFG(struct rvu *rvu,
+				    struct nix_txschq_config *req,
+				    struct msg_rsp *rsp);
 #endif /* RVU_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index e8374d9ebdff..56f242d1e587 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -738,10 +738,10 @@ static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
 	if (lvl == NIX_TXSCH_LVL_TL4)
 		rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00);
 
-	if (lvl != NIX_TXSCH_LVL_TL3)
+	if (lvl != NIX_TXSCH_LVL_TL2)
 		return;
 
-	/* Reset TL3's CGX or LBK link config */
+	/* Reset TL2's CGX or LBK link config */
 	for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++)
 		rvu_write64(rvu, blkaddr,
 			    NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
@@ -851,7 +851,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
 	/* Disable TL2/3 queue links before SMQ flush*/
 	spin_lock(&rvu->rsrc_lock);
 	for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
-		if (lvl != NIX_TXSCH_LVL_TL3 && lvl != NIX_TXSCH_LVL_TL4)
+		if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4)
 			continue;
 
 		txsch = &nix_hw->txsch[lvl];
@@ -909,6 +909,104 @@ int rvu_mbox_handler_NIX_TXSCH_FREE(struct rvu *rvu,
 	return nix_txschq_free(rvu, req->hdr.pcifunc);
 }
 
+static bool is_txschq_config_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
+				   int lvl, u64 reg, u64 regval)
+{
+	u64 regbase = reg & 0xFFFF;
+	u16 schq, parent;
+
+	if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, lvl, reg))
+		return false;
+
+	schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
+	/* Check if this schq belongs to this PF/VF or not */
+	if (!is_valid_txschq(rvu, blkaddr, lvl, pcifunc, schq))
+		return false;
+
+	parent = (regval >> 16) & 0x1FF;
+	/* Validate MDQ's TL4 parent */
+	if (regbase == NIX_AF_MDQX_PARENT(0) &&
+	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL4, pcifunc, parent))
+		return false;
+
+	/* Validate TL4's TL3 parent */
+	if (regbase == NIX_AF_TL4X_PARENT(0) &&
+	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL3, pcifunc, parent))
+		return false;
+
+	/* Validate TL3's TL2 parent */
+	if (regbase == NIX_AF_TL3X_PARENT(0) &&
+	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL2, pcifunc, parent))
+		return false;
+
+	/* Validate TL2's TL1 parent */
+	if (regbase == NIX_AF_TL2X_PARENT(0) &&
+	    !is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_TL1, pcifunc, parent))
+		return false;
+
+	return true;
+}
+
+int rvu_mbox_handler_NIX_TXSCHQ_CFG(struct rvu *rvu,
+				    struct nix_txschq_config *req,
+				    struct msg_rsp *rsp)
+{
+	struct rvu_hwinfo *hw = rvu->hw;
+	u16 pcifunc = req->hdr.pcifunc;
+	u64 reg, regval, schq_regbase;
+	struct nix_txsch *txsch;
+	struct nix_hw *nix_hw;
+	int blkaddr, idx, err;
+	int nixlf;
+
+	if (req->lvl >= NIX_TXSCH_LVL_CNT ||
+	    req->num_regs > MAX_REGS_PER_MBOX_MSG)
+		return NIX_AF_INVAL_TXSCHQ_CFG;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+	if (blkaddr < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	nix_hw = get_nix_hw(rvu->hw, blkaddr);
+	if (!nix_hw)
+		return -EINVAL;
+
+	nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+	if (nixlf < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	txsch = &nix_hw->txsch[req->lvl];
+	for (idx = 0; idx < req->num_regs; idx++) {
+		reg = req->reg[idx];
+		regval = req->regval[idx];
+		schq_regbase = reg & 0xFFFF;
+
+		if (!is_txschq_config_valid(rvu, pcifunc, blkaddr,
+					    txsch->lvl, reg, regval))
+			return NIX_AF_INVAL_TXSCHQ_CFG;
+
+		/* Replace PF/VF visible NIXLF slot with HW NIXLF id */
+		if (schq_regbase == NIX_AF_SMQX_CFG(0)) {
+			nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
+					   pcifunc, 0);
+			regval &= ~(0x7FULL << 24);
+			regval |= ((u64)nixlf << 24);
+		}
+
+		rvu_write64(rvu, blkaddr, reg, regval);
+
+		/* Check for SMQ flush, if so, poll for its completion */
+		if (schq_regbase == NIX_AF_SMQX_CFG(0) &&
+		    (regval & BIT_ULL(49))) {
+			err = rvu_poll_reg(rvu, blkaddr,
+					   reg, BIT_ULL(49), true);
+			if (err)
+				return NIX_AF_SMQ_FLUSH_FAILED;
+		}
+	}
+	return 0;
+}
+
 static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
 {
 	struct nix_txsch *txsch;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
new file mode 100644
index 000000000000..9d7c135c7965
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver
+ *
+ * Copyright (C) 2018 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "rvu_struct.h"
+#include "common.h"
+#include "mbox.h"
+#include "rvu.h"
+
+struct reg_range {
+	u64 start;
+	u64 end;
+};
+
+struct hw_reg_map {
+	u8 regblk;
+	u8 num_ranges;
+	u64 mask;
+#define MAX_REG_RANGES	8
+	struct reg_range range[MAX_REG_RANGES];
+};
+
+static struct hw_reg_map txsch_reg_map[NIX_TXSCH_LVL_CNT] = {
+	{NIX_TXSCH_LVL_SMQ, 2, 0xFFFF, {{0x0700, 0x0708}, {0x1400, 0x14C8} } },
+	{NIX_TXSCH_LVL_TL4, 3, 0xFFFF, {{0x0B00, 0x0B08}, {0x0B10, 0x0B18},
+					{0x1200, 0x12E0} } },
+	{NIX_TXSCH_LVL_TL3, 3, 0xFFFF, {{0x1000, 0x10E0}, {0x1600, 0x1608},
+					{0x1610, 0x1618} } },
+	{NIX_TXSCH_LVL_TL2, 2, 0xFFFF, {{0x0E00, 0x0EE0}, {0x1700, 0x1768} } },
+	{NIX_TXSCH_LVL_TL1, 1, 0xFFFF, {{0x0C00, 0x0D98} } },
+};
+
+bool rvu_check_valid_reg(int regmap, int regblk, u64 reg)
+{
+	int idx;
+	struct hw_reg_map *map;
+
+	/* Only 64bit offsets */
+	if (reg & 0x07)
+		return false;
+
+	if (regmap == TXSCHQ_HWREGMAP) {
+		if (regblk >= NIX_TXSCH_LVL_CNT)
+			return false;
+		map = &txsch_reg_map[regblk];
+	} else {
+		return false;
+	}
+
+	/* Should never happen */
+	if (map->regblk != regblk)
+		return false;
+
+	reg &= map->mask;
+
+	for (idx = 0; idx < map->num_ranges; idx++) {
+		if (reg >= map->range[idx].start &&
+		    reg < map->range[idx].end)
+			return true;
+	}
+	return false;
+}