diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
index 72ad04eb8769b514da7659392c7fbb3dee22b8e7..ec50a21c5aaf3ddbd3b88e422e42c631aa73f523 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
@@ -143,6 +143,9 @@ enum nix_scheduler {
 	NIX_TXSCH_LVL_CNT = 0x5,
 };
 
+/* Default TL1 round-robin quantum (max 24-bit value) and priority */
+#define TXSCH_TL1_DFLT_RR_QTM      ((1 << 24) - 1)
+#define TXSCH_TL1_DFLT_RR_PRIO     (0x1ull)
+
 /* Min/Max packet sizes, excluding FCS */
 #define	NIC_HW_MIN_FRS			40
 #define	NIC_HW_MAX_FRS			9212
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index a0e445d8eb2ddeda92d03a1c909dd6f542a8765a..b1841725af5bc6eaae533764cd156a95b3119c8c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -156,7 +156,11 @@ struct rvu_pfvf {
 struct nix_txsch {
 	struct rsrc_bmap schq;
 	u8   lvl;
-	u16  *pfvf_map;
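+/* Lower 16 bits of each pfvf_map entry hold the owning pcifunc, the
+ * upper 16 bits hold scheduler queue flags.
+ */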
+#define NIX_TXSCHQ_TL1_CFG_DONE       BIT_ULL(0)
+#define TXSCH_MAP_FUNC(__pfvf_map)    ((__pfvf_map) & 0xFFFF)
+#define TXSCH_MAP_FLAGS(__pfvf_map)   ((__pfvf_map) >> 16)
+#define TXSCH_MAP(__func, __flags)    (((__func) & 0xFFFF) | ((__flags) << 16))
+	u32  *pfvf_map;
 };
 
 struct npc_pkind {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index a1f268652b04469564b8003b898aa4f92d54b64a..0d4929bd3a956a8ff76dcecfbee6a78557abff0e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -127,6 +127,7 @@ static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
 {
 	struct nix_txsch *txsch;
 	struct nix_hw *nix_hw;
+	u16 map_func;
 
 	nix_hw = get_nix_hw(rvu->hw, blkaddr);
 	if (!nix_hw)
@@ -138,11 +139,18 @@ static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
 		return false;
 
 	mutex_lock(&rvu->rsrc_lock);
-	if (txsch->pfvf_map[schq] != pcifunc) {
-		mutex_unlock(&rvu->rsrc_lock);
-		return false;
-	}
+	map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]);
 	mutex_unlock(&rvu->rsrc_lock);
+
+	/* For a TL1 schq, sharing across VFs of the same PF is OK */
+	if (lvl == NIX_TXSCH_LVL_TL1 &&
+	    rvu_get_pf(map_func) != rvu_get_pf(pcifunc))
+		return false;
+
+	if (lvl != NIX_TXSCH_LVL_TL1 &&
+	    map_func != pcifunc)
+		return false;
+
 	return true;
 }
 
@@ -494,7 +502,9 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
 
 	/* Check if SQ pointed SMQ belongs to this PF/VF or not */
 	if (req->ctype == NIX_AQ_CTYPE_SQ &&
-	    req->op != NIX_AQ_INSTOP_WRITE) {
+	    ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
+	     (req->op == NIX_AQ_INSTOP_WRITE &&
+	      req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) {
 		if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
 				     pcifunc, req->sq.smq))
 			return NIX_AF_ERR_AQ_ENQUEUE;
@@ -987,6 +997,73 @@ static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr,
 			    NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00);
 }
 
+/* TL1 schqs are not dynamically allocated; two are statically reserved
+ * per link (CGX LMAC or LBK) and shared by a PF and all its VFs. Return
+ * this pcifunc's TL1 schq pair, initializing map entries on first use.
+ */
+static int
+rvu_get_tl1_schqs(struct rvu *rvu, int blkaddr, u16 pcifunc,
+		  u16 *schq_list, u16 *schq_cnt)
+{
+	struct nix_txsch *txsch;
+	struct nix_hw *nix_hw;
+	struct rvu_pfvf *pfvf;
+	u8 cgx_id, lmac_id;
+	u16 schq_base;
+	u32 *pfvf_map;
+	int pf, intf;
+
+	nix_hw = get_nix_hw(rvu->hw, blkaddr);
+	if (!nix_hw)
+		return -ENODEV;
+
+	pfvf = rvu_get_pfvf(rvu, pcifunc);
+	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1];
+	pfvf_map = txsch->pfvf_map;
+	pf = rvu_get_pf(pcifunc);
+
+	/* TL1 schqs are statically allocated, two per link */
+	intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
+
+	switch (intf) {
+	case NIX_INTF_TYPE_CGX:
+		rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id);
+		schq_base = (cgx_id * MAX_LMAC_PER_CGX + lmac_id) * 2;
+		break;
+	case NIX_INTF_TYPE_LBK:
+		schq_base = rvu->cgx_cnt_max * MAX_LMAC_PER_CGX * 2;
+		break;
+	default:
+		return -ENODEV;
+	}
+
+	if (schq_base + 1 >= txsch->schq.max)
+		return -ENODEV;
+
+	/* Initialize pfvf_map on first use; entries also carry flags */
+	if (pfvf_map[schq_base] == U32_MAX) {
+		pfvf_map[schq_base] =
+			TXSCH_MAP((pf << RVU_PFVF_PF_SHIFT), 0);
+		pfvf_map[schq_base + 1] =
+			TXSCH_MAP((pf << RVU_PFVF_PF_SHIFT), 0);
+
+		/* One-time reset for TL1 */
+		nix_reset_tx_linkcfg(rvu, blkaddr,
+				     NIX_TXSCH_LVL_TL1, schq_base);
+		nix_reset_tx_shaping(rvu, blkaddr,
+				     NIX_TXSCH_LVL_TL1, schq_base);
+
+		nix_reset_tx_linkcfg(rvu, blkaddr,
+				     NIX_TXSCH_LVL_TL1, schq_base + 1);
+		nix_reset_tx_shaping(rvu, blkaddr,
+				     NIX_TXSCH_LVL_TL1, schq_base + 1);
+	}
+
+	if (schq_list && schq_cnt) {
+		schq_list[0] = schq_base;
+		schq_list[1] = schq_base + 1;
+		*schq_cnt = 2;
+	}
+
+	return 0;
+}
+
 int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
 				     struct nix_txsch_alloc_req *req,
 				     struct nix_txsch_alloc_rsp *rsp)
@@ -997,6 +1074,7 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
 	struct rvu_pfvf *pfvf;
 	struct nix_hw *nix_hw;
 	int blkaddr, rc = 0;
+	u32 *pfvf_map;
 	u16 schq;
 
 	pfvf = rvu_get_pfvf(rvu, pcifunc);
@@ -1012,13 +1090,23 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
 		txsch = &nix_hw->txsch[lvl];
 		req_schq = req->schq_contig[lvl] + req->schq[lvl];
+		pfvf_map = txsch->pfvf_map;
+
+		if (!req_schq)
+			continue;
 
 		/* There are only 28 TL1s */
-		if (lvl == NIX_TXSCH_LVL_TL1 && req_schq > txsch->schq.max)
-			goto err;
+		if (lvl == NIX_TXSCH_LVL_TL1) {
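+			/* Only the two statically mapped per-link TL1s can
+			 * be handed out; contiguous TL1 requests aren't
+			 * supported.
+			 */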
+			if (req->schq_contig[lvl] ||
+			    req->schq[lvl] > 2 ||
+			    rvu_get_tl1_schqs(rvu, blkaddr,
+					      pcifunc, NULL, NULL))
+				goto err;
+			continue;
+		}
 
 		/* Check if request is valid */
-		if (!req_schq || req_schq > MAX_TXSCHQ_PER_FUNC)
+		if (req_schq > MAX_TXSCHQ_PER_FUNC)
 			goto err;
 
 		/* If contiguous queues are needed, check for availability */
@@ -1034,16 +1122,32 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
 		txsch = &nix_hw->txsch[lvl];
 		rsp->schq_contig[lvl] = req->schq_contig[lvl];
+		pfvf_map = txsch->pfvf_map;
 		rsp->schq[lvl] = req->schq[lvl];
 
-		schq = 0;
+		if (!req->schq[lvl] && !req->schq_contig[lvl])
+			continue;
+
+		/* Handle TL1 specially as its allocation
+		 * is restricted to two TL1s per link
+		 */
+		if (lvl == NIX_TXSCH_LVL_TL1) {
+			rsp->schq_contig[lvl] = 0;
+			rvu_get_tl1_schqs(rvu, blkaddr, pcifunc,
+					  &rsp->schq_list[lvl][0],
+					  &rsp->schq[lvl]);
+			continue;
+		}
+
 		/* Alloc contiguous queues first */
 		if (req->schq_contig[lvl]) {
 			schq = rvu_alloc_rsrc_contig(&txsch->schq,
 						     req->schq_contig[lvl]);
 
 			for (idx = 0; idx < req->schq_contig[lvl]; idx++) {
-				txsch->pfvf_map[schq] = pcifunc;
+				pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
 				nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
 				nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
 				rsp->schq_contig_list[lvl][idx] = schq;
@@ -1054,7 +1158,7 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
 		/* Alloc non-contiguous queues */
 		for (idx = 0; idx < req->schq[lvl]; idx++) {
 			schq = rvu_alloc_rsrc(&txsch->schq);
-			txsch->pfvf_map[schq] = pcifunc;
+			pfvf_map[schq] = TXSCH_MAP(pcifunc, 0);
 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
 			nix_reset_tx_shaping(rvu, blkaddr, lvl, schq);
 			rsp->schq_list[lvl][idx] = schq;
@@ -1096,7 +1200,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
 
 		txsch = &nix_hw->txsch[lvl];
 		for (schq = 0; schq < txsch->schq.max; schq++) {
-			if (txsch->pfvf_map[schq] != pcifunc)
+			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
 				continue;
 			nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq);
 		}
@@ -1105,7 +1209,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
 	/* Flush SMQs */
 	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
 	for (schq = 0; schq < txsch->schq.max; schq++) {
-		if (txsch->pfvf_map[schq] != pcifunc)
+		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
 			continue;
 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
 		/* Do SMQ flush and set enqueue xoff */
@@ -1123,9 +1227,15 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
 
 	/* Now free scheduler queues to free pool */
 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+		/* Free all schqs except TL1, as TL1 is
+		 * shared across all VFs of an RVU PF
+		 */
+		if (lvl == NIX_TXSCH_LVL_TL1)
+			continue;
+
 		txsch = &nix_hw->txsch[lvl];
 		for (schq = 0; schq < txsch->schq.max; schq++) {
-			if (txsch->pfvf_map[schq] != pcifunc)
+			if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
 				continue;
 			rvu_free_rsrc(&txsch->schq, schq);
 			txsch->pfvf_map[schq] = 0;
@@ -1187,16 +1297,73 @@ static bool is_txschq_config_valid(struct rvu *rvu, u16 pcifunc, int blkaddr,
 	return true;
 }
 
+/* Program the default round-robin config on this pcifunc's TL1 schqs,
+ * skipping any schq already configured by the PF.
+ */
+static int
+nix_tl1_default_cfg(struct rvu *rvu, u16 pcifunc)
+{
+	u16 schq_list[2], schq_cnt, schq;
+	int blkaddr, idx, err = 0;
+	u16 map_func, map_flags;
+	struct nix_hw *nix_hw;
+	u64 reg, regval;
+	u32 *pfvf_map;
+
+	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+	if (blkaddr < 0)
+		return NIX_AF_ERR_AF_LF_INVALID;
+
+	nix_hw = get_nix_hw(rvu->hw, blkaddr);
+	if (!nix_hw)
+		return -EINVAL;
+
+	pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map;
+
+	mutex_lock(&rvu->rsrc_lock);
+
+	err = rvu_get_tl1_schqs(rvu, blkaddr,
+				pcifunc, schq_list, &schq_cnt);
+	if (err)
+		goto unlock;
+
+	for (idx = 0; idx < schq_cnt; idx++) {
+		schq = schq_list[idx];
+		map_func = TXSCH_MAP_FUNC(pfvf_map[schq]);
+		map_flags = TXSCH_MAP_FLAGS(pfvf_map[schq]);
+
+		/* Skip if already configured (by default cfg or by the PF) */
+		if (map_flags & NIX_TXSCHQ_TL1_CFG_DONE)
+			continue;
+
+		/* Default config: RR priority, max RR quantum, CIR cleared */
+		reg = NIX_AF_TL1X_TOPOLOGY(schq);
+		regval = (TXSCH_TL1_DFLT_RR_PRIO << 1);
+		rvu_write64(rvu, blkaddr, reg, regval);
+		reg = NIX_AF_TL1X_SCHEDULE(schq);
+		regval = TXSCH_TL1_DFLT_RR_QTM;
+		rvu_write64(rvu, blkaddr, reg, regval);
+		reg = NIX_AF_TL1X_CIR(schq);
+		regval = 0;
+		rvu_write64(rvu, blkaddr, reg, regval);
+
+		map_flags |= NIX_TXSCHQ_TL1_CFG_DONE;
+		pfvf_map[schq] = TXSCH_MAP(map_func, map_flags);
+	}
+unlock:
+	mutex_unlock(&rvu->rsrc_lock);
+	return err;
+}
+
 int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
 				    struct nix_txschq_config *req,
 				    struct msg_rsp *rsp)
 {
+	u16 schq, pcifunc = req->hdr.pcifunc;
 	struct rvu_hwinfo *hw = rvu->hw;
-	u16 pcifunc = req->hdr.pcifunc;
 	u64 reg, regval, schq_regbase;
 	struct nix_txsch *txsch;
+	u16 map_func, map_flags;
 	struct nix_hw *nix_hw;
 	int blkaddr, idx, err;
+	u32 *pfvf_map;
 	int nixlf;
 
 	if (req->lvl >= NIX_TXSCH_LVL_CNT ||
@@ -1216,6 +1383,16 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
 		return NIX_AF_ERR_AF_LF_INVALID;
 
 	txsch = &nix_hw->txsch[req->lvl];
+	pfvf_map = txsch->pfvf_map;
+
+	/* A VF is only allowed to trigger the
+	 * default config on TL1
+	 */
+	if (pcifunc & RVU_PFVF_FUNC_MASK &&
+	    req->lvl == NIX_TXSCH_LVL_TL1)
+		return nix_tl1_default_cfg(rvu, pcifunc);
+
 	for (idx = 0; idx < req->num_regs; idx++) {
 		reg = req->reg[idx];
 		regval = req->regval[idx];
@@ -1233,6 +1410,21 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu,
 			regval |= ((u64)nixlf << 24);
 		}
 
+		/* Mark the TL1 config as done when configured by the PF */
+		if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) &&
+		    schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) {
+			schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT);
+
+			mutex_lock(&rvu->rsrc_lock);
+
+			map_func = TXSCH_MAP_FUNC(pfvf_map[schq]);
+			map_flags = TXSCH_MAP_FLAGS(pfvf_map[schq]);
+
+			map_flags |= NIX_TXSCHQ_TL1_CFG_DONE;
+			pfvf_map[schq] = TXSCH_MAP(map_func, map_flags);
+			mutex_unlock(&rvu->rsrc_lock);
+		}
+
 		rvu_write64(rvu, blkaddr, reg, regval);
 
 		/* Check for SMQ flush, if so, poll for its completion */
@@ -1559,9 +1751,10 @@ static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
 		 * PF/VF pcifunc mapping info.
 		 */
 		txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max,
-					       sizeof(u16), GFP_KERNEL);
+					       sizeof(u32), GFP_KERNEL);
 		if (!txsch->pfvf_map)
 			return -ENOMEM;
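+		/* Fill with 0xFF so each u32 entry reads as U32_MAX,
+		 * i.e. not yet mapped to any pcifunc
+		 */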
+		memset(txsch->pfvf_map, U8_MAX, txsch->schq.max * sizeof(u32));
 	}
 	return 0;
 }
@@ -2020,7 +2213,7 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
 	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
 	mutex_lock(&rvu->rsrc_lock);
 	for (schq = 0; schq < txsch->schq.max; schq++) {
-		if (txsch->pfvf_map[schq] != pcifunc)
+		if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc)
 			continue;
 		cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
 		cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8);