@@ -191,6 +191,7 @@ enum nix_scheduler {
#define NIX_INTF_TYPE_CGX 0
#define NIX_INTF_TYPE_LBK 1
#define NIX_INTF_TYPE_SDP 2
+#define NIX_INTF_TYPE_CPT 3
#define MAX_LMAC_PKIND 12
#define NIX_LINK_CGX_LMAC(a, b) (0 + 4 * (a) + (b))
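NIX_LINK_CGX_LMAC() flattens a (CGX, LMAC) pair into a NIX link index, with four LMACs per CGX implied by the multiplier. A quick standalone check of that mapping (macro body copied from the hunk above; the asserted values are illustrative):

```c
#include <assert.h>

#define NIX_LINK_CGX_LMAC(a, b) (0 + 4 * (a) + (b))

int main(void)
{
	/* CGX0/LMAC0 -> link 0, CGX0/LMAC3 -> link 3, CGX1/LMAC2 -> link 6 */
	assert(NIX_LINK_CGX_LMAC(0, 0) == 0);
	assert(NIX_LINK_CGX_LMAC(0, 3) == 3);
	assert(NIX_LINK_CGX_LMAC(1, 2) == 6);
	return 0;
}
```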
@@ -338,6 +338,9 @@ M(NIX_MCAST_GRP_UPDATE, 0x802d, nix_mcast_grp_update, \
nix_mcast_grp_update_req, \
nix_mcast_grp_update_rsp) \
M(NIX_LF_STATS, 0x802e, nix_lf_stats, nix_stats_req, nix_stats_rsp) \
+M(NIX_ALLOC_BPIDS, 0x8028, nix_alloc_bpids, nix_alloc_bpid_req, nix_bpids) \
+M(NIX_FREE_BPIDS, 0x8029, nix_free_bpids, nix_bpids, msg_rsp) \
+M(NIX_RX_CHAN_CFG, 0x802a, nix_rx_chan_cfg, nix_rx_chan_cfg, nix_rx_chan_cfg) \
/* MCS mbox IDs (range 0xA000 - 0xBFFF) */ \
M(MCS_ALLOC_RESOURCES, 0xa000, mcs_alloc_resources, mcs_alloc_rsrc_req, \
mcs_alloc_rsrc_rsp) \
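Each M() row above is an X-macro entry that the driver expands several times: into the MBOX_MSG_* ID enum, into handler prototypes, and (on the PF side) into otx2_mbox_alloc_msg_* constructors. A reduced sketch of the enum expansion, showing only two of the new rows (the full MBOX_MESSAGES table and its consumers live in mbox.h):

```c
#define MBOX_MESSAGES \
M(NIX_ALLOC_BPIDS, 0x8028, nix_alloc_bpids, nix_alloc_bpid_req, nix_bpids) \
M(NIX_FREE_BPIDS, 0x8029, nix_free_bpids, nix_bpids, msg_rsp)

/* First expansion: build the message-ID enum from the table */
#define M(_name, _id, _fn_name, _req_type, _rsp_type) MBOX_MSG_ ## _name = _id,
enum mbox_msg_id { MBOX_MESSAGES };
#undef M
```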
@@ -1347,6 +1350,29 @@ struct nix_mcast_grp_update_rsp {
u32 mce_start_index;
};
+struct nix_alloc_bpid_req {
+ struct mbox_msghdr hdr;
+ u8 bpid_cnt;
+ u8 type;
+ u64 rsvd;
+};
+
+struct nix_bpids {
+ struct mbox_msghdr hdr;
+ u8 bpid_cnt;
+ u16 bpids[8];
+ u64 rsvd;
+};
+
+struct nix_rx_chan_cfg {
+ struct mbox_msghdr hdr;
+ u8 type; /* Interface type (CGX/CPT/LBK) */
+ u8 read; /* 1 => read current value, 0 => write val */
+ u16 chan; /* RX channel to be configured */
+ u64 val; /* NIX_AF_RX_CHAN_CFG value */
+ u64 rsvd;
+};
+
/* Global NIX inline IPSec configuration */
struct nix_inline_ipsec_cfg {
struct mbox_msghdr hdr;
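Because the new messages are part of the MBOX_MESSAGES table, the PF/VF driver gets an otx2_mbox_alloc_msg_nix_alloc_bpids() constructor generated for free. A hypothetical consumer requesting CPT BPIDs might look like the sketch below; otx2_sync_mbox_msg() and otx2_mbox_get_rsp() are existing otx2 helpers, while otx2_alloc_cpt_bpids() itself is invented for illustration:

```c
static int otx2_alloc_cpt_bpids(struct otx2_nic *pfvf, u16 *out, u8 n)
{
	struct nix_alloc_bpid_req *req;
	struct nix_bpids *rsp;
	int rc, i;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_alloc_bpids(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}
	req->bpid_cnt = n;
	req->type = NIX_INTF_TYPE_CPT;

	rc = otx2_sync_mbox_msg(&pfvf->mbox);
	if (!rc) {
		rsp = (struct nix_bpids *)
		      otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
		if (IS_ERR(rsp)) {
			rc = PTR_ERR(rsp);
		} else {
			for (i = 0; i < rsp->bpid_cnt && i < n; i++)
				out[i] = rsp->bpids[i];
			rc = rsp->bpid_cnt;	/* may be a partial count */
		}
	}
	mutex_unlock(&pfvf->mbox.lock);
	return rc;
}
```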
@@ -569,6 +569,106 @@ void rvu_nix_flr_free_bpids(struct rvu *rvu, u16 pcifunc)
mutex_unlock(&rvu->rsrc_lock);
}
+int rvu_mbox_handler_nix_rx_chan_cfg(struct rvu *rvu,
+ struct nix_rx_chan_cfg *req,
+ struct nix_rx_chan_cfg *rsp)
+{
+ struct rvu_pfvf *pfvf;
+ int blkaddr;
+ u16 chan;
+
+ pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ /* Reject channel offsets outside this PF's RX channel range */
+ if (req->chan >= pfvf->rx_chan_cnt)
+ return NIX_AF_ERR_PARAM;
+
+ chan = pfvf->rx_chan_base + req->chan;
+
+ /* CPT RX channels are addressed with BIT(11) set */
+ if (req->type == NIX_INTF_TYPE_CPT)
+ chan |= BIT(11);
+
+ if (req->read) {
+ rsp->val = rvu_read64(rvu, blkaddr,
+ NIX_AF_RX_CHANX_CFG(chan));
+ rsp->chan = req->chan;
+ } else {
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan), req->val);
+ }
+ return 0;
+}
+
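The handler doubles as a read and a write path: with req->read set, the AF returns the current NIX_AF_RX_CHAN_CFG value in rsp->val, otherwise it writes req->val. A hypothetical PF-side read, again leaning on the generated otx2_mbox_alloc_msg_nix_rx_chan_cfg() constructor:

```c
static int otx2_read_rx_chan_cfg(struct otx2_nic *pfvf, u8 type,
				 u16 chan, u64 *val)
{
	struct nix_rx_chan_cfg *req, *rsp;
	int rc;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_rx_chan_cfg(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}
	req->type = type;	/* NIX_INTF_TYPE_CPT selects the BIT(11) alias */
	req->chan = chan;	/* offset within this PF's RX channel range */
	req->read = 1;

	rc = otx2_sync_mbox_msg(&pfvf->mbox);
	if (!rc) {
		rsp = (struct nix_rx_chan_cfg *)
		      otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
		if (IS_ERR(rsp))
			rc = PTR_ERR(rsp);
		else
			*val = rsp->val;
	}
	mutex_unlock(&pfvf->mbox.lock);
	return rc;
}
```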
+int rvu_mbox_handler_nix_alloc_bpids(struct rvu *rvu,
+ struct nix_alloc_bpid_req *req,
+ struct nix_bpids *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct nix_hw *nix_hw;
+ int blkaddr, cnt = 0;
+ struct nix_bp *bp;
+ int bpid, err;
+
+ err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+
+ bp = &nix_hw->bp;
+
+ /* Interfaces like SSO share one BPID across multiple
+ * applications. Reuse the BPID if it is already allocated
+ * for this interface type, otherwise allocate a new one.
+ */
+ if (req->type > NIX_INTF_TYPE_CPT) {
+ for (bpid = 0; bpid < bp->bpids.max; bpid++) {
+ if (bp->intf_map[bpid] != req->type)
+ continue;
+ /* Guard the fixed-size response array */
+ if (cnt >= ARRAY_SIZE(rsp->bpids))
+ break;
+ rsp->bpids[cnt++] = bpid + bp->free_pool_base;
+ rsp->bpid_cnt++;
+ bp->ref_cnt[bpid]++;
+ }
+ if (rsp->bpid_cnt)
+ return 0;
+ }
+
+ /* Cap the request at what the response array can carry */
+ if (req->bpid_cnt > ARRAY_SIZE(rsp->bpids))
+ return NIX_AF_ERR_PARAM;
+
+ for (cnt = 0; cnt < req->bpid_cnt; cnt++) {
+ bpid = rvu_alloc_rsrc(&bp->bpids);
+ /* Out of BPIDs; rsp->bpid_cnt holds the partial count */
+ if (bpid < 0)
+ return 0;
+ rsp->bpids[cnt] = bpid + bp->free_pool_base;
+ bp->intf_map[bpid] = req->type;
+ bp->fn_map[bpid] = pcifunc;
+ bp->ref_cnt[bpid]++;
+ rsp->bpid_cnt++;
+ }
+ return 0;
+}
+
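The req->type > NIX_INTF_TYPE_CPT branch gives share-on-type semantics: every requester of, say, an SSO BPID receives the IDs already mapped to that type and only the reference counts grow; fresh BPIDs are pulled from the bitmap only when no match exists. A standalone model of that refcounting (plain C, not driver code; all names invented for illustration):

```c
#include <assert.h>

#define MODEL_MAX_BPIDS 4

static int ref_cnt[MODEL_MAX_BPIDS];
static int intf_map[MODEL_MAX_BPIDS];

/* Return the shared BPID for @type, allocating a slot on first use */
static int model_get_bpid(int type)
{
	int i;

	for (i = 0; i < MODEL_MAX_BPIDS; i++) {
		if (ref_cnt[i] && intf_map[i] == type) {
			ref_cnt[i]++;	/* already mapped: share it */
			return i;
		}
	}
	for (i = 0; i < MODEL_MAX_BPIDS; i++) {
		if (!ref_cnt[i]) {	/* first user: allocate */
			intf_map[i] = type;
			ref_cnt[i] = 1;
			return i;
		}
	}
	return -1;			/* pool exhausted */
}

int main(void)
{
	int a = model_get_bpid(4);	/* e.g. a shared SSO-like type */
	int b = model_get_bpid(4);

	assert(a == b && ref_cnt[a] == 2);	/* same BPID, refcount bumped */
	return 0;
}
```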
+int rvu_mbox_handler_nix_free_bpids(struct rvu *rvu,
+ struct nix_bpids *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr, cnt, err;
+ struct nix_hw *nix_hw;
+ struct nix_bp *bp;
+ u16 bpid;
+
+ err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+
+ bp = &nix_hw->bp;
+
+ /* Reject counts beyond the fixed bpids[] array in the request */
+ if (req->bpid_cnt > ARRAY_SIZE(req->bpids))
+ return NIX_AF_ERR_PARAM;
+
+ for (cnt = 0; cnt < req->bpid_cnt; cnt++) {
+ bpid = req->bpids[cnt] - bp->free_pool_base;
+ /* Skip out-of-range or already-free BPIDs */
+ if (bpid >= bp->bpids.max || !bp->ref_cnt[bpid])
+ continue;
+ bp->ref_cnt[bpid]--;
+ if (bp->ref_cnt[bpid])
+ continue;
+ rvu_free_rsrc(&bp->bpids, bpid);
+ /* Clear ownership so a stale entry cannot satisfy a
+ * future shared-type lookup
+ */
+ bp->intf_map[bpid] = 0;
+ bp->fn_map[bpid] = 0;
+ }
+ return 0;
+}
+
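Freeing is symmetric: the caller hands back the IDs from the earlier nix_bpids response, and the AF returns a BPID to the pool only once its reference count drops to zero. A hypothetical PF-side free mirroring the alloc sketch above (the response is a plain msg_rsp, so only the return code matters):

```c
static int otx2_free_bpids(struct otx2_nic *pfvf, const u16 *bpids, u8 n)
{
	struct nix_bpids *req;
	int rc, i;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_free_bpids(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}
	req->bpid_cnt = n;
	for (i = 0; i < n && i < ARRAY_SIZE(req->bpids); i++)
		req->bpids[i] = bpids[i];

	rc = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return rc;
}
```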
static u16 nix_get_channel(u16 chan, bool cpt_link)
{
/* CPT channel for a given link channel is always