The driver currently does not keep track of the logical RSS indirection
table. The hardware RSS table is set up with the standard default ring
distribution when the chip is initialized. This makes it difficult to
support user-specified indirection table entries. As a first step, add
the logical table to the main bnxt structure and allocate it according
to the chip-specific table size. Add a function that sets up the
default RSS ring distribution based on the number of RX rings.

Signed-off-by: Michael Chan <michael.chan@broadcom.com>
---
 drivers/net/ethernet/broadcom/bnxt/bnxt.c | 52 +++++++++++++++++++++++++++++++
 drivers/net/ethernet/broadcom/bnxt/bnxt.h | 4 +++
 2 files changed, 56 insertions(+)

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -4830,6 +4830,49 @@ static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
 	}
 }
 
+static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
+{
+	int entries;
+
+	if (bp->flags & BNXT_FLAG_CHIP_P5)
+		entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
+	else
+		entries = HW_HASH_INDEX_SIZE;
+
+	bp->rss_indir_tbl_entries = entries;
+	bp->rss_indir_tbl = kcalloc(entries, sizeof(*bp->rss_indir_tbl),
+				    GFP_KERNEL);
+	if (!bp->rss_indir_tbl)
+		return -ENOMEM;
+	return 0;
+}
+
+static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
+{
+	u16 max_rings, max_entries, pad, i;
+
+	if (!bp->rx_nr_rings)
+		return;
+
+	if (BNXT_CHIP_TYPE_NITRO_A0(bp))
+		max_rings = bp->rx_nr_rings - 1;
+	else
+		max_rings = bp->rx_nr_rings;
+
+	if (bp->flags & BNXT_FLAG_CHIP_P5)
+		max_entries = (max_rings + BNXT_RSS_TABLE_ENTRIES_P5 - 1) &
+			      ~(BNXT_RSS_TABLE_ENTRIES_P5 - 1);
+	else
+		max_entries = HW_HASH_INDEX_SIZE;
+
+	for (i = 0; i < max_entries; i++)
+		bp->rss_indir_tbl[i] = i % max_rings;
+
+	pad = bp->rss_indir_tbl_entries - max_entries;
+	if (pad)
+		memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
+}
+
 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
 {
 	u32 i, j, max_rings;
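
Taken together, the two new functions size the logical table for the chip
and fill it with an even round-robin spread of the RX rings. Note the
table is kcalloc()ed, so the tail memset only matters if the fill is ever
re-run after the RX ring count changes. The fill pattern itself can be
modelled in plain userspace C; this is an illustrative sketch only, with
the two constants assumed to match BNXT_RSS_TABLE_ENTRIES_P5 (64) and
HW_HASH_INDEX_SIZE (0x80) in bnxt.h:

/* Illustrative userspace model of the default RSS fill, not driver code.
 * The two constants are assumptions copied from bnxt.h.
 */
#include <stdint.h>
#include <stdio.h>

#define RSS_TABLE_ENTRIES_P5	64	/* entries per P5 hardware table */
#define HW_HASH_INDEX_SIZE	128	/* legacy chip table size */

static void set_dflt_rss_indir_tbl(uint16_t *tbl, int is_p5,
				   uint16_t max_rings)
{
	uint16_t max_entries, i;

	if (is_p5)
		/* Round up to a whole number of 64-entry HW tables */
		max_entries = (max_rings + RSS_TABLE_ENTRIES_P5 - 1) &
			      ~(RSS_TABLE_ENTRIES_P5 - 1);
	else
		max_entries = HW_HASH_INDEX_SIZE;

	/* Round-robin the RX rings across the table entries */
	for (i = 0; i < max_entries; i++)
		tbl[i] = i % max_rings;
}

int main(void)
{
	uint16_t tbl[HW_HASH_INDEX_SIZE] = { 0 };
	int i;

	/* e.g. 6 RX rings on a legacy chip: entries cycle 0..5 */
	set_dflt_rss_indir_tbl(tbl, 0, 6);
	for (i = 0; i < 12; i++)
		printf("%u ", (unsigned)tbl[i]);
	printf("\n");	/* prints: 0 1 2 3 4 5 0 1 2 3 4 5 */
	return 0;
}

With six rings on a legacy chip, every sixth entry wraps back to ring 0;
this is the same even spread the hardware falls back to today. Keeping a
software copy of the mapping is what makes a user-specified table possible
in later patches.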
@@ -11514,6 +11557,8 @@ static void bnxt_remove_one(struct pci_dev *pdev)
 	bnxt_free_ctx_mem(bp);
 	kfree(bp->ctx);
 	bp->ctx = NULL;
+	kfree(bp->rss_indir_tbl);
+	bp->rss_indir_tbl = NULL;
 	bnxt_free_port_stats(bp);
 	free_netdev(dev);
 }
@@ -12034,6 +12079,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	 */
 	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
 
+	rc = bnxt_alloc_rss_indir_tbl(bp);
+	if (rc)
+		goto init_err_pci_clean;
+	bnxt_set_dflt_rss_indir_tbl(bp);
+
 	if (BNXT_PF(bp)) {
 		if (!bnxt_pf_wq) {
 			bnxt_pf_wq =
@@ -12078,6 +12128,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	bnxt_free_ctx_mem(bp);
 	kfree(bp->ctx);
 	bp->ctx = NULL;
+	kfree(bp->rss_indir_tbl);
+	bp->rss_indir_tbl = NULL;
 
 init_err_free:
 	free_netdev(dev);
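
The bnxt_init_one() and bnxt_remove_one() hunks above follow the usual
probe/remove pairing: allocate once during probe, free on the probe error
ladder, and free again on device removal. A generic, self-contained sketch
of that unwind idiom (names invented here, not bnxt code):

/* Hypothetical illustration of the alloc/unwind pairing; only the
 * structure mirrors the patch.
 */
#include <stdio.h>
#include <stdlib.h>

static unsigned short *rss_tbl;

static int alloc_rss_tbl(size_t entries)
{
	rss_tbl = calloc(entries, sizeof(*rss_tbl));
	return rss_tbl ? 0 : -1;
}

static void free_rss_tbl(void)
{
	free(rss_tbl);
	rss_tbl = NULL;	/* poison the pointer, as the patch does */
}

static int example_probe(void)
{
	if (alloc_rss_tbl(512) < 0)
		goto init_err_free;
	if (0 /* imagine a later probe step failing */)
		goto init_err_clean;
	return 0;

init_err_clean:	/* later failures unwind the table first ... */
	free_rss_tbl();
init_err_free:	/* ... before the final teardown */
	return -1;
}

int main(void)
{
	printf("probe: %d\n", example_probe());
	free_rss_tbl();	/* the remove path mirrors the error path */
	return 0;
}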

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1023,6 +1023,8 @@ struct bnxt_vnic_info {
 #define BNXT_RSS_TABLE_MAX_TBL_P5	8
 #define BNXT_MAX_RSS_TABLE_SIZE_P5				\
 	(BNXT_RSS_TABLE_SIZE_P5 * BNXT_RSS_TABLE_MAX_TBL_P5)
+#define BNXT_MAX_RSS_TABLE_ENTRIES_P5				\
+	(BNXT_RSS_TABLE_ENTRIES_P5 * BNXT_RSS_TABLE_MAX_TBL_P5)
 
 	u32		rx_mask;
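
The new macro simply scales the per-table entry count by the number of
hardware tables a P5 VNIC can use. Assuming the values already defined in
bnxt.h (BNXT_RSS_TABLE_ENTRIES_P5 is 64, BNXT_RSS_TABLE_MAX_TBL_P5 is 8,
HW_HASH_INDEX_SIZE is 0x80), the logical table tops out at 512 u16
entries, i.e. 1 KB, on P5 versus 128 entries on earlier chips. A
compile-time sketch of that arithmetic:

/* Compile-time sanity sketch; the three input values are assumptions
 * copied from bnxt.h, not something this patch defines.
 */
#define BNXT_RSS_TABLE_ENTRIES_P5	64
#define BNXT_RSS_TABLE_MAX_TBL_P5	8
#define HW_HASH_INDEX_SIZE		0x80

#define BNXT_MAX_RSS_TABLE_ENTRIES_P5 \
	(BNXT_RSS_TABLE_ENTRIES_P5 * BNXT_RSS_TABLE_MAX_TBL_P5)

_Static_assert(BNXT_MAX_RSS_TABLE_ENTRIES_P5 == 512,
	       "8 HW tables of 64 entries each");
_Static_assert(HW_HASH_INDEX_SIZE == 128, "legacy table entries");

int main(void)
{
	return 0;
}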
@@ -1655,6 +1657,8 @@ struct bnxt {
 	struct bnxt_ring_grp_info	*grp_info;
 	struct bnxt_vnic_info	*vnic_info;
 	int			nr_vnics;
+	u16			*rss_indir_tbl;
+	u16			rss_indir_tbl_entries;
 	u32			rss_hash_cfg;
 
 	u16			max_mtu;
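
With the table pointer and entry count carried in struct bnxt, a follow-up
patch could expose the logical table through the ethtool RXFH callbacks.
A speculative userspace-style sketch of what a .get_rxfh-like copy-out
might look like (struct bnxt_lite and get_rxfh_indir() are invented here,
and are not part of this patch):

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the two fields added to struct bnxt */
struct bnxt_lite {
	uint16_t *rss_indir_tbl;
	uint16_t rss_indir_tbl_entries;
};

/* Fill the caller's indirection buffer from the driver's logical copy */
static void get_rxfh_indir(const struct bnxt_lite *bp, uint32_t *indir,
			   uint16_t n)
{
	uint16_t i;

	if (n > bp->rss_indir_tbl_entries)
		n = bp->rss_indir_tbl_entries;
	for (i = 0; i < n; i++)
		indir[i] = bp->rss_indir_tbl[i];	/* widen u16 to u32 */
}

int main(void)
{
	uint16_t tbl[4] = { 0, 1, 2, 3 };
	struct bnxt_lite bp = { tbl, 4 };
	uint32_t out[4];
	int i;

	get_rxfh_indir(&bp, out, 4);
	for (i = 0; i < 4; i++)
		printf("%u ", (unsigned)out[i]);	/* 0 1 2 3 */
	printf("\n");
	return 0;
}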