@@ -30,6 +30,7 @@ EXPORT_MAP := rte_pmd_dpaa_version.map
SRCS-$(CONFIG_RTE_LIBRTE_DPAA_PMD) += fmlib/fm_lib.c
SRCS-$(CONFIG_RTE_LIBRTE_DPAA_PMD) += fmlib/fm_vsp.c
SRCS-$(CONFIG_RTE_LIBRTE_DPAA_PMD) += dpaa_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA_PMD) += dpaa_flow.c
SRCS-$(CONFIG_RTE_LIBRTE_DPAA_PMD) += dpaa_rxtx.c
LDLIBS += -lrte_bus_dpaa
@@ -39,6 +39,7 @@
#include <dpaa_ethdev.h>
#include <dpaa_rxtx.h>
+#include <dpaa_flow.h>
#include <rte_pmd_dpaa.h>
#include <fsl_usd.h>
@@ -78,6 +79,7 @@ static uint64_t dev_tx_offloads_nodis =
/* Keep track of whether QMAN and BMAN have been globally initialized */
static int is_global_init;
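+/* Rx distribution mode: default_q uses a single default queue, fmc_q
+ * relies on a statically applied FMC configuration, otherwise the PMD
+ * programs the FM hash distribution itself (FMCLESS).
+ */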
+static int fmc_q = 1; /* Indicates the use of static fmc for distribution */
static int default_q; /* use default queue - FMC is not executed*/
/* At present we only allow up to 4 push mode queues as default - as each of
* this queue need dedicated portal and we are short of portals.
@@ -1294,16 +1296,15 @@ static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx,
}
};
- if (fqid) {
+ if (fmc_q || default_q) {
ret = qman_reserve_fqid(fqid);
if (ret) {
- DPAA_PMD_ERR("reserve rx fqid 0x%x failed with ret: %d",
+ DPAA_PMD_ERR("reserve rx fqid 0x%x failed, ret: %d",
fqid, ret);
return -EINVAL;
}
- } else {
- flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
}
+
DPAA_PMD_DEBUG("creating rx fq %p, fqid 0x%x", fq, fqid);
ret = qman_create_fq(fqid, flags, fq);
if (ret) {
@@ -1478,7 +1479,7 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
struct fman_if_bpool *bp, *tmp_bp;
uint32_t cgrid[DPAA_MAX_NUM_PCD_QUEUES];
uint32_t cgrid_tx[MAX_DPAA_CORES];
- char eth_buf[RTE_ETHER_ADDR_FMT_SIZE];
+ uint32_t dev_rx_fqids[DPAA_MAX_NUM_PCD_QUEUES];
PMD_INIT_FUNC_TRACE();
@@ -1495,30 +1496,36 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
dpaa_intf->ifid = dev_id;
dpaa_intf->cfg = cfg;
+ memset(dev_rx_fqids, 0, sizeof(dev_rx_fqids));
+
/* Initialize Rx FQ's */
if (default_q) {
num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
+ } else if (fmc_q) {
+ num_rx_fqs = 1;
} else {
- if (getenv("DPAA_NUM_RX_QUEUES"))
- num_rx_fqs = atoi(getenv("DPAA_NUM_RX_QUEUES"));
- else
- num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
+ /* FMCLESS mode: load balance across multiple cores. */
+ num_rx_fqs = rte_lcore_count();
}
-
/* Each device can not have more than DPAA_MAX_NUM_PCD_QUEUES RX
* queues.
*/
- if (num_rx_fqs <= 0 || num_rx_fqs > DPAA_MAX_NUM_PCD_QUEUES) {
+ if (num_rx_fqs < 0 || num_rx_fqs > DPAA_MAX_NUM_PCD_QUEUES) {
DPAA_PMD_ERR("Invalid number of RX queues\n");
return -EINVAL;
}
- dpaa_intf->rx_queues = rte_zmalloc(NULL,
- sizeof(struct qman_fq) * num_rx_fqs, MAX_CACHELINE);
- if (!dpaa_intf->rx_queues) {
- DPAA_PMD_ERR("Failed to alloc mem for RX queues\n");
- return -ENOMEM;
+ if (num_rx_fqs > 0) {
+ dpaa_intf->rx_queues = rte_zmalloc(NULL,
+ sizeof(struct qman_fq) * num_rx_fqs, MAX_CACHELINE);
+ if (!dpaa_intf->rx_queues) {
+ DPAA_PMD_ERR("Failed to alloc mem for RX queues\n");
+ return -ENOMEM;
+ }
+ } else {
+ dpaa_intf->rx_queues = NULL;
}
memset(cgrid, 0, sizeof(cgrid));
@@ -1537,7 +1544,7 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
}
/* If congestion control is enabled globally*/
- if (td_threshold) {
+ if (num_rx_fqs > 0 && td_threshold) {
dpaa_intf->cgr_rx = rte_zmalloc(NULL,
sizeof(struct qman_cgr) * num_rx_fqs, MAX_CACHELINE);
if (!dpaa_intf->cgr_rx) {
@@ -1556,12 +1563,20 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
dpaa_intf->cgr_rx = NULL;
}
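+ /* FMCLESS mode: allocate the Rx FQIDs from QMAN at run time
+ * instead of using the statically reserved PCD FQID range.
+ */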
+ if (!fmc_q && !default_q) {
+ ret = qman_alloc_fqid_range(dev_rx_fqids, num_rx_fqs,
+ num_rx_fqs, 0);
+ if (ret < 0) {
+ DPAA_PMD_ERR("Failed to alloc rx fqid's\n");
+ goto free_rx;
+ }
+ }
+
for (loop = 0; loop < num_rx_fqs; loop++) {
if (default_q)
fqid = cfg->rx_def;
else
- fqid = DPAA_PCD_FQID_START + fman_intf->mac_idx *
- DPAA_PCD_FQID_MULTIPLIER + loop;
+ fqid = dev_rx_fqids[loop];
if (dpaa_intf->cgr_rx)
dpaa_intf->cgr_rx[loop].cgrid = cgrid[loop];
@@ -1658,9 +1673,16 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
/* copy the primary mac address */
rte_ether_addr_copy(&fman_intf->mac_addr, &eth_dev->data->mac_addrs[0]);
- rte_ether_format_addr(eth_buf, sizeof(eth_buf), &fman_intf->mac_addr);
- DPAA_PMD_INFO("net: dpaa: %s: %s", dpaa_device->name, eth_buf);
+ RTE_LOG(INFO, PMD, "net: dpaa: %s: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ dpaa_device->name,
+ fman_intf->mac_addr.addr_bytes[0],
+ fman_intf->mac_addr.addr_bytes[1],
+ fman_intf->mac_addr.addr_bytes[2],
+ fman_intf->mac_addr.addr_bytes[3],
+ fman_intf->mac_addr.addr_bytes[4],
+ fman_intf->mac_addr.addr_bytes[5]);
+
/* Disable RX mode */
fman_if_discard_rx_errors(fman_intf);
@@ -1707,6 +1729,12 @@ dpaa_dev_uninit(struct rte_eth_dev *dev)
return -1;
}
+ /* DPAA FM deconfig */
+ if (!(default_q || fmc_q)) {
+ if (dpaa_fm_deconfig(dpaa_intf, dev->process_private))
+ DPAA_PMD_WARN("DPAA FM deconfig failed\n");
+ }
+
dpaa_eth_dev_close(dev);
/* release configuration memory */
@@ -1750,7 +1778,7 @@ dpaa_dev_uninit(struct rte_eth_dev *dev)
}
static int
-rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
+rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
struct rte_dpaa_device *dpaa_dev)
{
int diag;
@@ -1796,6 +1824,13 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
default_q = 1;
}
+ if (!(default_q || fmc_q)) {
+ if (dpaa_fm_init()) {
+ DPAA_PMD_ERR("FM init failed\n");
+ return -1;
+ }
+ }
+
/* disabling the default push mode for LS1043 */
if (dpaa_svr_family == SVR_LS1043A_FAMILY)
dpaa_push_mode_max_queue = 0;
@@ -1869,6 +1904,38 @@ rte_dpaa_remove(struct rte_dpaa_device *dpaa_dev)
return 0;
}
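+/* Destructor: on primary process exit in FMCLESS mode, undo any
+ * remaining FM port PCD configuration and terminate FM.
+ */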
+static void __attribute__((destructor(102))) dpaa_finish(void)
+{
+ /* For secondary, primary will do all the cleanup */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return;
+
+ if (!(default_q || fmc_q)) {
+ unsigned int i;
+
+ for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
+ if (rte_eth_devices[i].dev_ops == &dpaa_devops) {
+ struct rte_eth_dev *dev = &rte_eth_devices[i];
+ struct dpaa_if *dpaa_intf =
+ dev->data->dev_private;
+ struct fman_if *fif =
+ dev->process_private;
+ if (dpaa_intf->port_handle)
+ if (dpaa_fm_deconfig(dpaa_intf, fif))
+ DPAA_PMD_WARN("DPAA FM "
+ "deconfig failed\n");
+ }
+ }
+ if (is_global_init)
+ if (dpaa_fm_term())
+ DPAA_PMD_WARN("DPAA FM term failed\n");
+
+ is_global_init = 0;
+
+ DPAA_PMD_INFO("DPAA fman cleaned up");
+ }
+}
+
static struct rte_dpaa_driver rte_dpaa_pmd = {
.drv_flags = RTE_DPAA_DRV_INTR_LSC,
.drv_type = FSL_DPAA_ETH,
@@ -118,6 +118,10 @@ struct dpaa_if {
uint32_t ifid;
struct dpaa_bp_info *bp_info;
struct rte_eth_fc_conf *fc_conf;
+ void *port_handle;
+ void *netenv_handle;
+ void *scheme_handle[2];
+ uint32_t scheme_count;
};
struct dpaa_if_stats {
new file mode 100644
@@ -0,0 +1,905 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017-2019 NXP
+ */
+
+/* System headers */
+#include <stdio.h>
+#include <inttypes.h>
+#include <unistd.h>
+#include <sys/types.h>
+
+#include <dpaa_ethdev.h>
+#include <dpaa_flow.h>
+#include <rte_dpaa_logs.h>
+#include <fmlib/fm_port_ext.h>
+
+#define DPAA_MAX_NUM_ETH_DEV 8
+
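+/* Accessor helpers for the key extract array inside the KeyGen scheme
+ * parameters; they shorten the deeply nested fmlib structure accesses
+ * used by the set_hashParams_* helpers below.
+ */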
+static inline
+ioc_fm_pcd_extract_entry_t *
+SCH_EXT_ARR(ioc_fm_pcd_kg_scheme_params_t *scheme_params, int hdr_idx)
+{
+return &scheme_params->param.key_extract_and_hash_params.extract_array[hdr_idx];
+}
+
+#define SCH_EXT_HDR(scheme_params, hdr_idx) \
+ SCH_EXT_ARR(scheme_params, hdr_idx)->extract_params.extract_by_hdr
+
+#define SCH_EXT_FULL_FLD(scheme_params, hdr_idx) \
+ SCH_EXT_HDR(scheme_params, hdr_idx).extract_by_hdr_type.full_field
+
+/* FM global info */
+struct dpaa_fm_info {
+ t_Handle fman_handle;
+ t_Handle pcd_handle;
+};
+
+/* FM model that is read from and written to file */
+struct dpaa_fm_model {
+ uint32_t dev_count;
+ uint8_t device_order[DPAA_MAX_NUM_ETH_DEV];
+ t_FmPortParams fm_port_params[DPAA_MAX_NUM_ETH_DEV];
+ t_Handle netenv_devid[DPAA_MAX_NUM_ETH_DEV];
+ t_Handle scheme_devid[DPAA_MAX_NUM_ETH_DEV][2];
+};
+
+static struct dpaa_fm_info fm_info;
+static struct dpaa_fm_model fm_model;
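+/* File used to persist the FM model across runs, so that configuration
+ * left behind by a previous DPDK instance can be cleaned up.
+ */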
+static const char *fm_log = "/tmp/fmdpdk.bin";
+
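+/* Re-open FM, FM PCD and the ports recorded in the saved FM model, then
+ * tear down the schemes, NetEnv and port PCD left by a previous instance.
+ */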
+static void fm_prev_cleanup(void)
+{
+ uint32_t fman_id = 0, i = 0, devid;
+ struct dpaa_if dpaa_intf = {0};
+ t_FmPcdParams fmPcdParams = {0};
+ PMD_INIT_FUNC_TRACE();
+
+ fm_info.fman_handle = FM_Open(fman_id);
+ if (!fm_info.fman_handle) {
+ printf("\n%s- unable to open FMAN", __func__);
+ return;
+ }
+
+ fmPcdParams.h_Fm = fm_info.fman_handle;
+ fmPcdParams.prsSupport = true;
+ fmPcdParams.kgSupport = true;
+ /* FM PCD Open */
+ fm_info.pcd_handle = FM_PCD_Open(&fmPcdParams);
+ if (!fm_info.pcd_handle) {
+ printf("\n%s- unable to open PCD", __func__);
+ return;
+ }
+
+ while (i < fm_model.dev_count) {
+ devid = fm_model.device_order[i];
+ /* FM Port Open */
+ fm_model.fm_port_params[devid].h_Fm = fm_info.fman_handle;
+ dpaa_intf.port_handle =
+ FM_PORT_Open(&fm_model.fm_port_params[devid]);
+ dpaa_intf.scheme_handle[0] = CreateDevice(fm_info.pcd_handle,
+ fm_model.scheme_devid[devid][0]);
+ dpaa_intf.scheme_count = 1;
+ if (fm_model.scheme_devid[devid][1]) {
+ dpaa_intf.scheme_handle[1] =
+ CreateDevice(fm_info.pcd_handle,
+ fm_model.scheme_devid[devid][1]);
+ if (dpaa_intf.scheme_handle[1])
+ dpaa_intf.scheme_count++;
+ }
+
+ dpaa_intf.netenv_handle = CreateDevice(fm_info.pcd_handle,
+ fm_model.netenv_devid[devid]);
+ i++;
+ if (!dpaa_intf.netenv_handle ||
+ !dpaa_intf.scheme_handle[0] ||
+ !dpaa_intf.port_handle)
+ continue;
+
+ if (dpaa_fm_deconfig(&dpaa_intf, NULL))
+ printf("\nDPAA FM deconfig failed\n");
+ }
+
+ if (dpaa_fm_term())
+ printf("\nDPAA FM term failed\n");
+
+ memset(&fm_model, 0, sizeof(struct dpaa_fm_model));
+}
+
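+/* Persist the current FM model to fm_log so that a later run can detect
+ * and clean up this configuration.
+ */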
+void dpaa_write_fm_config_to_file(void)
+{
+ size_t bytes_write;
+ FILE *fp = fopen(fm_log, "wb");
+ PMD_INIT_FUNC_TRACE();
+
+ if (!fp) {
+ DPAA_PMD_ERR("File open failed");
+ return;
+ }
+ bytes_write = fwrite(&fm_model, sizeof(struct dpaa_fm_model), 1, fp);
+ if (!bytes_write) {
+ DPAA_PMD_WARN("No bytes write");
+ fclose(fp);
+ return;
+ }
+ fclose(fp);
+}
+
+static void dpaa_read_fm_config_from_file(void)
+{
+ size_t bytes_read;
+ FILE *fp = fopen(fm_log, "rb");
+ PMD_INIT_FUNC_TRACE();
+
+ if (!fp)
+ return;
+ DPAA_PMD_INFO("Previous DPDK-FM config instance present, cleaning up.");
+
+ bytes_read = fread(&fm_model, sizeof(struct dpaa_fm_model), 1, fp);
+ if (!bytes_read) {
+ DPAA_PMD_WARN("No bytes read");
+ fclose(fp);
+ return;
+ }
+ fclose(fp);
+
+ /* FM cleanup from a previously configured application */
+ fm_prev_cleanup();
+}
+
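+/* Each set_hashParams_* helper appends two full-field extracts (source
+ * and destination of the given header) to the scheme and returns the
+ * updated extract index.
+ */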
+static inline int set_hashParams_eth(
+ ioc_fm_pcd_kg_scheme_params_t *scheme_params, int hdr_idx)
+{
+ int k;
+
+ for (k = 0; k < 2; k++) {
+ SCH_EXT_ARR(scheme_params, hdr_idx)->type =
+ e_IOC_FM_PCD_EXTRACT_BY_HDR;
+ SCH_EXT_HDR(scheme_params, hdr_idx).hdr =
+ HEADER_TYPE_ETH;
+ SCH_EXT_HDR(scheme_params, hdr_idx).hdr_index =
+ e_IOC_FM_PCD_HDR_INDEX_NONE;
+ SCH_EXT_HDR(scheme_params, hdr_idx).type =
+ e_IOC_FM_PCD_EXTRACT_FULL_FIELD;
+ if (k == 0)
+ SCH_EXT_FULL_FLD(scheme_params, hdr_idx).eth =
+ IOC_NET_HEADER_FIELD_ETH_SA;
+ else
+ SCH_EXT_FULL_FLD(scheme_params, hdr_idx).eth =
+ IOC_NET_HEADER_FIELD_ETH_DA;
+ hdr_idx++;
+ }
+ return hdr_idx;
+}
+
+static inline int set_hashParams_ipv4(
+ ioc_fm_pcd_kg_scheme_params_t *scheme_params, int hdr_idx)
+{
+ int k;
+
+ for (k = 0; k < 2; k++) {
+ SCH_EXT_ARR(scheme_params, hdr_idx)->type =
+ e_IOC_FM_PCD_EXTRACT_BY_HDR;
+ SCH_EXT_HDR(scheme_params, hdr_idx).hdr =
+ HEADER_TYPE_IPv4;
+ SCH_EXT_HDR(scheme_params, hdr_idx).hdr_index =
+ e_IOC_FM_PCD_HDR_INDEX_NONE;
+ SCH_EXT_HDR(scheme_params, hdr_idx).type =
+ e_IOC_FM_PCD_EXTRACT_FULL_FIELD;
+ if (k == 0)
+ SCH_EXT_FULL_FLD(scheme_params, hdr_idx).ipv4 =
+ IOC_NET_HEADER_FIELD_IPv4_SRC_IP;
+ else
+ SCH_EXT_FULL_FLD(scheme_params, hdr_idx).ipv4 =
+ IOC_NET_HEADER_FIELD_IPv4_DST_IP;
+ hdr_idx++;
+ }
+ return hdr_idx;
+}
+
+static inline int set_hashParams_ipv6(
+ ioc_fm_pcd_kg_scheme_params_t *scheme_params, int hdr_idx)
+{
+ int k;
+
+ for (k = 0; k < 2; k++) {
+ SCH_EXT_ARR(scheme_params, hdr_idx)->type =
+ e_IOC_FM_PCD_EXTRACT_BY_HDR;
+ SCH_EXT_HDR(scheme_params, hdr_idx).hdr =
+ HEADER_TYPE_IPv6;
+ SCH_EXT_HDR(scheme_params, hdr_idx).hdr_index =
+ e_IOC_FM_PCD_HDR_INDEX_NONE;
+ SCH_EXT_HDR(scheme_params, hdr_idx).type =
+ e_IOC_FM_PCD_EXTRACT_FULL_FIELD;
+ if (k == 0)
+ SCH_EXT_FULL_FLD(scheme_params, hdr_idx).ipv6 =
+ IOC_NET_HEADER_FIELD_IPv6_SRC_IP;
+ else
+ SCH_EXT_FULL_FLD(scheme_params, hdr_idx).ipv6 =
+ IOC_NET_HEADER_FIELD_IPv6_DST_IP;
+ hdr_idx++;
+ }
+ return hdr_idx;
+}
+
+static inline int set_hashParams_udp(
+ ioc_fm_pcd_kg_scheme_params_t *scheme_params, int hdr_idx)
+{
+ int k;
+
+ for (k = 0; k < 2; k++) {
+ SCH_EXT_ARR(scheme_params, hdr_idx)->type =
+ e_IOC_FM_PCD_EXTRACT_BY_HDR;
+ SCH_EXT_HDR(scheme_params, hdr_idx).hdr =
+ HEADER_TYPE_UDP;
+ SCH_EXT_HDR(scheme_params, hdr_idx).hdr_index =
+ e_IOC_FM_PCD_HDR_INDEX_NONE;
+ SCH_EXT_HDR(scheme_params, hdr_idx).type =
+ e_IOC_FM_PCD_EXTRACT_FULL_FIELD;
+ if (k == 0)
+ SCH_EXT_FULL_FLD(scheme_params, hdr_idx).udp =
+ IOC_NET_HEADER_FIELD_UDP_PORT_SRC;
+ else
+ SCH_EXT_FULL_FLD(scheme_params, hdr_idx).udp =
+ IOC_NET_HEADER_FIELD_UDP_PORT_DST;
+ hdr_idx++;
+ }
+ return hdr_idx;
+}
+
+static inline int set_hashParams_tcp(
+ ioc_fm_pcd_kg_scheme_params_t *scheme_params, int hdr_idx)
+{
+ int k;
+
+ for (k = 0; k < 2; k++) {
+ SCH_EXT_ARR(scheme_params, hdr_idx)->type =
+ e_IOC_FM_PCD_EXTRACT_BY_HDR;
+ SCH_EXT_HDR(scheme_params, hdr_idx).hdr =
+ HEADER_TYPE_TCP;
+ SCH_EXT_HDR(scheme_params, hdr_idx).hdr_index =
+ e_IOC_FM_PCD_HDR_INDEX_NONE;
+ SCH_EXT_HDR(scheme_params, hdr_idx).type =
+ e_IOC_FM_PCD_EXTRACT_FULL_FIELD;
+ if (k == 0)
+ SCH_EXT_FULL_FLD(scheme_params, hdr_idx).tcp =
+ IOC_NET_HEADER_FIELD_TCP_PORT_SRC;
+ else
+ SCH_EXT_FULL_FLD(scheme_params, hdr_idx).tcp =
+ IOC_NET_HEADER_FIELD_TCP_PORT_DST;
+ hdr_idx++;
+ }
+ return hdr_idx;
+}
+
+static inline int set_hashParams_sctp(
+ ioc_fm_pcd_kg_scheme_params_t *scheme_params, int hdr_idx)
+{
+ int k;
+
+ for (k = 0; k < 2; k++) {
+ SCH_EXT_ARR(scheme_params, hdr_idx)->type =
+ e_IOC_FM_PCD_EXTRACT_BY_HDR;
+ SCH_EXT_HDR(scheme_params, hdr_idx).hdr =
+ HEADER_TYPE_SCTP;
+ SCH_EXT_HDR(scheme_params, hdr_idx).hdr_index =
+ e_IOC_FM_PCD_HDR_INDEX_NONE;
+ SCH_EXT_HDR(scheme_params, hdr_idx).type =
+ e_IOC_FM_PCD_EXTRACT_FULL_FIELD;
+ if (k == 0)
+ SCH_EXT_FULL_FLD(scheme_params, hdr_idx).sctp =
+ IOC_NET_HEADER_FIELD_SCTP_PORT_SRC;
+ else
+ SCH_EXT_FULL_FLD(scheme_params, hdr_idx).sctp =
+ IOC_NET_HEADER_FIELD_SCTP_PORT_DST;
+ hdr_idx++;
+ }
+ return hdr_idx;
+}
+
+/* Set scheme params for hash distribution */
+static int set_scheme_params(
+ ioc_fm_pcd_kg_scheme_params_t *scheme_params,
+ ioc_fm_pcd_net_env_params_t *dist_units,
+ struct dpaa_if *dpaa_intf,
+ struct fman_if *fif __rte_unused)
+{
+ int dist_idx, hdr_idx = 0;
+ PMD_INIT_FUNC_TRACE();
+
+ scheme_params->param.use_hash = 1;
+ scheme_params->param.modify = false;
+ scheme_params->param.always_direct = false;
+ scheme_params->param.scheme_counter.update = 1;
+ scheme_params->param.scheme_counter.value = 0;
+ scheme_params->param.next_engine = e_IOC_FM_PCD_DONE;
+ scheme_params->param.base_fqid = dpaa_intf->rx_queues[0].fqid;
+ scheme_params->param.net_env_params.net_env_id =
+ dpaa_intf->netenv_handle;
+ scheme_params->param.net_env_params.num_of_distinction_units =
+ dist_units->param.num_of_distinction_units;
+
+ scheme_params->param.key_extract_and_hash_params
+ .hash_distribution_num_of_fqids =
+ dpaa_intf->nb_rx_queues;
+ scheme_params->param.key_extract_and_hash_params
+ .num_of_used_extracts =
+ 2 * dist_units->param.num_of_distinction_units;
+
+ for (dist_idx = 0; dist_idx <
+ dist_units->param.num_of_distinction_units;
+ dist_idx++) {
+ switch (dist_units->param.units[dist_idx].hdrs[0].hdr) {
+ case HEADER_TYPE_ETH:
+ hdr_idx = set_hashParams_eth(scheme_params, hdr_idx);
+ break;
+
+ case HEADER_TYPE_IPv4:
+ hdr_idx = set_hashParams_ipv4(scheme_params, hdr_idx);
+ break;
+
+ case HEADER_TYPE_IPv6:
+ hdr_idx = set_hashParams_ipv6(scheme_params, hdr_idx);
+ break;
+
+ case HEADER_TYPE_UDP:
+ hdr_idx = set_hashParams_udp(scheme_params, hdr_idx);
+ break;
+
+ case HEADER_TYPE_TCP:
+ hdr_idx = set_hashParams_tcp(scheme_params, hdr_idx);
+ break;
+
+ case HEADER_TYPE_SCTP:
+ hdr_idx = set_hashParams_sctp(scheme_params, hdr_idx);
+ break;
+
+ default:
+ DPAA_PMD_ERR("Invalid Distinction Unit");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
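+/* Translate the requested ETH_RSS_* bits into FM distinction units, one
+ * per protocol header type, folding duplicate selections.
+ */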
+static void set_dist_units(ioc_fm_pcd_net_env_params_t *dist_units,
+ uint64_t req_dist_set)
+{
+ uint32_t loop = 0, dist_idx = 0, dist_field = 0;
+ int l2_configured = 0, ipv4_configured = 0, ipv6_configured = 0;
+ int udp_configured = 0, tcp_configured = 0, sctp_configured = 0;
+ PMD_INIT_FUNC_TRACE();
+
+ if (!req_dist_set)
+ dist_units->param.units[dist_idx++].hdrs[0].hdr =
+ HEADER_TYPE_ETH;
+
+ while (req_dist_set) {
+ if (req_dist_set % 2 != 0) {
+ dist_field = 1U << loop;
+ switch (dist_field) {
+ case ETH_RSS_L2_PAYLOAD:
+
+ if (l2_configured)
+ break;
+ l2_configured = 1;
+
+ dist_units->param.units[dist_idx++].hdrs[0].hdr =
+ HEADER_TYPE_ETH;
+ break;
+
+ case ETH_RSS_IPV4:
+ case ETH_RSS_FRAG_IPV4:
+ case ETH_RSS_NONFRAG_IPV4_OTHER:
+
+ if (ipv4_configured)
+ break;
+ ipv4_configured = 1;
+ dist_units->param.units[dist_idx++].hdrs[0].hdr =
+ HEADER_TYPE_IPv4;
+ break;
+
+ case ETH_RSS_IPV6:
+ case ETH_RSS_FRAG_IPV6:
+ case ETH_RSS_NONFRAG_IPV6_OTHER:
+ case ETH_RSS_IPV6_EX:
+
+ if (ipv6_configured)
+ break;
+ ipv6_configured = 1;
+ dist_units->param.units[dist_idx++].hdrs[0].hdr =
+ HEADER_TYPE_IPv6;
+ break;
+
+ case ETH_RSS_NONFRAG_IPV4_TCP:
+ case ETH_RSS_NONFRAG_IPV6_TCP:
+ case ETH_RSS_IPV6_TCP_EX:
+
+ if (tcp_configured)
+ break;
+ tcp_configured = 1;
+ dist_units->param.units[dist_idx++].hdrs[0].hdr =
+ HEADER_TYPE_TCP;
+ break;
+
+ case ETH_RSS_NONFRAG_IPV4_UDP:
+ case ETH_RSS_NONFRAG_IPV6_UDP:
+ case ETH_RSS_IPV6_UDP_EX:
+
+ if (udp_configured)
+ break;
+ udp_configured = 1;
+ dist_units->param.units[dist_idx++].hdrs[0].hdr =
+ HEADER_TYPE_UDP;
+ break;
+
+ case ETH_RSS_NONFRAG_IPV4_SCTP:
+ case ETH_RSS_NONFRAG_IPV6_SCTP:
+
+ if (sctp_configured)
+ break;
+ sctp_configured = 1;
+
+ dist_units->param.units[dist_idx++].hdrs[0].hdr =
+ HEADER_TYPE_SCTP;
+ break;
+
+ default:
+ DPAA_PMD_ERR("Bad flow distribution option");
+ }
+ }
+ req_dist_set = req_dist_set >> 1;
+ loop++;
+ }
+
+ /* Dist units is set to dist_idx */
+ dist_units->param.num_of_distinction_units = dist_idx;
+}
+
+/* Apply PCD configuration on interface */
+static inline int set_port_pcd(struct dpaa_if *dpaa_intf)
+{
+ int ret = 0;
+ unsigned int idx;
+ ioc_fm_port_pcd_params_t pcd_param;
+ ioc_fm_port_pcd_prs_params_t prs_param;
+ ioc_fm_port_pcd_kg_params_t kg_param;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* PCD support for hash distribution */
+ uint8_t pcd_support = e_FM_PORT_PCD_SUPPORT_PRS_AND_KG;
+
+ memset(&pcd_param, 0, sizeof(pcd_param));
+ memset(&prs_param, 0, sizeof(prs_param));
+ memset(&kg_param, 0, sizeof(kg_param));
+
+ /* Set parse params */
+ prs_param.first_prs_hdr = HEADER_TYPE_ETH;
+
+ /* Set kg params */
+ for (idx = 0; idx < dpaa_intf->scheme_count; idx++)
+ kg_param.scheme_ids[idx] = dpaa_intf->scheme_handle[idx];
+ kg_param.num_of_schemes = dpaa_intf->scheme_count;
+
+ /* Set pcd params */
+ pcd_param.net_env_id = dpaa_intf->netenv_handle;
+ pcd_param.pcd_support = pcd_support;
+ pcd_param.p_kg_params = &kg_param;
+ pcd_param.p_prs_params = &prs_param;
+
+ /* FM PORT Disable */
+ ret = FM_PORT_Disable(dpaa_intf->port_handle);
+ if (ret != E_OK) {
+ DPAA_PMD_ERR("FM_PORT_Disable: Failed");
+ return ret;
+ }
+
+ /* FM PORT SetPCD */
+ ret = FM_PORT_SetPCD(dpaa_intf->port_handle, &pcd_param);
+ if (ret != E_OK) {
+ DPAA_PMD_ERR("FM_PORT_SetPCD: Failed");
+ return ret;
+ }
+
+ /* FM PORT Enable */
+ ret = FM_PORT_Enable(dpaa_intf->port_handle);
+ if (ret != E_OK) {
+ DPAA_PMD_ERR("FM_PORT_Enable: Failed");
+ goto fm_port_delete_pcd;
+ }
+
+ return 0;
+
+fm_port_delete_pcd:
+ /* FM PORT DeletePCD */
+ ret = FM_PORT_DeletePCD(dpaa_intf->port_handle);
+ if (ret != E_OK) {
+ DPAA_PMD_ERR("FM_PORT_DeletePCD: Failed\n");
+ return ret;
+ }
+ return -1;
+}
+
+/* Unset PCD NetEnv and scheme */
+static inline void unset_pcd_netenv_scheme(struct dpaa_if *dpaa_intf)
+{
+ int ret;
+ PMD_INIT_FUNC_TRACE();
+
+ /* reduce scheme count */
+ if (dpaa_intf->scheme_count)
+ dpaa_intf->scheme_count--;
+
+ DPAA_PMD_DEBUG("KG SCHEME DEL %d handle =%p",
+ dpaa_intf->scheme_count,
+ dpaa_intf->scheme_handle[dpaa_intf->scheme_count]);
+
+ ret = FM_PCD_KgSchemeDelete(
+ dpaa_intf->scheme_handle[dpaa_intf->scheme_count]);
+ if (ret != E_OK)
+ DPAA_PMD_ERR("FM_PCD_KgSchemeDelete: Failed");
+
+ dpaa_intf->scheme_handle[dpaa_intf->scheme_count] = NULL;
+}
+
+/* Set the default scheme (no hash distribution) */
+static inline int set_default_scheme(struct dpaa_if *dpaa_intf)
+{
+ ioc_fm_pcd_kg_scheme_params_t scheme_params;
+ int idx = dpaa_intf->scheme_count;
+ PMD_INIT_FUNC_TRACE();
+
+ /* Set PCD NetEnvCharacteristics */
+ memset(&scheme_params, 0, sizeof(scheme_params));
+
+ /* Add 10 to the default scheme's relative id: the number of
+ * interfaces is less than 10, so this keeps relative scheme ids
+ * unique across the hash and default schemes.
+ */
+ scheme_params.param.scm_id.relative_scheme_id =
+ 10 + dpaa_intf->ifid;
+ scheme_params.param.use_hash = 0;
+ scheme_params.param.next_engine = e_IOC_FM_PCD_DONE;
+ scheme_params.param.net_env_params.num_of_distinction_units = 0;
+ scheme_params.param.net_env_params.net_env_id =
+ dpaa_intf->netenv_handle;
+ scheme_params.param.base_fqid = dpaa_intf->rx_queues[0].fqid;
+ scheme_params.param.key_extract_and_hash_params
+ .hash_distribution_num_of_fqids = 1;
+ scheme_params.param.key_extract_and_hash_params
+ .num_of_used_extracts = 0;
+ scheme_params.param.modify = false;
+ scheme_params.param.always_direct = false;
+ scheme_params.param.scheme_counter.update = 1;
+ scheme_params.param.scheme_counter.value = 0;
+
+ /* FM PCD KgSchemeSet */
+ dpaa_intf->scheme_handle[idx] =
+ FM_PCD_KgSchemeSet(fm_info.pcd_handle, &scheme_params);
+ DPAA_PMD_DEBUG("KG SCHEME SET %d handle =%p",
+ idx, dpaa_intf->scheme_handle[idx]);
+ if (!dpaa_intf->scheme_handle[idx]) {
+ DPAA_PMD_ERR("FM_PCD_KgSchemeSet: Failed");
+ return -1;
+ }
+
+ fm_model.scheme_devid[dpaa_intf->ifid][idx] =
+ GetDeviceId(dpaa_intf->scheme_handle[idx]);
+ dpaa_intf->scheme_count++;
+ return 0;
+}
+
+
+/* Set the PCD distinction units and hash distribution scheme */
+static inline int set_pcd_netenv_scheme(struct dpaa_if *dpaa_intf,
+ uint64_t req_dist_set,
+ struct fman_if *fif)
+{
+ int ret = -1;
+ ioc_fm_pcd_net_env_params_t dist_units;
+ ioc_fm_pcd_kg_scheme_params_t scheme_params;
+ int idx = dpaa_intf->scheme_count;
+ PMD_INIT_FUNC_TRACE();
+
+ /* Set PCD NetEnvCharacteristics */
+ memset(&dist_units, 0, sizeof(dist_units));
+ memset(&scheme_params, 0, sizeof(scheme_params));
+
+ /* Set dist unit header type */
+ set_dist_units(&dist_units, req_dist_set);
+
+ scheme_params.param.scm_id.relative_scheme_id = dpaa_intf->ifid;
+
+ /* Set PCD Scheme params */
+ ret = set_scheme_params(&scheme_params, &dist_units, dpaa_intf, fif);
+ if (ret) {
+ DPAA_PMD_ERR("Set scheme params: Failed");
+ return -1;
+ }
+
+ /* FM PCD KgSchemeSet */
+ dpaa_intf->scheme_handle[idx] =
+ FM_PCD_KgSchemeSet(fm_info.pcd_handle, &scheme_params);
+ DPAA_PMD_DEBUG("KG SCHEME SET %d handle =%p",
+ idx, dpaa_intf->scheme_handle[idx]);
+ if (!dpaa_intf->scheme_handle[idx]) {
+ DPAA_PMD_ERR("FM_PCD_KgSchemeSet: Failed");
+ return -1;
+ }
+
+ fm_model.scheme_devid[dpaa_intf->ifid][idx] =
+ GetDeviceId(dpaa_intf->scheme_handle[idx]);
+ dpaa_intf->scheme_count++;
+ return 0;
+}
+
+
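+/* Map the FMAN MAC type to the corresponding FM Rx port type */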
+static inline int get_port_type(struct fman_if *fif)
+{
+ if (fif->mac_type == fman_mac_1g)
+ return e_FM_PORT_TYPE_RX;
+ else if (fif->mac_type == fman_mac_2_5g)
+ return e_FM_PORT_TYPE_RX_2_5G;
+ else if (fif->mac_type == fman_mac_10g)
+ return e_FM_PORT_TYPE_RX_10G;
+
+ DPAA_PMD_ERR("MAC type unsupported");
+ return -1;
+}
+
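+/* Open the FM Rx port for this interface, create its PCD NetEnv from the
+ * requested distribution set, and record both in the FM model.
+ */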
+static inline int set_fm_port_handle(struct dpaa_if *dpaa_intf,
+ uint64_t req_dist_set,
+ struct fman_if *fif)
+{
+ t_FmPortParams fm_port_params;
+ ioc_fm_pcd_net_env_params_t dist_units;
+ PMD_INIT_FUNC_TRACE();
+
+ /* FMAN MAC index mapping (index 0 is unused; indexes 1-8 are the
+ * 1G ports, indexes 9-10 the 10G ports).
+ */
+ uint8_t mac_idx[] = {-1, 0, 1, 2, 3, 4, 5, 6, 7, 0, 1};
+
+ /* Memset FM port params */
+ memset(&fm_port_params, 0, sizeof(fm_port_params));
+
+ /* Set FM port params */
+ fm_port_params.h_Fm = fm_info.fman_handle;
+ fm_port_params.portType = get_port_type(fif);
+ fm_port_params.portId = mac_idx[fif->mac_idx];
+
+ /* FM PORT Open */
+ dpaa_intf->port_handle = FM_PORT_Open(&fm_port_params);
+ if (!dpaa_intf->port_handle) {
+ DPAA_PMD_ERR("FM_PORT_Open: Failed\n");
+ return -1;
+ }
+
+ fm_model.fm_port_params[dpaa_intf->ifid] = fm_port_params;
+
+ /* Set PCD NetEnvCharacteristics */
+ memset(&dist_units, 0, sizeof(dist_units));
+
+ /* Set dist unit header type */
+ set_dist_units(&dist_units, req_dist_set);
+
+ /* FM PCD NetEnvCharacteristicsSet */
+ dpaa_intf->netenv_handle = FM_PCD_NetEnvCharacteristicsSet(
+ fm_info.pcd_handle, &dist_units);
+ if (!dpaa_intf->netenv_handle) {
+ DPAA_PMD_ERR("FM_PCD_NetEnvCharacteristicsSet: Failed");
+ return -1;
+ }
+
+ fm_model.netenv_devid[dpaa_intf->ifid] =
+ GetDeviceId(dpaa_intf->netenv_handle);
+
+ return 0;
+}
+
+/* De-Configure DPAA FM */
+int dpaa_fm_deconfig(struct dpaa_if *dpaa_intf,
+ struct fman_if *fif __rte_unused)
+{
+ int ret;
+ unsigned int idx;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* FM PORT Disable */
+ ret = FM_PORT_Disable(dpaa_intf->port_handle);
+ if (ret != E_OK) {
+ DPAA_PMD_ERR("FM_PORT_Disable: Failed");
+ return ret;
+ }
+
+ /* FM PORT DeletePCD */
+ ret = FM_PORT_DeletePCD(dpaa_intf->port_handle);
+ if (ret != E_OK) {
+ DPAA_PMD_ERR("FM_PORT_DeletePCD: Failed");
+ return ret;
+ }
+
+ for (idx = 0; idx < dpaa_intf->scheme_count; idx++) {
+ DPAA_PMD_DEBUG("KG SCHEME DEL %d, handle =%p",
+ idx, dpaa_intf->scheme_handle[idx]);
+ /* FM PCD KgSchemeDelete */
+ ret = FM_PCD_KgSchemeDelete(dpaa_intf->scheme_handle[idx]);
+ if (ret != E_OK) {
+ DPAA_PMD_ERR("FM_PCD_KgSchemeDelete: Failed");
+ return ret;
+ }
+ dpaa_intf->scheme_handle[idx] = NULL;
+ }
+ /* FM PCD NetEnvCharacteristicsDelete */
+ ret = FM_PCD_NetEnvCharacteristicsDelete(dpaa_intf->netenv_handle);
+ if (ret != E_OK) {
+ DPAA_PMD_ERR("FM_PCD_NetEnvCharacteristicsDelete: Failed");
+ return ret;
+ }
+ dpaa_intf->netenv_handle = NULL;
+
+ /* FM PORT Close */
+ FM_PORT_Close(dpaa_intf->port_handle);
+ dpaa_intf->port_handle = NULL;
+
+ /* Set scheme count to 0 */
+ dpaa_intf->scheme_count = 0;
+
+ return 0;
+}
+
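+/* Configure FM based distribution: rebuild the port PCD with a hash
+ * scheme for the requested distribution set plus a default scheme, and
+ * register the interface in the FM model.
+ */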
+int dpaa_fm_config(struct rte_eth_dev *dev, uint64_t req_dist_set)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+ struct fman_if *fif = dev->process_private;
+ int ret;
+ unsigned int i = 0;
+ PMD_INIT_FUNC_TRACE();
+
+ if (dpaa_intf->port_handle) {
+ if (dpaa_fm_deconfig(dpaa_intf, fif))
+ DPAA_PMD_ERR("DPAA FM deconfig failed");
+ }
+
+ if (!dev->data->nb_rx_queues)
+ return 0;
+
+ if (dev->data->nb_rx_queues & (dev->data->nb_rx_queues - 1)) {
+ DPAA_PMD_ERR("No of queues should be power of 2");
+ return -1;
+ }
+
+ dpaa_intf->nb_rx_queues = dev->data->nb_rx_queues;
+
+ /* Open FM Port and set it in port info */
+ ret = set_fm_port_handle(dpaa_intf, req_dist_set, fif);
+ if (ret) {
+ DPAA_PMD_ERR("Set FM Port handle: Failed");
+ return -1;
+ }
+
+ /* Set PCD netenv and scheme */
+ if (req_dist_set) {
+ ret = set_pcd_netenv_scheme(dpaa_intf, req_dist_set, fif);
+ if (ret) {
+ DPAA_PMD_ERR("Set PCD NetEnv and Scheme dist: Failed");
+ goto unset_fm_port_handle;
+ }
+ }
+ /* Set default netenv and scheme */
+ ret = set_default_scheme(dpaa_intf);
+ if (ret) {
+ DPAA_PMD_ERR("Set PCD NetEnv and Scheme: Failed");
+ goto unset_pcd_netenv_scheme1;
+ }
+
+ /* Set Port PCD */
+ ret = set_port_pcd(dpaa_intf);
+ if (ret) {
+ DPAA_PMD_ERR("Set Port PCD: Failed");
+ goto unset_pcd_netenv_scheme;
+ }
+
+ for (; i < fm_model.dev_count; i++)
+ if (fm_model.device_order[i] == dpaa_intf->ifid)
+ return 0;
+
+ fm_model.device_order[fm_model.dev_count] = dpaa_intf->ifid;
+ fm_model.dev_count++;
+
+ return 0;
+
+unset_pcd_netenv_scheme:
+ unset_pcd_netenv_scheme(dpaa_intf);
+
+unset_pcd_netenv_scheme1:
+ unset_pcd_netenv_scheme(dpaa_intf);
+
+unset_fm_port_handle:
+ /* FM PORT Close */
+ FM_PORT_Close(dpaa_intf->port_handle);
+ dpaa_intf->port_handle = NULL;
+ return -1;
+}
+
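+/* Open FM and FM PCD, enable PCD, and clean up any stale configuration
+ * recorded by a previous instance.
+ */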
+int dpaa_fm_init(void)
+{
+ t_Handle fman_handle;
+ t_Handle pcd_handle;
+ t_FmPcdParams fmPcdParams = {0};
+ /* Hard-coded: FMAN id 0, since only one FMAN is present on LS104x */
+ int fman_id = 0, ret;
+ PMD_INIT_FUNC_TRACE();
+
+ dpaa_read_fm_config_from_file();
+
+ /* FM Open */
+ fman_handle = FM_Open(fman_id);
+ if (!fman_handle) {
+ DPAA_PMD_ERR("FM_Open: Failed");
+ return -1;
+ }
+
+ /* FM PCD Open */
+ fmPcdParams.h_Fm = fman_handle;
+ fmPcdParams.prsSupport = true;
+ fmPcdParams.kgSupport = true;
+ pcd_handle = FM_PCD_Open(&fmPcdParams);
+ if (!pcd_handle) {
+ FM_Close(fman_handle);
+ DPAA_PMD_ERR("FM_PCD_Open: Failed");
+ return -1;
+ }
+
+ /* FM PCD Enable */
+ ret = FM_PCD_Enable(pcd_handle);
+ if (ret) {
+ FM_Close(fman_handle);
+ FM_PCD_Close(pcd_handle);
+ DPAA_PMD_ERR("FM_PCD_Enable: Failed");
+ return -1;
+ }
+
+ /* Set fman and pcd handle in fm info */
+ fm_info.fman_handle = fman_handle;
+ fm_info.pcd_handle = pcd_handle;
+
+ return 0;
+}
+
+
+/* De-initialization of FM */
+int dpaa_fm_term(void)
+{
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (fm_info.pcd_handle && fm_info.fman_handle) {
+ /* FM PCD Disable */
+ ret = FM_PCD_Disable(fm_info.pcd_handle);
+ if (ret) {
+ DPAA_PMD_ERR("FM_PCD_Disable: Failed");
+ return -1;
+ }
+
+ /* FM PCD Close */
+ FM_PCD_Close(fm_info.pcd_handle);
+ fm_info.pcd_handle = NULL;
+ }
+
+ if (fm_info.fman_handle) {
+ /* FM Close */
+ FM_Close(fm_info.fman_handle);
+ fm_info.fman_handle = NULL;
+ }
+
+ if (access(fm_log, F_OK) != -1) {
+ ret = remove(fm_log);
+ if (ret)
+ DPAA_PMD_ERR("File remove: Failed");
+ }
+ return 0;
+}
new file mode 100644
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017,2019 NXP
+ */
+
+#ifndef __DPAA_FLOW_H__
+#define __DPAA_FLOW_H__
+
+int dpaa_fm_init(void);
+int dpaa_fm_term(void);
+int dpaa_fm_config(struct rte_eth_dev *dev, uint64_t req_dist_set);
+int dpaa_fm_deconfig(struct dpaa_if *dpaa_intf, struct fman_if *fif);
+void dpaa_write_fm_config_to_file(void);
+
+#endif
@@ -10,6 +10,7 @@ deps += ['mempool_dpaa']
sources = files('dpaa_ethdev.c',
'fmlib/fm_lib.c',
'fmlib/fm_vsp.c',
+ 'dpaa_flow.c',
'dpaa_rxtx.c')
if cc.has_argument('-Wno-pointer-arith')