@@ -597,7 +597,12 @@ void
fman_if_discard_rx_errors(struct fman_if *fm_if)
{
struct __fman_if *__if = container_of(fm_if, struct __fman_if, __if);
- unsigned int *fmbm_rfsdm, *fmbm_rfsem;
+ unsigned int *fmbm_rcfg, *fmbm_rfsdm, *fmbm_rfsem;
+ unsigned int val;
+
+ fmbm_rcfg = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rcfg;
+ val = in_be32(fmbm_rcfg);
+ out_be32(fmbm_rcfg, val & ~BMI_PORT_CFG_FDOVR);
fmbm_rfsem = &((struct rx_bmi_regs *)__if->bmi_map)->fmbm_rfsem;
out_be32(fmbm_rfsem, 0);
@@ -52,7 +52,8 @@
/* Supported Rx offloads */
static uint64_t dev_rx_offloads_sup =
DEV_RX_OFFLOAD_JUMBO_FRAME |
- DEV_RX_OFFLOAD_SCATTER;
+ DEV_RX_OFFLOAD_SCATTER |
+ DEV_RX_OFFLOAD_ERR_PKT_DROP;
/* Rx offloads which cannot be disabled */
static uint64_t dev_rx_offloads_nodis =
@@ -262,6 +263,18 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
dev->data->scattered_rx = 1;
}
+ if (!(rx_offloads & DEV_RX_OFFLOAD_ERR_PKT_DROP)) {
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+ struct qman_fq *rxq = &dpaa_intf->rx_queues[0];
+
+ DPAA_PMD_DEBUG("error packets will not be dropped on hw");
+ fman_if_receive_rx_errors(fif, FM_FD_RX_STATUS_ERR_MASK);
+ fman_if_set_err_fqid(fif, rxq->fqid);
+ } else {
+ DPAA_PMD_DEBUG("error packets will be dropped on hw");
+ fman_if_discard_rx_errors(fif);
+ }
+
if (!(default_q || fmc_q)) {
if (dpaa_fm_config(dev,
eth_conf->rx_adv_conf.rss_conf.rss_hf)) {
@@ -2031,9 +2044,6 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
fman_intf->mac_addr.addr_bytes[5]);
if (!fman_intf->is_shared_mac) {
- /* Configure error packet handling */
- fman_if_receive_rx_errors(fman_intf,
- FM_FD_RX_STATUS_ERR_MASK);
/* Disable RX mode */
fman_if_disable_rx(fman_intf);
/* Disable promiscuous mode */