
[net] net: ethernet: ti: cpsw: fix min eth packet size for non-switch use-cases

Message ID	20210611132732.10690-1-grygorii.strashko@ti.com
State	New
Series	[net] net: ethernet: ti: cpsw: fix min eth packet size for non-switch use-cases

Commit Message

Grygorii Strashko June 11, 2021, 1:27 p.m. UTC
The CPSW switchdev driver inherited a fix from commit 9421c9015047 ("net:
ethernet: ti: cpsw: fix min eth packet size") which changed the min TX packet
size to 64 bytes (VLAN_ETH_ZLEN, excluding ETH_FCS). That was done to fix a HW
packet drop issue when packets are sent from the Host to a port with PVID and
un-tagging enabled. Unfortunately, this breaks some other non-switch-specific
use-cases, like:
- [1] a CPSW port used as a DSA CPU port, with the DSA tag applied at the end
of the packet;
- [2] some industrial protocols, which expect a min TX packet size of 60 bytes
(excluding FCS).

Fix it by configuring the min TX packet size depending on the driver mode:
 - 60 bytes (ETH_ZLEN) for multi mac (dual-mac) mode
 - 64 bytes (VLAN_ETH_ZLEN) for switch mode

[1] https://lore.kernel.org/netdev/20210531124051.GA15218@cephalopod/
[2] https://e2e.ti.com/support/arm/sitara_arm/f/791/t/701669
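
For illustration, the per-mode minimum boils down to the following sketch
("switch_mode" is only a placeholder here; the actual patch below assigns
priv->tx_packet_min in cpsw_create_ports() and cpsw_dl_switch_mode_set()):

	/* ETH_ZLEN = 60: min frame length excluding FCS (if_ether.h).
	 * VLAN_ETH_ZLEN = 64: ETH_ZLEN + 4-byte VLAN tag (if_vlan.h).
	 */
	priv->tx_packet_min = switch_mode ? CPSW_MIN_PACKET_SIZE_VLAN /* 64 */
					  : CPSW_MIN_PACKET_SIZE;     /* 60 */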

Cc: Ben Hutchings <ben.hutchings@essensium.com>
Cc: stable@vger.kernel.org
Fixes: ed3525eda4c4 ("net: ethernet: ti: introduce cpsw switchdev based driver part 1 - dual-emac")
Signed-off-by: Grygorii Strashko <grygorii.strashko@ti.com>
---
 drivers/net/ethernet/ti/cpsw_new.c  | 12 +++++++++---
 drivers/net/ethernet/ti/cpsw_priv.h |  4 +++-
 2 files changed, 12 insertions(+), 4 deletions(-)

Comments

Ben Hutchings June 11, 2021, 5:54 p.m. UTC | #1
On Fri, Jun 11, 2021 at 04:27:32PM +0300, Grygorii Strashko wrote:
[...]
> --- a/drivers/net/ethernet/ti/cpsw_new.c
> +++ b/drivers/net/ethernet/ti/cpsw_new.c
> @@ -918,14 +918,17 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
>  	struct cpts *cpts = cpsw->cpts;
>  	struct netdev_queue *txq;
>  	struct cpdma_chan *txch;
> +	unsigned int len;
>  	int ret, q_idx;
>  
> -	if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
> +	if (skb_padto(skb, priv->tx_packet_min)) {
>  		cpsw_err(priv, tx_err, "packet pad failed\n");
>  		ndev->stats.tx_dropped++;
>  		return NET_XMIT_DROP;
>  	}
>  
> +	len = skb->len < priv->tx_packet_min ? priv->tx_packet_min : skb->len;
> +
>  	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
>  	    priv->tx_ts_enabled && cpts_can_timestamp(cpts, skb))
>  		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
> @@ -937,7 +940,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
>  	txch = cpsw->txv[q_idx].ch;
>  	txq = netdev_get_tx_queue(ndev, q_idx);
>  	skb_tx_timestamp(skb);
> -	ret = cpdma_chan_submit(txch, skb, skb->data, skb->len,
> +	ret = cpdma_chan_submit(txch, skb, skb->data, len,
>  				priv->emac_port);
>  	if (unlikely(ret != 0)) {
>  		cpsw_err(priv, tx_err, "desc submit failed\n");

This change is odd because cpdma_chan_submit() already pads the DMA
length.

Would it not make more sense to update cpdma_params::min_packet_size
instead of adding a second minimum?
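
(The padding Ben refers to is configured through cpdma_params::min_packet_size,
which cpsw passes in when the cpdma controller is created at probe time. A
simplified sketch of that clamp, not an exact quote from davinci_cpdma.c:)

	/* cpdma already clamps the submitted DMA length to its own
	 * configured minimum, so the per-port tx_packet_min added by
	 * this patch is a second minimum on top of it.
	 */
	if (len < ctlr->params.min_packet_size)
		len = ctlr->params.min_packet_size;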

[...]
> @@ -1686,6 +1690,7 @@ static int cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,
>  
>  			priv = netdev_priv(sl_ndev);
>  			slave->port_vlan = vlan;
> +			priv->tx_packet_min = CPSW_MIN_PACKET_SIZE_VLAN;
>  			if (netif_running(sl_ndev))
>  				cpsw_port_add_switch_def_ale_entries(priv,
>  								     slave);
> @@ -1714,6 +1719,7 @@ static int cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,
>  
>  			priv = netdev_priv(slave->ndev);
>  			slave->port_vlan = slave->data->dual_emac_res_vlan;
> +			priv->tx_packet_min = CPSW_MIN_PACKET_SIZE;
>  			cpsw_port_add_dual_emac_def_ale_entries(priv, slave);
>  		}
>
[...]

What happens if this races with the TX path?  Should there be a
netif_tx_lock() / netif_tx_unlock() around this change?

Ben.
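
A minimal sketch of what such locking could look like (hypothetical, not part
of this patch; sl_ndev and priv as in the hunk quoted above):

	/* Quiesce the TX queues so cpsw_ndo_start_xmit() cannot run
	 * concurrently with the update of the per-port minimum.
	 */
	netif_tx_lock(sl_ndev);
	priv->tx_packet_min = CPSW_MIN_PACKET_SIZE_VLAN;
	netif_tx_unlock(sl_ndev);
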
Grygorii Strashko June 11, 2021, 8:52 p.m. UTC | #2
On 11/06/2021 20:54, Ben Hutchings wrote:
> On Fri, Jun 11, 2021 at 04:27:32PM +0300, Grygorii Strashko wrote:
> [...]
>> --- a/drivers/net/ethernet/ti/cpsw_new.c
>> +++ b/drivers/net/ethernet/ti/cpsw_new.c
>> @@ -918,14 +918,17 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
>>   	struct cpts *cpts = cpsw->cpts;
>>   	struct netdev_queue *txq;
>>   	struct cpdma_chan *txch;
>> +	unsigned int len;
>>   	int ret, q_idx;
>>   
>> -	if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
>> +	if (skb_padto(skb, priv->tx_packet_min)) {
>>   		cpsw_err(priv, tx_err, "packet pad failed\n");
>>   		ndev->stats.tx_dropped++;
>>   		return NET_XMIT_DROP;
>>   	}
>>   
>> +	len = skb->len < priv->tx_packet_min ? priv->tx_packet_min : skb->len;
>> +
>>   	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
>>   	    priv->tx_ts_enabled && cpts_can_timestamp(cpts, skb))
>>   		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
>> @@ -937,7 +940,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
>>   	txch = cpsw->txv[q_idx].ch;
>>   	txq = netdev_get_tx_queue(ndev, q_idx);
>>   	skb_tx_timestamp(skb);
>> -	ret = cpdma_chan_submit(txch, skb, skb->data, skb->len,
>> +	ret = cpdma_chan_submit(txch, skb, skb->data, len,
>>   				priv->emac_port);
>>   	if (unlikely(ret != 0)) {
>>   		cpsw_err(priv, tx_err, "desc submit failed\n");
> 
> This change is odd because cpdma_chan_submit() already pads the DMA
> length.
> 
> Would it not make more sense to update cpdma_params::min_packet_size
> instead of adding a second minimum?

I've been thinking about it, but the cpdma parameters are copied into the cpdma
context once at probe time, so that change would be more complex.
It can be done if you insist.

> 
> [...]
>> @@ -1686,6 +1690,7 @@ static int cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,
>>   
>>   			priv = netdev_priv(sl_ndev);
>>   			slave->port_vlan = vlan;
>> +			priv->tx_packet_min = CPSW_MIN_PACKET_SIZE_VLAN;
>>   			if (netif_running(sl_ndev))
>>   				cpsw_port_add_switch_def_ale_entries(priv,
>>   								     slave);
>> @@ -1714,6 +1719,7 @@ static int cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,
>>   
>>   			priv = netdev_priv(slave->ndev);
>>   			slave->port_vlan = slave->data->dual_emac_res_vlan;
>> +			priv->tx_packet_min = CPSW_MIN_PACKET_SIZE;
>>   			cpsw_port_add_dual_emac_def_ale_entries(priv, slave);
>>   		}
>>
> [...]
> 
> What happens if this races with the TX path?  Should there be a
> netif_tx_lock() / netif_tx_unlock() around this change?

The mode change operation is heavy and is expected to be done only once, when
the bridge is configured. It completely wipes out all ALE entries and hence all
VLAN settings, which need to be configured (or reconfigured) during bridge
configuration anyway. So traffic can be disturbed during the mode change
operation.

As a result, in my opinion, it makes no sense to add additional complexity here.

Patch

diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 11f536138495..401909bff319 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -918,14 +918,17 @@  static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
 	struct cpts *cpts = cpsw->cpts;
 	struct netdev_queue *txq;
 	struct cpdma_chan *txch;
+	unsigned int len;
 	int ret, q_idx;
 
-	if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
+	if (skb_padto(skb, priv->tx_packet_min)) {
 		cpsw_err(priv, tx_err, "packet pad failed\n");
 		ndev->stats.tx_dropped++;
 		return NET_XMIT_DROP;
 	}
 
+	len = skb->len < priv->tx_packet_min ? priv->tx_packet_min : skb->len;
+
 	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
 	    priv->tx_ts_enabled && cpts_can_timestamp(cpts, skb))
 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
@@ -937,7 +940,7 @@  static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
 	txch = cpsw->txv[q_idx].ch;
 	txq = netdev_get_tx_queue(ndev, q_idx);
 	skb_tx_timestamp(skb);
-	ret = cpdma_chan_submit(txch, skb, skb->data, skb->len,
+	ret = cpdma_chan_submit(txch, skb, skb->data, len,
 				priv->emac_port);
 	if (unlikely(ret != 0)) {
 		cpsw_err(priv, tx_err, "desc submit failed\n");
@@ -1100,7 +1103,7 @@  static int cpsw_ndo_xdp_xmit(struct net_device *ndev, int n,
 
 	for (i = 0; i < n; i++) {
 		xdpf = frames[i];
-		if (xdpf->len < CPSW_MIN_PACKET_SIZE)
+		if (xdpf->len < priv->tx_packet_min)
 			break;
 
 		if (cpsw_xdp_tx_frame(priv, xdpf, NULL, priv->emac_port))
@@ -1389,6 +1392,7 @@  static int cpsw_create_ports(struct cpsw_common *cpsw)
 		priv->dev  = dev;
 		priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
 		priv->emac_port = i + 1;
+		priv->tx_packet_min = CPSW_MIN_PACKET_SIZE;
 
 		if (is_valid_ether_addr(slave_data->mac_addr)) {
 			ether_addr_copy(priv->mac_addr, slave_data->mac_addr);
@@ -1686,6 +1690,7 @@  static int cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,
 
 			priv = netdev_priv(sl_ndev);
 			slave->port_vlan = vlan;
+			priv->tx_packet_min = CPSW_MIN_PACKET_SIZE_VLAN;
 			if (netif_running(sl_ndev))
 				cpsw_port_add_switch_def_ale_entries(priv,
 								     slave);
@@ -1714,6 +1719,7 @@  static int cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,
 
 			priv = netdev_priv(slave->ndev);
 			slave->port_vlan = slave->data->dual_emac_res_vlan;
+			priv->tx_packet_min = CPSW_MIN_PACKET_SIZE;
 			cpsw_port_add_dual_emac_def_ale_entries(priv, slave);
 		}
 
diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h
index a323bea54faa..2951fb7b9dae 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.h
+++ b/drivers/net/ethernet/ti/cpsw_priv.h
@@ -89,7 +89,8 @@  do {								\
 
 #define CPSW_POLL_WEIGHT	64
 #define CPSW_RX_VLAN_ENCAP_HDR_SIZE		4
-#define CPSW_MIN_PACKET_SIZE	(VLAN_ETH_ZLEN)
+#define CPSW_MIN_PACKET_SIZE_VLAN	(VLAN_ETH_ZLEN)
+#define CPSW_MIN_PACKET_SIZE	(ETH_ZLEN)
 #define CPSW_MAX_PACKET_SIZE	(VLAN_ETH_FRAME_LEN +\
 				 ETH_FCS_LEN +\
 				 CPSW_RX_VLAN_ENCAP_HDR_SIZE)
@@ -380,6 +381,7 @@  struct cpsw_priv {
 	u32 emac_port;
 	struct cpsw_common *cpsw;
 	int offload_fwd_mark;
+	u32 tx_packet_min;
 };
 
 #define ndev_to_cpsw(ndev) (((struct cpsw_priv *)netdev_priv(ndev))->cpsw)