
[net-next,v4,1/2] net: mhi-net: Add de-aggeration support

Message ID 1612365335-14117-1-git-send-email-loic.poulain@linaro.org
State Superseded
Series [net-next,v4,1/2] net: mhi-net: Add de-aggeration support

Commit Message

Loic Poulain Feb. 3, 2021, 3:15 p.m. UTC
When the device-side MTU is larger than the host-side MTU, packets
(typically rmnet packets) are split over multiple MHI transfers.
In that case, the fragments must be re-aggregated to recover the
packet before forwarding it to the upper layer.

A fragmented packet results in an -EOVERFLOW MHI transaction status
for each of its fragments, except the final one. Such transfers were
previously considered errors, and the fragments were simply dropped.

This change adds a re-aggregation mechanism using skb chaining, via
the skb frag_list.

A warning is printed once, since this behavior usually comes from
a misconfiguration of the device (e.g. the modem MTU).

Signed-off-by: Loic Poulain <loic.poulain@linaro.org>

---
 v2: use zero-copy skb chaining instead of skb_copy_expand.
 v3: Fix nit in commit msg + remove misleading inline comment for frag_list
 v4: no change

 drivers/net/mhi_net.c | 74 ++++++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 64 insertions(+), 10 deletions(-)

-- 
2.7.4
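
For readers less familiar with the mechanism the commit message describes,
the following is a minimal, self-contained sketch of non-paged skb chaining
via frag_list. It mirrors the mhi_net_skb_agg() helper added by the patch
below; the chain_fragment() name and the standalone tail pointer are
illustrative, not part of the kernel API:

/* Minimal sketch of frag_list chaining, after mhi_net_skb_agg(): the
 * first fragment becomes the head of the chain, the second is attached
 * to the head's frag_list, and later ones are linked through the
 * previous tail. The head's byte accounting grows with every fragment.
 */
static struct sk_buff *chain_fragment(struct sk_buff *head,
				      struct sk_buff **tail,
				      struct sk_buff *skb)
{
	if (!head) {
		*tail = skb;	/* chain starts; head and tail are skb */
		return skb;
	}

	if (!skb_shinfo(head)->frag_list)
		skb_shinfo(head)->frag_list = skb;	/* second fragment */
	else
		(*tail)->next = skb;			/* third and later */

	head->len += skb->len;			/* total packet length */
	head->data_len += skb->len;		/* bytes outside the linear area */
	head->truesize += skb->truesize;	/* memory accounting */

	*tail = skb;
	return head;
}

Because only pointers are linked, no payload is copied; this is the
zero-copy property the v2 changelog entry refers to.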

Comments

Jesse Brandeburg Feb. 3, 2021, 11:15 p.m. UTC | #1
Loic Poulain wrote:

> When the device-side MTU is larger than the host-side MTU, packets
> (typically rmnet packets) are split over multiple MHI transfers.
> In that case, the fragments must be re-aggregated to recover the
> packet before forwarding it to the upper layer.
>
> A fragmented packet results in an -EOVERFLOW MHI transaction status
> for each of its fragments, except the final one. Such transfers were
> previously considered errors, and the fragments were simply dropped.
>
> This change adds a re-aggregation mechanism using skb chaining, via
> the skb frag_list.
>
> A warning is printed once, since this behavior usually comes from
> a misconfiguration of the device (e.g. the modem MTU).
>
> Signed-off-by: Loic Poulain <loic.poulain@linaro.org>
> ---
>  v2: use zero-copy skb chaining instead of skb_copy_expand.
>  v3: Fix nit in commit msg + remove misleading inline comment for frag_list
>  v4: no change

Apologies for the nit; can you please fix the spelling of "aggregation"
in the subject?
Loic Poulain Feb. 4, 2021, 8:01 a.m. UTC | #2
Hi Jesse,

On Thu, 4 Feb 2021 at 00:16, Jesse Brandeburg
<jesse.brandeburg@intel.com> wrote:
> Loic Poulain wrote:
>
> > When the device-side MTU is larger than the host-side MTU, packets
> > (typically rmnet packets) are split over multiple MHI transfers.
> > In that case, the fragments must be re-aggregated to recover the
> > packet before forwarding it to the upper layer.
> >
> > A fragmented packet results in an -EOVERFLOW MHI transaction status
> > for each of its fragments, except the final one. Such transfers were
> > previously considered errors, and the fragments were simply dropped.
> >
> > This change adds a re-aggregation mechanism using skb chaining, via
> > the skb frag_list.
> >
> > A warning is printed once, since this behavior usually comes from
> > a misconfiguration of the device (e.g. the modem MTU).
> >
> > Signed-off-by: Loic Poulain <loic.poulain@linaro.org>
> > ---
> >  v2: use zero-copy skb chaining instead of skb_copy_expand.
> >  v3: Fix nit in commit msg + remove misleading inline comment for frag_list
> >  v4: no change
>
> Apologies for the nit; can you please fix the spelling of "aggregation"
> in the subject?

I will. I need to change it anyway, since it is not de-aggregation but
re-aggregation of fragmented packets.

Thanks
Loic

Patch

diff --git a/drivers/net/mhi_net.c b/drivers/net/mhi_net.c
index 4f512531..8800991 100644
--- a/drivers/net/mhi_net.c
+++ b/drivers/net/mhi_net.c
@@ -32,6 +32,8 @@  struct mhi_net_stats {
 struct mhi_net_dev {
 	struct mhi_device *mdev;
 	struct net_device *ndev;
+	struct sk_buff *skbagg_head;
+	struct sk_buff *skbagg_tail;
 	struct delayed_work rx_refill;
 	struct mhi_net_stats stats;
 	u32 rx_queue_sz;
@@ -132,6 +134,32 @@  static void mhi_net_setup(struct net_device *ndev)
 	ndev->tx_queue_len = 1000;
 }
 
+static struct sk_buff *mhi_net_skb_agg(struct mhi_net_dev *mhi_netdev,
+				       struct sk_buff *skb)
+{
+	struct sk_buff *head = mhi_netdev->skbagg_head;
+	struct sk_buff *tail = mhi_netdev->skbagg_tail;
+
+	/* This is non-paged skb chaining using frag_list */
+	if (!head) {
+		mhi_netdev->skbagg_head = skb;
+		return skb;
+	}
+
+	if (!skb_shinfo(head)->frag_list)
+		skb_shinfo(head)->frag_list = skb;
+	else
+		tail->next = skb;
+
+	head->len += skb->len;
+	head->data_len += skb->len;
+	head->truesize += skb->truesize;
+
+	mhi_netdev->skbagg_tail = skb;
+
+	return mhi_netdev->skbagg_head;
+}
+
 static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
 				struct mhi_result *mhi_res)
 {
@@ -142,19 +170,42 @@  static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
 	free_desc_count = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
 
 	if (unlikely(mhi_res->transaction_status)) {
-		dev_kfree_skb_any(skb);
-
-		/* MHI layer stopping/resetting the DL channel */
-		if (mhi_res->transaction_status == -ENOTCONN)
+		switch (mhi_res->transaction_status) {
+		case -EOVERFLOW:
+			/* Packet can not fit in one MHI buffer and has been
+			 * split over multiple MHI transfers, do re-aggregation.
+			 * That usually means the device side MTU is larger than
+			 * the host side MTU/MRU. Since this is not optimal,
+			 * print a warning (once).
+			 */
+			netdev_warn_once(mhi_netdev->ndev,
+					 "Fragmented packets received, fix MTU?\n");
+			skb_put(skb, mhi_res->bytes_xferd);
+			mhi_net_skb_agg(mhi_netdev, skb);
+			break;
+		case -ENOTCONN:
+			/* MHI layer stopping/resetting the DL channel */
+			dev_kfree_skb_any(skb);
 			return;
-
-		u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
-		u64_stats_inc(&mhi_netdev->stats.rx_errors);
-		u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
+		default:
+			/* Unknown error, simply drop */
+			dev_kfree_skb_any(skb);
+			u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
+			u64_stats_inc(&mhi_netdev->stats.rx_errors);
+			u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
+		}
 	} else {
+		skb_put(skb, mhi_res->bytes_xferd);
+
+		if (mhi_netdev->skbagg_head) {
+			/* Aggregate the final fragment */
+			skb = mhi_net_skb_agg(mhi_netdev, skb);
+			mhi_netdev->skbagg_head = NULL;
+		}
+
 		u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
 		u64_stats_inc(&mhi_netdev->stats.rx_packets);
-		u64_stats_add(&mhi_netdev->stats.rx_bytes, mhi_res->bytes_xferd);
+		u64_stats_add(&mhi_netdev->stats.rx_bytes, skb->len);
 		u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
 
 		switch (skb->data[0] & 0xf0) {
@@ -169,7 +220,6 @@  static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
 			break;
 		}
 
-		skb_put(skb, mhi_res->bytes_xferd);
 		netif_rx(skb);
 	}
 
@@ -267,6 +317,7 @@  static int mhi_net_probe(struct mhi_device *mhi_dev,
 	dev_set_drvdata(dev, mhi_netdev);
 	mhi_netdev->ndev = ndev;
 	mhi_netdev->mdev = mhi_dev;
+	mhi_netdev->skbagg_head = NULL;
 	SET_NETDEV_DEV(ndev, &mhi_dev->dev);
 	SET_NETDEV_DEVTYPE(ndev, &wwan_type);
 
@@ -301,6 +352,9 @@  static void mhi_net_remove(struct mhi_device *mhi_dev)
 
 	mhi_unprepare_from_transfer(mhi_netdev->mdev);
 
+	if (mhi_netdev->skbagg_head)
+		kfree_skb(mhi_netdev->skbagg_head);
+
 	free_netdev(mhi_netdev->ndev);
 }
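
Read end-to-end, the new callback logic produces the following sequence for
a packet split over, say, three MHI transfers. This is a hypothetical
walkthrough of the code above; frag0..frag2 are illustrative names, not
identifiers from the patch:

/* Hypothetical walkthrough, assuming one packet split over three transfers
 * (transaction_status in parentheses):
 *
 *   mhi_net_dl_callback(frag0, -EOVERFLOW)
 *       -> skbagg_head = frag0 (chain started), warning printed once
 *   mhi_net_dl_callback(frag1, -EOVERFLOW)
 *       -> skb_shinfo(frag0)->frag_list = frag1; skbagg_tail = frag1
 *   mhi_net_dl_callback(frag2, 0)
 *       -> frag1->next = frag2; skbagg_head reset to NULL;
 *          rx_bytes increased by the full aggregated skb->len;
 *          netif_rx(frag0)
 */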