
[net-next] net: mhi-net: Add de-aggregation support

Message ID 1611589557-31012-1-git-send-email-loic.poulain@linaro.org
State New
Series [net-next] net: mhi-net: Add de-aggregation support

Commit Message

Loic Poulain Jan. 25, 2021, 3:45 p.m. UTC
When the device-side MTU is larger than the host-side MRU, packets
(typically rmnet packets) are split over multiple MHI transfers.
In that case, the fragments must be re-aggregated to recover the packet
before forwarding it to the upper layer.

A fragmented packet results in an -EOVERFLOW MHI transaction status for
each of its fragments, except the final one. Such transfers were
previously considered errors and the fragments were simply dropped.

This patch implements the re-aggregation mechanism allowing the initial
packet to be recovered. It also prints a warning (once), since this
behavior usually comes from a misconfiguration of the device (modem).

Signed-off-by: Loic Poulain <loic.poulain@linaro.org>

---
 drivers/net/mhi_net.c | 74 ++++++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 64 insertions(+), 10 deletions(-)

-- 
2.7.4

Comments

Jakub Kicinski Jan. 30, 2021, 1:01 a.m. UTC | #1
On Mon, 25 Jan 2021 16:45:57 +0100 Loic Poulain wrote:
> When the device-side MTU is larger than the host-side MRU, packets
> (typically rmnet packets) are split over multiple MHI transfers.
> In that case, the fragments must be re-aggregated to recover the packet
> before forwarding it to the upper layer.
>
> A fragmented packet results in an -EOVERFLOW MHI transaction status for
> each of its fragments, except the final one. Such transfers were
> previously considered errors and the fragments were simply dropped.
>
> This patch implements the re-aggregation mechanism allowing the initial
> packet to be recovered. It also prints a warning (once), since this
> behavior usually comes from a misconfiguration of the device (modem).
>
> Signed-off-by: Loic Poulain <loic.poulain@linaro.org>

> +static struct sk_buff *mhi_net_skb_append(struct mhi_device *mhi_dev,
> +					  struct sk_buff *skb1,
> +					  struct sk_buff *skb2)
> +{
> +	struct sk_buff *new_skb;
> +
> +	/* This is the first fragment */
> +	if (!skb1)
> +		return skb2;
> +
> +	/* Expand packet */
> +	new_skb = skb_copy_expand(skb1, 0, skb2->len, GFP_ATOMIC);
> +	dev_kfree_skb_any(skb1);
> +	if (!new_skb)
> +		return skb2;

I don't get it, if you failed to grow the skb you'll return the next
fragment to the caller? So the frame just lost all of its data up to
where skb2 started? The entire fragment "train" should probably be
dropped at this point.

I think you can just hang the skbs off skb_shinfo(p)->frag_list.

Willem - is it legal to feed frag_listed skbs into netif_rx()?
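
For illustration, a minimal sketch of what such frag_list chaining could look
like in this driver; the skbagg_head/skbagg_tail fields and the helper name
are assumptions, not part of the posted patch:

static struct sk_buff *mhi_net_skb_agg(struct mhi_net_dev *mhi_netdev,
				       struct sk_buff *skb)
{
	struct sk_buff *head = mhi_netdev->skbagg_head;
	struct sk_buff *tail = mhi_netdev->skbagg_tail;

	/* First fragment becomes the head of the aggregate */
	if (!head) {
		mhi_netdev->skbagg_head = skb;
		mhi_netdev->skbagg_tail = skb;
		return skb;
	}

	/* Chain subsequent fragments on the head's frag_list */
	if (!skb_shinfo(head)->frag_list)
		skb_shinfo(head)->frag_list = skb;
	else
		tail->next = skb;

	/* Account the new fragment in the head skb */
	head->len += skb->len;
	head->data_len += skb->len;
	head->truesize += skb->truesize;

	mhi_netdev->skbagg_tail = skb;

	return head;
}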

> +	/* Append to expanded packet */
> +	memcpy(skb_put(new_skb, skb2->len), skb2->data, skb2->len);
> +
> +	/* free appended skb */
> +	dev_kfree_skb_any(skb2);
> +
> +	return new_skb;
> +}

Willem de Bruijn Jan. 30, 2021, 2:12 a.m. UTC | #2
On Fri, Jan 29, 2021 at 8:01 PM Jakub Kicinski <kuba@kernel.org> wrote:
> I think you can just hang the skbs off skb_shinfo(p)->frag_list.
>
> Willem - is it legal to feed frag_listed skbs into netif_rx()?

As far as I know. udp gro will generate frag_list packets through
dev_gro_receive. That and netif_rx share most downstream code. I don't
think anything between netif_rx and __netif_receive_skb_core cares
about the skb contents.
Loic Poulain Feb. 1, 2021, 8:15 a.m. UTC | #3
Hi Jakub, Willem,

On Sat, 30 Jan 2021 at 02:01, Jakub Kicinski <kuba@kernel.org> wrote:
> I don't get it, if you failed to grow the skb you'll return the next
> fragment to the caller? So the frame just lost all of its data up to
> where skb2 started? The entire fragment "train" should probably be
> dropped at this point.

Right, there is no point in keeping the partial packet in that case.

> I think you can just hang the skbs off skb_shinfo(p)->frag_list.
>
> Willem - is it legal to feed frag_listed skbs into netif_rx()?

In the QMAP case, the packets will be forwarded to the rmnet link, which
works with linear skbs (no NETIF_F_SG). Will the linearization be properly
performed by the net core, in the same way as on the xmit path?

Regards,
Loic
Willem de Bruijn Feb. 1, 2021, 2:23 p.m. UTC | #4
On Mon, Feb 1, 2021 at 3:08 AM Loic Poulain <loic.poulain@linaro.org> wrote:
> In the QMAP case, the packets will be forwarded to the rmnet link, which
> works with linear skbs (no NETIF_F_SG). Will the linearization be properly
> performed by the net core, in the same way as on the xmit path?

What is this path to rmnet, if not the usual xmit path / validate_xmit_skb?
Loic Poulain Feb. 1, 2021, 4:48 p.m. UTC | #5
On Mon, 1 Feb 2021 at 15:24, Willem de Bruijn
<willemdebruijn.kernel@gmail.com> wrote:
> What is this path to rmnet, if not the usual xmit path / validate_xmit_skb?

I mean, I'm not sure what to do exactly here. Instead of using
skb_copy_expand() to re-aggregate data from the different skbs, Jakub
suggests chaining the skbs (via the 'frag_list' and 'next' pointers, I
assume) and pushing the chained skb to the net core via netif_rx(). In
that case, I assume the de-fragmentation/linearization will happen in
the net core, right? If the transported protocol is rmnet, the packet
is supposed to reach rmnet_rx_handler() at some point, but rmnet only
works with standard/linear skbs.

Regards,
Loic
Willem de Bruijn Feb. 1, 2021, 6:33 p.m. UTC | #6
On Mon, Feb 1, 2021 at 11:41 AM Loic Poulain <loic.poulain@linaro.org> wrote:
> I mean, I'm not sure what to do exactly here. Instead of using
> skb_copy_expand() to re-aggregate data from the different skbs, Jakub
> suggests chaining the skbs (via the 'frag_list' and 'next' pointers, I
> assume) and pushing the chained skb to the net core via netif_rx(). In
> that case, I assume the de-fragmentation/linearization will happen in
> the net core, right? If the transported protocol is rmnet, the packet
> is supposed to reach rmnet_rx_handler() at some point, but rmnet only
> works with standard/linear skbs.

If it has that limitation, the rx_handler should have a check and linearize.

That is simpler than this skb_copy_expand, and as far as I can see no
more expensive.
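
A minimal sketch of that check, assuming it sits at the top of rmnet's
rx_handler; the exact placement and error handling are guesses, not existing
rmnet code:

static rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;

	/* rmnet parses QMAP headers from skb->data, so flatten any
	 * frag_list/paged skb before the deaggregation code runs.
	 */
	if (skb_linearize(skb)) {
		kfree_skb(skb);
		return RX_HANDLER_CONSUMED;
	}

	/* ... existing QMAP deaggregation and delivery ... */

	return RX_HANDLER_CONSUMED;
}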
Jakub Kicinski Feb. 1, 2021, 8:55 p.m. UTC | #7
On Mon, 1 Feb 2021 13:33:05 -0500 Willem de Bruijn wrote:
> On Mon, Feb 1, 2021 at 11:41 AM Loic Poulain <loic.poulain@linaro.org> wrote:
> > On Mon, 1 Feb 2021 at 15:24, Willem de Bruijn wrote:
> > > What is this path to rmnet, if not the usual xmit path / validate_xmit_skb?
> >
> > I mean, I'm not sure what to do exactly here. Instead of using
> > skb_copy_expand() to re-aggregate data from the different skbs, Jakub
> > suggests chaining the skbs (via the 'frag_list' and 'next' pointers, I
> > assume) and pushing the chained skb to the net core via netif_rx(). In
> > that case, I assume the de-fragmentation/linearization will happen in
> > the net core, right? If the transported protocol is rmnet, the packet
> > is supposed to reach rmnet_rx_handler() at some point, but rmnet only
> > works with standard/linear skbs.
>
> If it has that limitation, the rx_handler should have a check and linearize.

Do you mean it's there or we should add it? 

> That is simpler than this skb_copy_expand, and as far as I can see no
> more expensive.

rx_handler is only used by uppers which are 100% SW. I think the right
path is to fix the upper, rather than add a check to the fastpath, no?

Perhaps all that's needed is a:

 pskb_may_pull(skb, sizeof(struct rmnet_map_header))

in rmnet? Hope I'm not missing some crucial point :)
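
Sketched out, that check could look like the following inside rmnet's
rx_handler; whether it is sufficient on its own for rmnet's deaggregation
loop is exactly the open question in this thread:

	/* Make sure the QMAP header is in the linear area before parsing */
	if (!pskb_may_pull(skb, sizeof(struct rmnet_map_header))) {
		kfree_skb(skb);
		return RX_HANDLER_CONSUMED;
	}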
Willem de Bruijn Feb. 1, 2021, 9:20 p.m. UTC | #8
On Mon, Feb 1, 2021 at 3:55 PM Jakub Kicinski <kuba@kernel.org> wrote:
> On Mon, 1 Feb 2021 13:33:05 -0500 Willem de Bruijn wrote:
> > On Mon, Feb 1, 2021 at 11:41 AM Loic Poulain <loic.poulain@linaro.org> wrote:
> > > On Mon, 1 Feb 2021 at 15:24, Willem de Bruijn wrote:
> > > > What is this path to rmnet, if not the usual xmit path / validate_xmit_skb?
> > >
> > > I mean, I'm not sure what to do exactly here. Instead of using
> > > skb_copy_expand() to re-aggregate data from the different skbs, Jakub
> > > suggests chaining the skbs (via the 'frag_list' and 'next' pointers, I
> > > assume) and pushing the chained skb to the net core via netif_rx(). In
> > > that case, I assume the de-fragmentation/linearization will happen in
> > > the net core, right? If the transported protocol is rmnet, the packet
> > > is supposed to reach rmnet_rx_handler() at some point, but rmnet only
> > > works with standard/linear skbs.
> >
> > If it has that limitation, the rx_handler should have a check and linearize.
>
> Do you mean it's there or we should add it?

I mean that if rmnet cannot handle non-linear skbs, then
rmnet_rx_handler should linearize to be safe. It should not just rely
on __netif_receive_skb_core passing it only linear packets. That is
fragile.

I suppose the requirement is due to how rmnet_map_deaggregate splits
packets, with bare memcpy + skb_pull. I don't see a linearize call. If
only this path is affected, an skb_linearize could also be limited to
this branch. Or the code converted to handle skb frags and frag_list.

That said, addressing the fragile behavior is somewhat tangential to
the aim of this patch: passing such packets to
__netif_receive_skb_core. Your comment rightly asked about the logic
during allocation failure. If instead the whole packet is just
discarded at that point, I suppose the current approach works.
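
As a sketch of that discard-on-failure behaviour applied to the posted
skb_copy_expand() approach (the caller would then treat a NULL return as a
dropped fragment train and bump rx_errors; this is an assumption, not the
submitted code):

	/* Expand packet; on failure drop the whole fragment train */
	new_skb = skb_copy_expand(skb1, 0, skb2->len, GFP_ATOMIC);
	if (!new_skb) {
		dev_kfree_skb_any(skb1);
		dev_kfree_skb_any(skb2);
		return NULL;
	}
	dev_kfree_skb_any(skb1);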

> > That is simpler than this skb_copy_expand, and as far as I can see no
> > more expensive.
>
> rx_handler is only used by uppers which are 100% SW. I think the right
> path is to fix the upper, rather than add a check to the fastpath, no?

Right. I think we're on the same page. I did not mean linearizing before
calling any rx_handler, just specifically inside this rx_handler
implementation.

Patch

diff --git a/drivers/net/mhi_net.c b/drivers/net/mhi_net.c
index a5a214d..780086f 100644
--- a/drivers/net/mhi_net.c
+++ b/drivers/net/mhi_net.c
@@ -34,6 +34,7 @@  struct mhi_net_dev {
 	struct mhi_device *mdev;
 	struct net_device *ndev;
 	struct delayed_work rx_refill;
+	struct sk_buff *skbagg;
 	struct mhi_net_stats stats;
 	u32 rx_queue_sz;
 };
@@ -133,6 +134,31 @@  static void mhi_net_setup(struct net_device *ndev)
 	ndev->tx_queue_len = 1000;
 }
 
+static struct sk_buff *mhi_net_skb_append(struct mhi_device *mhi_dev,
+					  struct sk_buff *skb1,
+					  struct sk_buff *skb2)
+{
+	struct sk_buff *new_skb;
+
+	/* This is the first fragment */
+	if (!skb1)
+		return skb2;
+
+	/* Expand packet */
+	new_skb = skb_copy_expand(skb1, 0, skb2->len, GFP_ATOMIC);
+	dev_kfree_skb_any(skb1);
+	if (!new_skb)
+		return skb2;
+
+	/* Append to expanded packet */
+	memcpy(skb_put(new_skb, skb2->len), skb2->data, skb2->len);
+
+	/* free appended skb */
+	dev_kfree_skb_any(skb2);
+
+	return new_skb;
+}
+
 static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
 				struct mhi_result *mhi_res)
 {
@@ -143,19 +169,44 @@  static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
 	remaining = atomic_dec_return(&mhi_netdev->stats.rx_queued);
 
 	if (unlikely(mhi_res->transaction_status)) {
-		dev_kfree_skb_any(skb);
-
-		/* MHI layer stopping/resetting the DL channel */
-		if (mhi_res->transaction_status == -ENOTCONN)
+		switch (mhi_res->transaction_status) {
+		case -EOVERFLOW:
+			/* Packet can not fit in one MHI buffer and has been
+			 * split over multiple MHI transfers, do re-aggregation.
+			 * That usually means the device side MTU is larger than
+			 * the host side MTU/MRU. Since this is not optimal,
+			 * print a warning (once).
+			 */
+			netdev_warn_once(mhi_netdev->ndev,
+					 "Fragmented packets received, fix MTU?\n");
+			skb_put(skb, mhi_res->bytes_xferd);
+			mhi_netdev->skbagg = mhi_net_skb_append(mhi_dev,
+								mhi_netdev->skbagg,
+								skb);
+			break;
+		case -ENOTCONN:
+			/* MHI layer stopping/resetting the DL channel */
+			dev_kfree_skb_any(skb);
 			return;
-
-		u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
-		u64_stats_inc(&mhi_netdev->stats.rx_errors);
-		u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
+		default:
+			/* Unknown error, simply drop */
+			dev_kfree_skb_any(skb);
+			u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
+			u64_stats_inc(&mhi_netdev->stats.rx_errors);
+			u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
+		}
 	} else {
+		skb_put(skb, mhi_res->bytes_xferd);
+
+		if (mhi_netdev->skbagg) {
+			/* Aggregate the final fragment */
+			skb = mhi_net_skb_append(mhi_dev, mhi_netdev->skbagg, skb);
+			mhi_netdev->skbagg = NULL;
+		}
+
 		u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
 		u64_stats_inc(&mhi_netdev->stats.rx_packets);
-		u64_stats_add(&mhi_netdev->stats.rx_bytes, mhi_res->bytes_xferd);
+		u64_stats_add(&mhi_netdev->stats.rx_bytes, skb->len);
 		u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
 
 		switch (skb->data[0] & 0xf0) {
@@ -170,7 +221,6 @@  static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
 			break;
 		}
 
-		skb_put(skb, mhi_res->bytes_xferd);
 		netif_rx(skb);
 	}
 
@@ -270,6 +320,7 @@  static int mhi_net_probe(struct mhi_device *mhi_dev,
 	dev_set_drvdata(dev, mhi_netdev);
 	mhi_netdev->ndev = ndev;
 	mhi_netdev->mdev = mhi_dev;
+	mhi_netdev->skbagg = NULL;
 	SET_NETDEV_DEV(ndev, &mhi_dev->dev);
 	SET_NETDEV_DEVTYPE(ndev, &wwan_type);
 
@@ -304,6 +355,9 @@  static void mhi_net_remove(struct mhi_device *mhi_dev)
 
 	mhi_unprepare_from_transfer(mhi_netdev->mdev);
 
+	if (mhi_netdev->skbagg)
+		kfree_skb(mhi_netdev->skbagg);
+
 	free_netdev(mhi_netdev->ndev);
 }