
[RFC,net-next] udp: add a GSO type for UDPv6

Message ID: 1599048911-7923-1-git-send-email-tanhuazhong@huawei.com
State: New
Series: [RFC,net-next] udp: add a GSO type for UDPv6

Commit Message

Huazhong Tan Sept. 2, 2020, 12:15 p.m. UTC
In some cases UDP GSO needs to handle UDPv4 and UDPv6 differently,
for example for checksum offload. Add a new GSO type, SKB_GSO_UDPV6_L4,
for UDPv6, so that the existing SKB_GSO_UDP_L4 now stands for UDPv4
only.

Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
---
 drivers/net/ethernet/chelsio/cxgb4/sge.c                | 17 ++++++++---------
 drivers/net/ethernet/intel/i40e/i40e_txrx.c             |  2 +-
 drivers/net/ethernet/intel/ice/ice_txrx.c               |  2 +-
 drivers/net/ethernet/intel/igb/igb_main.c               |  4 ++--
 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c           |  4 ++--
 .../net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h |  2 +-
 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c         |  2 +-
 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c       |  8 +++++---
 include/linux/netdev_features.h                         |  2 ++
 include/linux/netdevice.h                               |  1 +
 include/linux/skbuff.h                                  |  8 ++++++++
 include/linux/udp.h                                     |  4 ++--
 net/core/filter.c                                       |  6 ++----
 net/core/skbuff.c                                       |  2 +-
 net/ipv6/udp.c                                          |  2 +-
 net/ipv6/udp_offload.c                                  |  6 +++---
 16 files changed, 41 insertions(+), 31 deletions(-)
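
For reference, UDP GSO (USO) is requested per socket with the UDP_SEGMENT
socket option; with this patch, segments built on an IPv6 socket are tagged
SKB_GSO_UDPV6_L4 in udp_v6_send_skb() instead of SKB_GSO_UDP_L4. A minimal
userspace sketch (the helper name is illustrative, not part of the patch):

	#include <sys/socket.h>
	#include <netinet/in.h>

	#ifndef SOL_UDP
	#define SOL_UDP 17
	#endif
	#ifndef UDP_SEGMENT
	#define UDP_SEGMENT 103		/* from include/uapi/linux/udp.h */
	#endif

	/* Ask the kernel to segment writes into gso_size-byte UDP datagrams */
	static int enable_udp_gso(int fd, int gso_size)
	{
		return setsockopt(fd, SOL_UDP, UDP_SEGMENT,
				  &gso_size, sizeof(gso_size));
	}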

Comments

Willem de Bruijn Sept. 2, 2020, 2:33 p.m. UTC | #1
On Wed, Sep 2, 2020 at 2:18 PM Huazhong Tan <tanhuazhong@huawei.com> wrote:
>
> In some cases, for UDP GSO, UDPv4 and UDPv6 need to be handled
> separately, for example, checksum offload, so add new GSO type
> SKB_GSO_UDPV6_L4 for UDPv6, and the old SKB_GSO_UDP_L4 stands
> for UDPv4.

This is in preparation for hardware you have that actually cares about
this distinction, I guess?


> diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
> index 2cc3cf8..b7c1a76 100644
> --- a/include/linux/netdev_features.h
> +++ b/include/linux/netdev_features.h
> @@ -54,6 +54,7 @@ enum {
>         NETIF_F_GSO_UDP_BIT,            /* ... UFO, deprecated except tuntap */
>         NETIF_F_GSO_UDP_L4_BIT,         /* ... UDP payload GSO (not UFO) */
>         NETIF_F_GSO_FRAGLIST_BIT,               /* ... Fraglist GSO */
> +       NETIF_F_GSO_UDPV6_L4_BIT,       /* ... UDPv6 payload GSO (not UFO) */
>         /**/NETIF_F_GSO_LAST =          /* last bit, see GSO_MASK */
>                 NETIF_F_GSO_FRAGLIST_BIT,

Need to update NETIF_F_GSO_LAST then, too.
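
A sketch of the update that implies, assuming the new bit stays the last
GSO bit in the enum:

	NETIF_F_GSO_FRAGLIST_BIT,		/* ... Fraglist GSO */
	NETIF_F_GSO_UDPV6_L4_BIT,	/* ... UDPv6 payload GSO (not UFO) */
	/**/NETIF_F_GSO_LAST =		/* last bit, see GSO_MASK */
		NETIF_F_GSO_UDPV6_L4_BIT,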
David Miller Sept. 2, 2020, 10:43 p.m. UTC | #2
From: Huazhong Tan <tanhuazhong@huawei.com>
Date: Wed, 2 Sep 2020 20:15:11 +0800

> In some cases, for UDP GSO, UDPv4 and UDPv6 need to be handled
> separately, for example, checksum offload, so add new GSO type
> SKB_GSO_UDPV6_L4 for UDPv6, and the old SKB_GSO_UDP_L4 stands
> for UDPv4.
> 
> Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>

Please submit this alongside something that needs to use it.

Thank you.
Huazhong Tan Sept. 3, 2020, 1:25 a.m. UTC | #3
On 2020/9/2 22:33, Willem de Bruijn wrote:
> On Wed, Sep 2, 2020 at 2:18 PM Huazhong Tan <tanhuazhong@huawei.com> wrote:
>>
>> In some cases, for UDP GSO, UDPv4 and UDPv6 need to be handled
>> separately, for example, checksum offload, so add new GSO type
>> SKB_GSO_UDPV6_L4 for UDPv6, and the old SKB_GSO_UDP_L4 stands
>> for UDPv4.
> 
> This is in preparation for hardware you have that actually cares about
> this distinction, I guess?
> 

It is mainly for separating the checksum offload of IPv4 and IPv6 right
now. With this patch, the user can toggle checksum offload for IPv4
without affecting IPv6, and vice versa.
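
For example, a driver's .ndo_features_check() could then drop USO per IP
version when the matching checksum offload is disabled (an illustrative
sketch only, not part of this patch; the function name is made up):

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static netdev_features_t
	example_features_check(struct sk_buff *skb, struct net_device *dev,
			       netdev_features_t features)
	{
		/* IPv4 checksum offload off -> drop UDPv4 GSO only */
		if ((skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) &&
		    !(dev->features & NETIF_F_IP_CSUM))
			features &= ~NETIF_F_GSO_UDP_L4;

		/* IPv6 checksum offload off -> drop UDPv6 GSO only */
		if ((skb_shinfo(skb)->gso_type & SKB_GSO_UDPV6_L4) &&
		    !(dev->features & NETIF_F_IPV6_CSUM))
			features &= ~NETIF_F_GSO_UDPV6_L4;

		return features;
	}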

> 
>> diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
>> index 2cc3cf8..b7c1a76 100644
>> --- a/include/linux/netdev_features.h
>> +++ b/include/linux/netdev_features.h
>> @@ -54,6 +54,7 @@ enum {
>>          NETIF_F_GSO_UDP_BIT,            /* ... UFO, deprecated except tuntap */
>>          NETIF_F_GSO_UDP_L4_BIT,         /* ... UDP payload GSO (not UFO) */
>>          NETIF_F_GSO_FRAGLIST_BIT,               /* ... Fraglist GSO */
>> +       NETIF_F_GSO_UDPV6_L4_BIT,       /* ... UDPv6 payload GSO (not UFO) */
>>          /**/NETIF_F_GSO_LAST =          /* last bit, see GSO_MASK */
>>                  NETIF_F_GSO_FRAGLIST_BIT,
> 
> Need to update NETIF_F_GSO_LAST then, too.

ok, thanks.

Huazhong Tan Sept. 3, 2020, 1:26 a.m. UTC | #4
On 2020/9/3 6:43, David Miller wrote:
> From: Huazhong Tan <tanhuazhong@huawei.com>
> Date: Wed, 2 Sep 2020 20:15:11 +0800
> 
>> In some cases, for UDP GSO, UDPv4 and UDPv6 need to be handled
>> separately, for example, checksum offload, so add new GSO type
>> SKB_GSO_UDPV6_L4 for UDPv6, and the old SKB_GSO_UDP_L4 stands
>> for UDPv4.
>>
>> Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
> 
> Please submit this alongside something that needs to use it.
> 

Will add one in V2, thanks.

> Thank you.

Patch

diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index fddd70e..c96a0af 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -735,7 +735,7 @@  static inline int is_eth_imm(const struct sk_buff *skb, unsigned int chip_ver)
 	    chip_ver > CHELSIO_T5) {
 		hdrlen = sizeof(struct cpl_tx_tnl_lso);
 		hdrlen += sizeof(struct cpl_tx_pkt_core);
-	} else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+	} else if (skb_is_gso_udp(skb)) {
 		return 0;
 	} else {
 		hdrlen = skb_shinfo(skb)->gso_size ?
@@ -782,7 +782,7 @@  static inline unsigned int calc_tx_flits(const struct sk_buff *skb,
 		if (skb->encapsulation && chip_ver > CHELSIO_T5) {
 			hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
 				 sizeof(struct cpl_tx_tnl_lso);
-		} else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+		} else if (skb_is_gso_udp(skb)) {
 			u32 pkt_hdrlen;
 
 			pkt_hdrlen = eth_get_headlen(skb->dev, skb->data,
@@ -1498,14 +1498,15 @@  static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 	eowr = (void *)&q->q.desc[q->q.pidx];
 	wr->equiq_to_len16 = htonl(wr_mid);
 	wr->r3 = cpu_to_be64(0);
-	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
+	if (skb_is_gso_udp(skb))
 		end = (u64 *)eowr + flits;
 	else
 		end = (u64 *)wr + flits;
 
 	len = immediate ? skb->len : 0;
 	len += sizeof(*cpl);
-	if (ssi->gso_size && !(ssi->gso_type & SKB_GSO_UDP_L4)) {
+	if (ssi->gso_size &&
+	    !(ssi->gso_type & (SKB_GSO_UDP_L4 | SKB_GSO_UDPV6_L4))) {
 		struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
 		struct cpl_tx_tnl_lso *tnl_lso = (void *)(wr + 1);
 
@@ -2061,8 +2062,7 @@  static inline u8 ethofld_calc_tx_flits(struct adapter *adap,
 	u32 wrlen;
 
 	wrlen = sizeof(struct fw_eth_tx_eo_wr) + sizeof(struct cpl_tx_pkt_core);
-	if (skb_shinfo(skb)->gso_size &&
-	    !(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4))
+	if (skb_shinfo(skb)->gso_size && !skb_is_gso_udp(skb))
 		wrlen += sizeof(struct cpl_tx_pkt_lso_core);
 
 	wrlen += roundup(hdr_len, 16);
@@ -2097,8 +2097,7 @@  static void *write_eo_wr(struct adapter *adap, struct sge_eosw_txq *eosw_txq,
 
 	wrlen16 = DIV_ROUND_UP(wrlen, 16);
 	immd_len = sizeof(struct cpl_tx_pkt_core);
-	if (skb_shinfo(skb)->gso_size &&
-	    !(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4))
+	if (skb_shinfo(skb)->gso_size && !skb_is_gso_udp(skb))
 		immd_len += sizeof(struct cpl_tx_pkt_lso_core);
 	immd_len += hdr_len;
 
@@ -2259,7 +2258,7 @@  static int ethofld_hard_xmit(struct net_device *dev,
 	}
 
 	if (skb_shinfo(skb)->gso_size) {
-		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
+		if (skb_is_gso_udp(skb))
 			eohw_txq->uso++;
 		else
 			eohw_txq->tso++;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 91ab824..1c5b621 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2957,7 +2957,7 @@  static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
 	/* remove payload length from inner checksum */
 	paylen = skb->len - l4_offset;
 
-	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+	if (skb_is_gso_udp(skb)) {
 		csum_replace_by_diff(&l4.udp->check, (__force __wsum)htonl(paylen));
 		/* compute length of segmentation header */
 		*hdr_len = sizeof(*l4.udp) + l4_offset;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index eae7526..f8b6471 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -2153,7 +2153,7 @@  int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
 	/* remove payload length from checksum */
 	paylen = skb->len - l4_start;
 
-	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+	if (skb_is_gso_udp(skb)) {
 		csum_replace_by_diff(&l4.udp->check,
 				     (__force __wsum)htonl(paylen));
 		/* compute length of UDP segmentation header */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 698bb6a..86953b7 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -5728,8 +5728,8 @@  static int igb_tso(struct igb_ring *tx_ring,
 	l4.hdr = skb_checksum_start(skb);
 
 	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
-	type_tucmd = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ?
-		      E1000_ADVTXD_TUCMD_L4T_UDP : E1000_ADVTXD_TUCMD_L4T_TCP;
+	type_tucmd = skb_is_gso_udp(skb) ?
+		     E1000_ADVTXD_TUCMD_L4T_UDP : E1000_ADVTXD_TUCMD_L4T_TCP;
 
 	/* initialize outer IP header fields */
 	if (ip.v4->version == 4) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 0b675c3..83e17a6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -7957,8 +7957,8 @@  static int ixgbe_tso(struct ixgbe_ring *tx_ring,
 	l4.hdr = skb_checksum_start(skb);
 
 	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
-	type_tucmd = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ?
-		      IXGBE_ADVTXD_TUCMD_L4T_UDP : IXGBE_ADVTXD_TUCMD_L4T_TCP;
+	type_tucmd = skb_is_gso_udp(skb) ?
+		     IXGBE_ADVTXD_TUCMD_L4T_UDP : IXGBE_ADVTXD_TUCMD_L4T_TCP;
 
 	/* initialize outer IP header fields */
 	if (ip.v4->version == 4) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index 110476b..e6f098d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -114,7 +114,7 @@  static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
 					struct sk_buff *skb,
 					struct mlx5e_accel_tx_state *state)
 {
-	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
+	if (skb_is_gso(skb) && skb_is_gso_udp(skb))
 		mlx5e_udp_gso_handle_tx_skb(skb);
 
 #ifdef CONFIG_MLX5_EN_TLS
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index da596de..0a17ce9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -172,7 +172,7 @@  mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb)
 		stats->tso_inner_packets++;
 		stats->tso_inner_bytes += skb->len - ihs;
 	} else {
-		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
+		if (skb_is_gso_udp(skb))
 			ihs = skb_transport_offset(skb) + sizeof(struct udphdr);
 		else
 			ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 89b2b34..d683cd6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3061,7 +3061,7 @@  static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 	first_tx = tx_q->cur_tx;
 
 	/* Compute header lengths */
-	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+	if (skb_is_gso_udp(skb)) {
 		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
 		hdr = sizeof(struct udphdr);
 	} else {
@@ -3313,7 +3313,8 @@  static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (skb_is_gso(skb) && priv->tso) {
 		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
 			return stmmac_tso_xmit(skb, dev);
-		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
+		if (priv->plat->has_gmac4 &&
+		    (gso & (SKB_GSO_UDP_L4 | SKB_GSO_UDPV6_L4)))
 			return stmmac_tso_xmit(skb, dev);
 	}
 
@@ -4236,7 +4237,8 @@  static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
 {
 	int gso = skb_shinfo(skb)->gso_type;
 
-	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
+	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4 |
+		   SKB_GSO_UDPV6_L4)) {
 		/*
 		 * There is no way to determine the number of TSO/USO
 		 * capable Queues. Let's use always the Queue 0
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 2cc3cf8..b7c1a76 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -54,6 +54,7 @@  enum {
 	NETIF_F_GSO_UDP_BIT,		/* ... UFO, deprecated except tuntap */
 	NETIF_F_GSO_UDP_L4_BIT,		/* ... UDP payload GSO (not UFO) */
 	NETIF_F_GSO_FRAGLIST_BIT,		/* ... Fraglist GSO */
+	NETIF_F_GSO_UDPV6_L4_BIT,	/* ... UDPv6 payload GSO (not UFO) */
 	/**/NETIF_F_GSO_LAST =		/* last bit, see GSO_MASK */
 		NETIF_F_GSO_FRAGLIST_BIT,
 
@@ -157,6 +158,7 @@  enum {
 #define NETIF_F_GRO_FRAGLIST	__NETIF_F(GRO_FRAGLIST)
 #define NETIF_F_GSO_FRAGLIST	__NETIF_F(GSO_FRAGLIST)
 #define NETIF_F_HW_MACSEC	__NETIF_F(HW_MACSEC)
+#define NETIF_F_GSO_UDPV6_L4	__NETIF_F(GSO_UDPV6_L4)
 
 /* Finds the next feature with the highest number of the range of start till 0.
  */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 7f9fcfd..7ad7f6e 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -4760,6 +4760,7 @@  static inline bool net_gso_ok(netdev_features_t features, int gso_type)
 	BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT));
 	BUILD_BUG_ON(SKB_GSO_UDP_L4 != (NETIF_F_GSO_UDP_L4 >> NETIF_F_GSO_SHIFT));
 	BUILD_BUG_ON(SKB_GSO_FRAGLIST != (NETIF_F_GSO_FRAGLIST >> NETIF_F_GSO_SHIFT));
+	BUILD_BUG_ON(SKB_GSO_UDPV6_L4 != (NETIF_F_GSO_UDPV6_L4 >> NETIF_F_GSO_SHIFT));
 
 	return (features & feature) == feature;
 }
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 46881d9..10b3264 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -596,6 +596,8 @@  enum {
 	SKB_GSO_UDP_L4 = 1 << 17,
 
 	SKB_GSO_FRAGLIST = 1 << 18,
+
+	SKB_GSO_UDPV6_L4 = 1 << 19,
 };
 
 #if BITS_PER_LONG > 32
@@ -4454,6 +4456,12 @@  static inline bool skb_is_gso_tcp(const struct sk_buff *skb)
 	return skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6);
 }
 
+/* Note: Should be called only if skb_is_gso(skb) is true */
+static inline bool skb_is_gso_udp(const struct sk_buff *skb)
+{
+	return skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_L4 | SKB_GSO_UDPV6_L4);
+}
+
 static inline void skb_gso_reset(struct sk_buff *skb)
 {
 	skb_shinfo(skb)->gso_size = 0;
diff --git a/include/linux/udp.h b/include/linux/udp.h
index aa84597..b151804 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -123,7 +123,7 @@  static inline void udp_cmsg_recv(struct msghdr *msg, struct sock *sk,
 {
 	int gso_size;
 
-	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+	if (skb_is_gso_udp(skb)) {
 		gso_size = skb_shinfo(skb)->gso_size;
 		put_cmsg(msg, SOL_UDP, UDP_GRO, sizeof(gso_size), &gso_size);
 	}
@@ -132,7 +132,7 @@  static inline void udp_cmsg_recv(struct msghdr *msg, struct sock *sk,
 static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
 {
 	return !udp_sk(sk)->gro_enabled && skb_is_gso(skb) &&
-	       skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4;
+	       skb_is_gso_udp(skb);
 }
 
 #define udp_portaddr_for_each_entry(__sk, list) \
diff --git a/net/core/filter.c b/net/core/filter.c
index 47eef9a..a68f178 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -3083,8 +3083,7 @@  static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff,
 
 	if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) {
 		/* udp gso_size delineates datagrams, only allow if fixed */
-		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ||
-		    !(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
+		if (!skb_is_gso_udp(skb) || !(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
 			return -ENOTSUPP;
 	}
 
@@ -3181,8 +3180,7 @@  static int bpf_skb_net_shrink(struct sk_buff *skb, u32 off, u32 len_diff,
 
 	if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) {
 		/* udp gso_size delineates datagrams, only allow if fixed */
-		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ||
-		    !(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
+		if (!skb_is_gso_udp(skb) || !(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
 			return -ENOTSUPP;
 	}
 
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index a5c11aa..f233567 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -5263,7 +5263,7 @@  static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
 		thlen = tcp_hdrlen(skb);
 	} else if (unlikely(skb_is_gso_sctp(skb))) {
 		thlen = sizeof(struct sctphdr);
-	} else if (shinfo->gso_type & SKB_GSO_UDP_L4) {
+	} else if (shinfo->gso_type & (SKB_GSO_UDP_L4 | SKB_GSO_UDPV6_L4)) {
 		thlen = sizeof(struct udphdr);
 	}
 	/* UFO sets gso_size to the size of the fragmentation
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 29d9691..df5cc92 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1204,7 +1204,7 @@  static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
 
 		if (datalen > cork->gso_size) {
 			skb_shinfo(skb)->gso_size = cork->gso_size;
-			skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
+			skb_shinfo(skb)->gso_type = SKB_GSO_UDPV6_L4;
 			skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
 								 cork->gso_size);
 		}
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index 584157a..a02c9c3 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -39,13 +39,13 @@  static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
 		const struct ipv6hdr *ipv6h;
 		struct udphdr *uh;
 
-		if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_UDP | SKB_GSO_UDP_L4)))
+		if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_UDP | SKB_GSO_UDPV6_L4)))
 			goto out;
 
 		if (!pskb_may_pull(skb, sizeof(struct udphdr)))
 			goto out;
 
-		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
+		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDPV6_L4)
 			return __udp_gso_segment(skb, features);
 
 		/* Do software UFO. Complete and fill in the UDP checksum as HW cannot
@@ -153,7 +153,7 @@  INDIRECT_CALLABLE_SCOPE int udp6_gro_complete(struct sk_buff *skb, int nhoff)
 	if (NAPI_GRO_CB(skb)->is_flist) {
 		uh->len = htons(skb->len - nhoff);
 
-		skb_shinfo(skb)->gso_type |= (SKB_GSO_FRAGLIST|SKB_GSO_UDP_L4);
+		skb_shinfo(skb)->gso_type |= (SKB_GSO_FRAGLIST | SKB_GSO_UDPV6_L4);
 		skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
 
 		if (skb->ip_summed == CHECKSUM_UNNECESSARY) {