@@ -198,23 +198,19 @@ rmnet_map_ipv4_ul_csum_header(void *iphdr,
struct rmnet_map_ul_csum_header *ul_header,
struct sk_buff *skb)
{
- __be16 *hdr = (__be16 *)ul_header;
struct iphdr *ip4h = iphdr;
u16 offset;
+ u16 val;
offset = skb_transport_header(skb) - (unsigned char *)iphdr;
ul_header->csum_start_offset = htons(offset);
- ul_header->csum_insert_offset = skb->csum_offset;
- ul_header->csum_enabled = 1;
+ val = MAP_CSUM_UL_ENABLED_FLAG;
if (ip4h->protocol == IPPROTO_UDP)
- ul_header->udp_ind = 1;
- else
- ul_header->udp_ind = 0;
+ val |= MAP_CSUM_UL_UDP_FLAG;
+ val |= u16_encode_bits(skb->csum_offset, MAP_CSUM_UL_OFFSET_FMASK);
- /* Changing remaining fields to network order */
- hdr++;
- *hdr = htons((__force u16)*hdr);
+ ul_header->csum_info = htons(val);
skb->ip_summed = CHECKSUM_NONE;
@@ -241,24 +237,19 @@ rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
struct rmnet_map_ul_csum_header *ul_header,
struct sk_buff *skb)
{
- __be16 *hdr = (__be16 *)ul_header;
struct ipv6hdr *ip6h = ip6hdr;
u16 offset;
+ u16 val;
offset = skb_transport_header(skb) - (unsigned char *)ip6hdr;
ul_header->csum_start_offset = htons(offset);
- ul_header->csum_insert_offset = skb->csum_offset;
- ul_header->csum_enabled = 1;
-
+ val = MAP_CSUM_UL_ENABLED_FLAG;
if (ip6h->nexthdr == IPPROTO_UDP)
- ul_header->udp_ind = 1;
- else
- ul_header->udp_ind = 0;
+ val |= MAP_CSUM_UL_UDP_FLAG;
+ val |= u16_encode_bits(skb->csum_offset, MAP_CSUM_UL_OFFSET_FMASK);
- /* Changing remaining fields to network order */
- hdr++;
- *hdr = htons((__force u16)*hdr);
+ ul_header->csum_info = htons(val);
skb->ip_summed = CHECKSUM_NONE;
@@ -425,10 +416,7 @@ void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
}
sw_csum:
- ul_header->csum_start_offset = 0;
- ul_header->csum_insert_offset = 0;
- ul_header->csum_enabled = 0;
- ul_header->udp_ind = 0;
+ memset(ul_header, 0, sizeof(*ul_header));
priv->stats.csum_sw++;
}
@@ -33,17 +33,16 @@ struct rmnet_map_dl_csum_trailer {
struct rmnet_map_ul_csum_header {
__be16 csum_start_offset;
-#if defined(__LITTLE_ENDIAN_BITFIELD)
- u16 csum_insert_offset:14;
- u16 udp_ind:1;
- u16 csum_enabled:1;
-#elif defined (__BIG_ENDIAN_BITFIELD)
- u16 csum_enabled:1;
- u16 udp_ind:1;
- u16 csum_insert_offset:14;
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
+ __be16 csum_info; /* MAP_CSUM_UL_*_FMASK */
} __aligned(1);
+/* csum_info field:
+ * OFFSET: where (offset in bytes) to insert computed checksum
+ * UDP: 1 = UDP checksum (zero checksum means no checksum)
+ * ENABLED: 1 = checksum computation requested
+ */
+#define MAP_CSUM_UL_OFFSET_FMASK GENMASK(13, 0)
+#define MAP_CSUM_UL_UDP_FLAG BIT(14)
+#define MAP_CSUM_UL_ENABLED_FLAG BIT(15)
+
#endif /* !(_LINUX_IF_RMNET_H_) */
Replace the use of C bit-fields in the rmnet_map_ul_csum_header structure with a single two-byte (big endian) structure member, and use bit or field masks to encode or get values within it.

Previously rmnet_map_ipv4_ul_csum_header() would update C bit-field values in host byte order, then forcibly fix their byte order using a combination of byte swap operations and types. Instead, just compute the value that needs to go into the new structure member and save it with a simple byte-order conversion.

Make similar simplifications in rmnet_map_ipv6_ul_csum_header().

Finally, in rmnet_map_checksum_uplink_packet() a set of assignments zeroes every field in the upload checksum header. Replace that with a single memset() operation.

Signed-off-by: Alex Elder <elder@linaro.org>
Reported-by: kernel test robot <lkp@intel.com>
---
v3: Use BIT(x) and don't use u16_get_bits() for single-bit flags
v2: Fixed to use u16_encode_bits() instead of be16_encode_bits().

 .../ethernet/qualcomm/rmnet/rmnet_map_data.c | 34 ++++++-------------
 include/linux/if_rmnet.h                     | 21 ++++++------
 2 files changed, 21 insertions(+), 34 deletions(-)

--
2.27.0
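For illustration only (not part of the patch): a minimal sketch of how the new csum_info layout might be packed and unpacked using the MAP_CSUM_UL_* masks defined in the modified <linux/if_rmnet.h> above. The helper names map_csum_info_pack() and map_csum_info_offset() are made up for this example; the driver itself open-codes the packing in rmnet_map_ipv4_ul_csum_header() and rmnet_map_ipv6_ul_csum_header() as shown in the diff.

/* Sketch: assumes the MAP_CSUM_UL_* definitions added above.
 * Helper names are hypothetical and exist only for illustration.
 */
#include <linux/types.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/if_rmnet.h>
#include <asm/byteorder.h>

/* Build csum_info for an uplink packet: set the enabled flag,
 * optionally mark the payload as UDP, and encode where the computed
 * checksum should be inserted (the driver passes skb->csum_offset).
 */
static __be16 map_csum_info_pack(u16 csum_offset, bool is_udp)
{
        u16 val = MAP_CSUM_UL_ENABLED_FLAG;

        if (is_udp)
                val |= MAP_CSUM_UL_UDP_FLAG;
        val |= u16_encode_bits(csum_offset, MAP_CSUM_UL_OFFSET_FMASK);

        return htons(val);      /* field is big endian on the wire */
}

/* Extract the checksum insert offset from a header's csum_info. */
static u16 map_csum_info_offset(__be16 csum_info)
{
        return u16_get_bits(ntohs(csum_info), MAP_CSUM_UL_OFFSET_FMASK);
}

Compared with the old bit-field layout, this keeps the wire format independent of the compiler's bit-field ordering, and the byte-order handling becomes a single explicit htons()/ntohs() rather than an in-place swap of half the structure.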