From: Arnd Bergmann <arnd@arndb.de>

This patch has been added to the 4.1 stable tree. If you have any
objections, please let us know.

===============
[ Upstream commit b4391db42308c9940944b5d7be5ca4b78fb88dd0 ]

When CONFIG_KASAN is enabled, the "--param asan-stack=1" option causes rather large
stack frames in some functions. This goes unnoticed normally because
CONFIG_FRAME_WARN is disabled with CONFIG_KASAN by default as of commit
3f181b4d8652 ("lib/Kconfig.debug: disable -Wframe-larger-than warnings with
KASAN=y").
The kernelci.org build bot however has the warning enabled and that led
me to investigate it a little further, as every build produces these warnings:

net/wireless/nl80211.c:4389:1: warning: the frame size of 2240 bytes is larger than 2048 bytes [-Wframe-larger-than=]
net/wireless/nl80211.c:1895:1: warning: the frame size of 3776 bytes is larger than 2048 bytes [-Wframe-larger-than=]
net/wireless/nl80211.c:1410:1: warning: the frame size of 2208 bytes is larger than 2048 bytes [-Wframe-larger-than=]
net/bridge/br_netlink.c:1282:1: warning: the frame size of 2544 bytes is larger than 2048 bytes [-Wframe-larger-than=]

Most of this problem is now solved in gcc-8, which can consolidate
the stack slots for the inline function arguments. On older compilers
we can add a workaround by declaring a local variable in each function
to pass the inline function argument.
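
For illustration, here is a minimal user-space sketch of the before/after
pattern (the names put_raw, put_u32_old and put_u32_new are made up for
this example and are not kernel API): with an affected gcc and
"-fsanitize=address --param asan-stack=1", every inlined call to the
old-style helper can keep its own red-zoned stack slot for the
address-taken parameter in the caller's frame, while the tmp-based
variant lets the compiler reuse a single slot.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* stand-in for nla_put(): just copies the bytes into a buffer */
static int put_raw(void *buf, const void *data, size_t len)
{
	memcpy(buf, data, len);
	return 0;
}

/* old style: take the address of the parameter itself */
static inline int put_u32_old(void *buf, uint32_t value)
{
	return put_raw(buf, &value, sizeof(value));
}

/* patched style: copy into a local temporary and pass its address */
static inline int put_u32_new(void *buf, uint32_t value)
{
	uint32_t tmp = value;

	return put_raw(buf, &tmp, sizeof(tmp));
}

int main(void)
{
	unsigned char buf[sizeof(uint32_t)];
	uint32_t out;

	/* callers with many such calls are what exceeded CONFIG_FRAME_WARN */
	put_u32_old(buf, 1);
	put_u32_new(buf, 2);

	memcpy(&out, buf, sizeof(out));
	printf("last value written: %" PRIu32 "\n", out);
	return 0;
}
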
Cc: stable@vger.kernel.org
Link: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81715
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <alexander.levin@microsoft.com>
---
include/net/netlink.h | 73 ++++++++++++++++++++++++++++++++++++++-------------
1 file changed, 55 insertions(+), 18 deletions(-)

@@ -745,7 +745,10 @@ static inline int nla_parse_nested(struct nlattr *tb[], int maxtype,
*/
static inline int nla_put_u8(struct sk_buff *skb, int attrtype, u8 value)
{
- return nla_put(skb, attrtype, sizeof(u8), &value);
+ /* temporary variables to work around GCC PR81715 with asan-stack=1 */
+ u8 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(u8), &tmp);
}
/**
@@ -756,7 +759,9 @@ static inline int nla_put_u8(struct sk_buff *skb, int attrtype, u8 value)
*/
static inline int nla_put_u16(struct sk_buff *skb, int attrtype, u16 value)
{
- return nla_put(skb, attrtype, sizeof(u16), &value);
+ u16 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(u16), &tmp);
}
/**
@@ -767,7 +772,9 @@ static inline int nla_put_u16(struct sk_buff *skb, int attrtype, u16 value)
*/
static inline int nla_put_be16(struct sk_buff *skb, int attrtype, __be16 value)
{
- return nla_put(skb, attrtype, sizeof(__be16), &value);
+ __be16 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(__be16), &tmp);
}
/**
@@ -778,7 +785,9 @@ static inline int nla_put_be16(struct sk_buff *skb, int attrtype, __be16 value)
*/
static inline int nla_put_net16(struct sk_buff *skb, int attrtype, __be16 value)
{
- return nla_put_be16(skb, attrtype | NLA_F_NET_BYTEORDER, value);
+ __be16 tmp = value;
+
+ return nla_put_be16(skb, attrtype | NLA_F_NET_BYTEORDER, tmp);
}
/**
@@ -789,7 +798,9 @@ static inline int nla_put_net16(struct sk_buff *skb, int attrtype, __be16 value)
*/
static inline int nla_put_le16(struct sk_buff *skb, int attrtype, __le16 value)
{
- return nla_put(skb, attrtype, sizeof(__le16), &value);
+ __le16 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(__le16), &tmp);
}
/**
@@ -800,7 +811,9 @@ static inline int nla_put_le16(struct sk_buff *skb, int attrtype, __le16 value)
*/
static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value)
{
- return nla_put(skb, attrtype, sizeof(u32), &value);
+ u32 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(u32), &tmp);
}
/**
@@ -811,7 +824,9 @@ static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value)
*/
static inline int nla_put_be32(struct sk_buff *skb, int attrtype, __be32 value)
{
- return nla_put(skb, attrtype, sizeof(__be32), &value);
+ __be32 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(__be32), &tmp);
}
/**
@@ -822,7 +837,9 @@ static inline int nla_put_be32(struct sk_buff *skb, int attrtype, __be32 value)
*/
static inline int nla_put_net32(struct sk_buff *skb, int attrtype, __be32 value)
{
- return nla_put_be32(skb, attrtype | NLA_F_NET_BYTEORDER, value);
+ __be32 tmp = value;
+
+ return nla_put_be32(skb, attrtype | NLA_F_NET_BYTEORDER, tmp);
}
/**
@@ -833,7 +850,9 @@ static inline int nla_put_net32(struct sk_buff *skb, int attrtype, __be32 value)
*/
static inline int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value)
{
- return nla_put(skb, attrtype, sizeof(__le32), &value);
+ __le32 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(__le32), &tmp);
}
/**
@@ -844,7 +863,9 @@ static inline int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value)
*/
static inline int nla_put_u64(struct sk_buff *skb, int attrtype, u64 value)
{
- return nla_put(skb, attrtype, sizeof(u64), &value);
+ u64 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(u64), &tmp);
}
/**
@@ -855,7 +876,9 @@ static inline int nla_put_u64(struct sk_buff *skb, int attrtype, u64 value)
*/
static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value)
{
- return nla_put(skb, attrtype, sizeof(__be64), &value);
+ __be64 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(__be64), &tmp);
}
/**
@@ -866,7 +889,9 @@ static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value)
*/
static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value)
{
- return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, value);
+ __be64 tmp = value;
+
+ return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, tmp);
}
/**
@@ -877,7 +902,9 @@ static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value)
*/
static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value)
{
- return nla_put(skb, attrtype, sizeof(__le64), &value);
+ __le64 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(__le64), &tmp);
}
/**
@@ -888,7 +915,9 @@ static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value)
*/
static inline int nla_put_s8(struct sk_buff *skb, int attrtype, s8 value)
{
- return nla_put(skb, attrtype, sizeof(s8), &value);
+ s8 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(s8), &tmp);
}
/**
@@ -899,7 +928,9 @@ static inline int nla_put_s8(struct sk_buff *skb, int attrtype, s8 value)
*/
static inline int nla_put_s16(struct sk_buff *skb, int attrtype, s16 value)
{
- return nla_put(skb, attrtype, sizeof(s16), &value);
+ s16 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(s16), &tmp);
}
/**
@@ -910,7 +941,9 @@ static inline int nla_put_s16(struct sk_buff *skb, int attrtype, s16 value)
*/
static inline int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value)
{
- return nla_put(skb, attrtype, sizeof(s32), &value);
+ s32 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(s32), &tmp);
}
/**
@@ -921,7 +954,9 @@ static inline int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value)
*/
static inline int nla_put_s64(struct sk_buff *skb, int attrtype, s64 value)
{
- return nla_put(skb, attrtype, sizeof(s64), &value);
+ s64 tmp = value;
+
+ return nla_put(skb, attrtype, sizeof(s64), &tmp);
}
/**
@@ -969,7 +1004,9 @@ static inline int nla_put_msecs(struct sk_buff *skb, int attrtype,
static inline int nla_put_in_addr(struct sk_buff *skb, int attrtype,
__be32 addr)
{
- return nla_put_be32(skb, attrtype, addr);
+ __be32 tmp = addr;
+
+ return nla_put_be32(skb, attrtype, tmp);
}
/**
--
2.14.1