@@ -2946,6 +2946,11 @@ static void mptcp_release_cb(struct sock *sk)
spin_lock_bh(&sk->sk_lock.slock);
}
+ /* be sure to set the current sk state before taking actions
+ * that depend on sk_state
+ */
+ if (test_and_clear_bit(MPTCP_CONNECTED, &mptcp_sk(sk)->flags))
+ __mptcp_set_connected(sk);
if (test_and_clear_bit(MPTCP_CLEAN_UNA, &mptcp_sk(sk)->flags))
__mptcp_clean_una_wakeup(sk);
if (test_and_clear_bit(MPTCP_ERROR_REPORT, &mptcp_sk(sk)->flags))
@@ -109,6 +109,7 @@
#define MPTCP_ERROR_REPORT 8
#define MPTCP_RETRANSMIT 9
#define MPTCP_WORK_SYNC_SETSOCKOPT 10
+#define MPTCP_CONNECTED 11
static inline bool before64(__u64 seq1, __u64 seq2)
{
@@ -579,6 +580,7 @@ void mptcp_get_options(const struct sk_buff *skb,
struct mptcp_options_received *mp_opt);
void mptcp_finish_connect(struct sock *sk);
+void __mptcp_set_connected(struct sock *sk);
static inline bool mptcp_is_fully_established(struct sock *sk)
{
return inet_sk_state_load(sk) == TCP_ESTABLISHED &&
@@ -371,6 +371,31 @@ static bool subflow_use_different_dport(struct mptcp_sock *msk, const struct sock *sk)
return inet_sk(sk)->inet_dport != inet_sk((struct sock *)msk)->inet_dport;
}
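+/* move the msk to TCP_ESTABLISHED and wake up any waiter; runs under
+ * the msk data lock, either from the release callback or from
+ * mptcp_set_connected() below
+ */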
+void __mptcp_set_connected(struct sock *sk)
+{
+ if (sk->sk_state == TCP_SYN_SENT) {
+ inet_sk_state_store(sk, TCP_ESTABLISHED);
+ sk->sk_state_change(sk);
+ }
+}
+
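+/* set the msk to TCP_ESTABLISHED, deferring the state change to the
+ * release callback when user-space currently owns the msk socket lock
+ */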
+static void mptcp_set_connected(struct sock *sk)
+{
+ mptcp_data_lock(sk);
+ if (!sock_owned_by_user(sk))
+ __mptcp_set_connected(sk);
+ else
+ set_bit(MPTCP_CONNECTED, &mptcp_sk(sk)->flags);
+ mptcp_data_unlock(sk);
+}
+
static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
@@ -379,10 +397,6 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
subflow->icsk_af_ops->sk_rx_dst_set(sk, skb);
- if (inet_sk_state_load(parent) == TCP_SYN_SENT) {
- inet_sk_state_store(parent, TCP_ESTABLISHED);
- parent->sk_state_change(parent);
- }
/* be sure no special action on any packet other than syn-ack */
if (subflow->conn_finished)
@@ -411,6 +425,8 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
subflow->remote_key);
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVEACK);
mptcp_finish_connect(sk);
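+ /* the subflow is established: propagate the state change to the msk */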
+ mptcp_set_connected(parent);
} else if (subflow->request_join) {
u8 hmac[SHA256_DIGEST_SIZE];
@@ -451,6 +466,7 @@ static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
} else if (mptcp_check_fallback(sk)) {
fallback:
mptcp_rcv_space_init(mptcp_sk(parent), sk);
+ mptcp_set_connected(parent);
}
return;
@@ -558,6 +574,7 @@ static void mptcp_sock_destruct(struct sock *sk)
static void mptcp_force_close(struct sock *sk)
{
+ /* the msk is not yet exposed to user-space, no need to defer the state change */
inet_sk_state_store(sk, TCP_CLOSE);
sk_common_release(sk);
}
@@ -1474,10 +1491,7 @@ static void subflow_state_change(struct sock *sk)
mptcp_rcv_space_init(mptcp_sk(parent), sk);
pr_fallback(mptcp_sk(parent));
subflow->conn_finished = 1;
- if (inet_sk_state_load(parent) == TCP_SYN_SENT) {
- inet_sk_state_store(parent, TCP_ESTABLISHED);
- parent->sk_state_change(parent);
- }
+ mptcp_set_connected(parent);
}
/* as recvmsg() does not acquire the subflow socket for ssk selection