@@ -1263,11 +1263,14 @@ static void mptcp_close(struct sock *sk, long timeout)
12631263
12641264 lock_sock (sk );
12651265
1266- mptcp_token_destroy (msk -> token );
12671266 inet_sk_state_store (sk , TCP_CLOSE );
12681267
1269- __mptcp_flush_join_list (msk );
1270-
1268+ /* be sure to always acquire the join list lock, to sync vs
1269+ * mptcp_finish_join().
1270+ */
1271+ spin_lock_bh (& msk -> join_list_lock );
1272+ list_splice_tail_init (& msk -> join_list , & msk -> conn_list );
1273+ spin_unlock_bh (& msk -> join_list_lock );
12711274 list_splice_init (& msk -> conn_list , & conn_list );
12721275
12731276 data_fin_tx_seq = msk -> write_seq ;
@@ -1457,6 +1460,7 @@ static void mptcp_destroy(struct sock *sk)
14571460{
14581461 struct mptcp_sock * msk = mptcp_sk (sk );
14591462
1463+ mptcp_token_destroy (msk -> token );
14601464 if (msk -> cached_ext )
14611465 __skb_ext_put (msk -> cached_ext );
14621466
@@ -1623,22 +1627,30 @@ bool mptcp_finish_join(struct sock *sk)
16231627 if (!msk -> pm .server_side )
16241628 return true;
16251629
1626- /* passive connection, attach to msk socket */
1630+ if (!mptcp_pm_allow_new_subflow (msk ))
1631+ return false;
1632+
1633+ /* active connections are already on conn_list, and we can't acquire
1634+ * msk lock here.
1635+ * use the join list lock as a synchronization point and double-check
1636+ * msk status to avoid racing with mptcp_close()
1637+ */
1638+ spin_lock_bh (& msk -> join_list_lock );
1639+ ret = inet_sk_state_load (parent ) == TCP_ESTABLISHED ;
1640+ if (ret && !WARN_ON_ONCE (!list_empty (& subflow -> node )))
1641+ list_add_tail (& subflow -> node , & msk -> join_list );
1642+ spin_unlock_bh (& msk -> join_list_lock );
1643+ if (!ret )
1644+ return false;
1645+
1646+ /* attach to msk socket only after we are sure it will deal with us
1647+ * at close time
1648+ */
16271649 parent_sock = READ_ONCE (parent -> sk_socket );
16281650 if (parent_sock && !sk -> sk_socket )
16291651 mptcp_sock_graft (sk , parent_sock );
1630-
1631- ret = mptcp_pm_allow_new_subflow (msk );
1632- if (ret ) {
1633- subflow -> map_seq = msk -> ack_seq ;
1634-
1635- /* active connections are already on conn_list */
1636- spin_lock_bh (& msk -> join_list_lock );
1637- if (!WARN_ON_ONCE (!list_empty (& subflow -> node )))
1638- list_add_tail (& subflow -> node , & msk -> join_list );
1639- spin_unlock_bh (& msk -> join_list_lock );
1640- }
1641- return ret ;
1652+ subflow -> map_seq = msk -> ack_seq ;
1653+ return true;
16421654}
16431655
16441656bool mptcp_sk_is_subflow (const struct sock * sk )
@@ -1712,6 +1724,14 @@ static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr,
17121724 int err ;
17131725
17141726 lock_sock (sock -> sk );
1727+ if (sock -> state != SS_UNCONNECTED && msk -> subflow ) {
1728+ /* pending connection or invalid state, let existing subflow
1729+ * cope with that
1730+ */
1731+ ssock = msk -> subflow ;
1732+ goto do_connect ;
1733+ }
1734+
17151735 ssock = __mptcp_socket_create (msk , TCP_SYN_SENT );
17161736 if (IS_ERR (ssock )) {
17171737 err = PTR_ERR (ssock );
@@ -1726,9 +1746,17 @@ static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr,
17261746 mptcp_subflow_ctx (ssock -> sk )-> request_mptcp = 0 ;
17271747#endif
17281748
1749+ do_connect :
17291750 err = ssock -> ops -> connect (ssock , uaddr , addr_len , flags );
1730- inet_sk_state_store (sock -> sk , inet_sk_state_load (ssock -> sk ));
1731- mptcp_copy_inaddrs (sock -> sk , ssock -> sk );
1751+ sock -> state = ssock -> state ;
1752+
1753+ /* on successful connect, the msk state will be moved to established by
1754+ * subflow_finish_connect()
1755+ */
1756+ if (!err || err == -EINPROGRESS )
1757+ mptcp_copy_inaddrs (sock -> sk , ssock -> sk );
1758+ else
1759+ inet_sk_state_store (sock -> sk , inet_sk_state_load (ssock -> sk ));
17321760
17331761unlock :
17341762 release_sock (sock -> sk );
0 commit comments