author:    Paolo Abeni <pabeni@redhat.com>    2020-11-27 11:10:26 +0100
committer: Jakub Kicinski <kuba@kernel.org>   2020-12-01 02:55:23 +0100
commit:    7439d687b79cbbd971c6a170be9aefda4a564be4 (patch)
tree:      0c3bf91c5429a40d22f78ec7aaa75e052370fd29 /net/mptcp/options.c
parent:    mptcp: allocate TX skbs in msk context (diff)
mptcp: avoid a few atomic ops in the rx path
By extending the data_lock scope in mptcp_incoming_options() we can use
that lock to protect both snd_una and wnd_end. In the typical case, this
means a single atomic operation instead of two.
Acked-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Reviewed-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'net/mptcp/options.c')
-rw-r--r--	net/mptcp/options.c	33
1 file changed, 13 insertions(+), 20 deletions(-)
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index 8a59b3e44599..3986454a0340 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -833,15 +833,17 @@ static void ack_update_msk(struct mptcp_sock *msk,
 			   const struct sock *ssk,
 			   struct mptcp_options_received *mp_opt)
 {
-	u64 new_snd_una, snd_una, old_snd_una = atomic64_read(&msk->snd_una);
-	u64 new_wnd_end, wnd_end, old_wnd_end = atomic64_read(&msk->wnd_end);
-	u64 snd_nxt = READ_ONCE(msk->snd_nxt);
+	u64 new_wnd_end, new_snd_una, snd_nxt = READ_ONCE(msk->snd_nxt);
 	struct sock *sk = (struct sock *)msk;
+	u64 old_snd_una;
+
+	mptcp_data_lock(sk);
 
 	/* avoid ack expansion on update conflict, to reduce the risk of
 	 * wrongly expanding to a future ack sequence number, which is way
 	 * more dangerous than missing an ack
 	 */
+	old_snd_una = msk->snd_una;
 	new_snd_una = expand_ack(old_snd_una, mp_opt->data_ack, mp_opt->ack64);
 
 	/* ACK for data not even sent yet? Ignore. */
@@ -850,26 +852,17 @@ static void ack_update_msk(struct mptcp_sock *msk,
 
 	new_wnd_end = new_snd_una + tcp_sk(ssk)->snd_wnd;
 
-	while (after64(new_wnd_end, old_wnd_end)) {
-		wnd_end = old_wnd_end;
-		old_wnd_end = atomic64_cmpxchg(&msk->wnd_end, wnd_end,
-					       new_wnd_end);
-		if (old_wnd_end == wnd_end) {
-			if (mptcp_send_head(sk))
-				mptcp_schedule_work(sk);
-			break;
-		}
+	if (after64(new_wnd_end, msk->wnd_end)) {
+		msk->wnd_end = new_wnd_end;
+		if (mptcp_send_head(sk))
+			mptcp_schedule_work(sk);
 	}
 
-	while (after64(new_snd_una, old_snd_una)) {
-		snd_una = old_snd_una;
-		old_snd_una = atomic64_cmpxchg(&msk->snd_una, snd_una,
-					       new_snd_una);
-		if (old_snd_una == snd_una) {
-			mptcp_data_acked(sk);
-			break;
-		}
+	if (after64(new_snd_una, old_snd_una)) {
+		msk->snd_una = new_snd_una;
+		__mptcp_data_acked(sk);
 	}
+	mptcp_data_unlock(sk);
 }
 
 bool mptcp_update_rcv_data_fin(struct mptcp_sock *msk, u64 data_fin_seq,
 			       bool use_64bit)
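
The sketch below is a minimal userspace illustration (not kernel code) of the change in update strategy: the old lock-free atomic64_cmpxchg() retry loop versus a plain compare-and-store performed while the msk-level data lock is held. C11 atomics and a pthread mutex stand in for the kernel's atomic64_t and mptcp_data_lock()/mptcp_data_unlock(); the helper names (update_snd_una_cmpxchg, update_snd_una_locked) are illustrative only and do not exist in the tree.

/* Userspace sketch: old cmpxchg-retry update vs. new update under the
 * socket data lock, as applied to msk->snd_una by this patch.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static inline bool after64(uint64_t seq1, uint64_t seq2)
{
	return (int64_t)(seq1 - seq2) > 0;	/* wrap-safe "seq1 > seq2" */
}

/* Old style: lock-free update; a concurrent writer forces a retry,
 * so each call costs at least one atomic read-modify-write.
 */
static void update_snd_una_cmpxchg(_Atomic uint64_t *snd_una, uint64_t new_snd_una)
{
	uint64_t old = atomic_load(snd_una);

	while (after64(new_snd_una, old)) {
		/* On failure, 'old' is refreshed and the loop re-checks. */
		if (atomic_compare_exchange_weak(snd_una, &old, new_snd_una))
			break;
	}
}

/* New style: the caller serializes via the data lock, so a plain
 * compare and store suffices; one lock/unlock pair can cover both
 * the snd_una and wnd_end updates.
 */
static void update_snd_una_locked(pthread_mutex_t *lock, uint64_t *snd_una,
				  uint64_t new_snd_una)
{
	pthread_mutex_lock(lock);
	if (after64(new_snd_una, *snd_una))
		*snd_una = new_snd_una;
	pthread_mutex_unlock(lock);
}

int main(void)
{
	_Atomic uint64_t a_snd_una = 100;
	uint64_t snd_una = 100;
	pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	update_snd_una_cmpxchg(&a_snd_una, 150);
	update_snd_una_locked(&lock, &snd_una, 150);
	printf("cmpxchg: %llu, locked: %llu\n",
	       (unsigned long long)atomic_load(&a_snd_una),
	       (unsigned long long)snd_una);
	return 0;
}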