mirror of
https://github.com/torvalds/linux.git
synced 2025-11-01 17:18:25 +02:00
tls: handle data disappearing from under the TLS ULP
TLS expects that it owns the receive queue of the TCP socket.
This cannot be guaranteed in case the reader of the TCP socket
entered before the TLS ULP was installed, or uses some non-standard
read API (e.g. zerocopy ones). Replace the WARN_ON() and a buggy
early exit (which leaves anchor pointing to a freed skb) with real
error handling. Wipe the parsing state and tell the reader to retry.
We already reload the anchor every time we (re)acquire the socket lock,
so the only condition we need to avoid is an out of bounds read
(not having enough bytes in the socket for previously parsed record len).
If some data was read from under TLS but there's enough in the queue
we'll reload and decrypt what is most likely not a valid TLS record.
Leading to some undefined behavior from TLS perspective (corrupting
a stream? missing an alert? missing an attack?) but no kernel crash
should take place.
Reported-by: William Liu <will@willsroot.io>
Reported-by: Savino Dicanosa <savy@syst3mfailure.io>
Link: https://lore.kernel.org/tFjq_kf7sWIG3A7CrCg_egb8CVsT_gsmHAK0_wxDPJXfIzxFAMxqmLwp3MlU5EHiet0AwwJldaaFdgyHpeIUCS-3m3llsmRzp9xIOBR4lAI=@syst3mfailure.io
Fixes: 84c61fe1a7 ("tls: rx: do not use the standard strparser")
Reviewed-by: Eric Dumazet <edumazet@google.com>
Link: https://patch.msgid.link/20250807232907.600366-1-kuba@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
parent
2efe41234d
commit
6db015fc4b
3 changed files with 11 additions and 5 deletions
|
|
@@ -196,7 +196,7 @@ void tls_strp_msg_done(struct tls_strparser *strp);
|
|||
int tls_rx_msg_size(struct tls_strparser *strp, struct sk_buff *skb);
|
||||
void tls_rx_msg_ready(struct tls_strparser *strp);
|
||||
|
||||
void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh);
|
||||
bool tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh);
|
||||
int tls_strp_msg_cow(struct tls_sw_context_rx *ctx);
|
||||
struct sk_buff *tls_strp_msg_detach(struct tls_sw_context_rx *ctx);
|
||||
int tls_strp_msg_hold(struct tls_strparser *strp, struct sk_buff_head *dst);
|
||||
|
|
|
|||
|
|
@@ -475,7 +475,7 @@ static void tls_strp_load_anchor_with_queue(struct tls_strparser *strp, int len)
|
|||
strp->stm.offset = offset;
|
||||
}
|
||||
|
||||
void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh)
|
||||
bool tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh)
|
||||
{
|
||||
struct strp_msg *rxm;
|
||||
struct tls_msg *tlm;
|
||||
|
|
@@ -484,8 +484,11 @@ void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh)
|
|||
DEBUG_NET_WARN_ON_ONCE(!strp->stm.full_len);
|
||||
|
||||
if (!strp->copy_mode && force_refresh) {
|
||||
if (WARN_ON(tcp_inq(strp->sk) < strp->stm.full_len))
|
||||
return;
|
||||
if (unlikely(tcp_inq(strp->sk) < strp->stm.full_len)) {
|
||||
WRITE_ONCE(strp->msg_ready, 0);
|
||||
memset(&strp->stm, 0, sizeof(strp->stm));
|
||||
return false;
|
||||
}
|
||||
|
||||
tls_strp_load_anchor_with_queue(strp, strp->stm.full_len);
|
||||
}
|
||||
|
|
@@ -495,6 +498,8 @@ void tls_strp_msg_load(struct tls_strparser *strp, bool force_refresh)
|
|||
rxm->offset = strp->stm.offset;
|
||||
tlm = tls_msg(strp->anchor);
|
||||
tlm->control = strp->mark;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Called with lock held on lower socket */
|
||||
|
|
|
|||
|
|
@@ -1384,7 +1384,8 @@ tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
|
|||
return sock_intr_errno(timeo);
|
||||
}
|
||||
|
||||
tls_strp_msg_load(&ctx->strp, released);
|
||||
if (unlikely(!tls_strp_msg_load(&ctx->strp, released)))
|
||||
return tls_rx_rec_wait(sk, psock, nonblock, false);
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
|
|
|||
Loading…
Reference in a new issue