commit 26b87c7881
This scenario is not limited to ASCONF; it is just taken as one example that triggers the issue. When receiving ASCONF probes in the form of ...

  -------------- INIT[ASCONF; ASCONF_ACK] ------------->
  <----------- INIT-ACK[ASCONF; ASCONF_ACK] ------------
  -------------------- COOKIE-ECHO -------------------->
  <-------------------- COOKIE-ACK ---------------------
  ---- ASCONF_a; [ASCONF_b; ...; ASCONF_n;] JUNK ------>
  [...]
  ---- ASCONF_m; [ASCONF_o; ...; ASCONF_z;] JUNK ------>

... where ASCONF_a, ASCONF_b, ..., ASCONF_z are well-formed ASCONFs with increasing serial numbers, we process such ASCONF chunk(s) marked with !end_of_packet and !singleton, since we have not yet reached the end of the SCTP packet. SCTP only does verification on a chunk-by-chunk basis, as an SCTP packet is nothing more than a container for a stream of chunks, which it eats up one by one.

We can run into the case that we receive a packet with a malformed tail, marked above as trailing JUNK. All preceding chunks are well-formed, so the stack eats up everything up to this point. If JUNK does not fit into a chunk header and there are no other chunks left in the input queue, or if JUNK contains a garbage chunk header whose encoded chunk length would exceed the skb tail, or if we arrived here from an entirely different scenario and the chunk carries the pdiscard=1 mark (without having had a flush point), then we excessively queue up the association's output queue (a correct final chunk may then turn this into a response flood when the queue is flushed ;)): I ran a simple script with incremental ASCONF serial numbers and could see the server side consuming an excessive amount of RAM [before/after: up to 2 GB and more].
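The chunk-by-chunk walk described above can be pictured with a small, stand-alone sketch in plain user-space C. It is not kernel code, and names such as junk_chunkhdr, JUNK_WORD_ROUND and walk_chunks are illustrative assumptions; it only mirrors the length rounding and the over-long-tail check that sctp_inq_pop() in the listing further below performs, including the idea of marking a malformed tail for a later discard rather than silently dropping it.

#include <stddef.h>
#include <stdint.h>
#include <arpa/inet.h>

/* Chunk lengths are rounded up to a 4-byte boundary, as on the wire. */
#define JUNK_WORD_ROUND(len)    (((len) + 3) & ~3U)

struct junk_chunkhdr {
        uint8_t  type;
        uint8_t  flags;
        uint16_t length;        /* network byte order, includes this header */
};

/* Walk the chunks of one packet.  Returns the number of well-formed
 * chunks; *pdiscard is set when a chunk header claims more data than the
 * packet actually carries (the trailing JUNK case above) or carries a
 * bogus short length.  Leftover bytes too short to hold a chunk header
 * are simply ignored.
 */
size_t walk_chunks(const uint8_t *pkt, size_t pkt_len, int *pdiscard)
{
        size_t off = 0, good = 0;

        *pdiscard = 0;
        while (pkt_len - off >= sizeof(struct junk_chunkhdr)) {
                const struct junk_chunkhdr *ch =
                        (const struct junk_chunkhdr *)(pkt + off);
                size_t clen = JUNK_WORD_ROUND(ntohs(ch->length));

                if (clen < sizeof(*ch) || clen > pkt_len - off) {
                        /* Mark the tail for discard instead of dropping it
                         * silently, so the caller still reaches a flush
                         * point for the responses already queued.
                         */
                        *pdiscard = 1;
                        break;
                }
                good++;
                off += clen;
        }
        return good;
}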
("sctp: Follow security requirement of responding with 1 packet") therefore preventing an output queue flush point in sctp_do_sm() -> sctp_cmd_interpreter() on the input chunk (chunk = event_arg) even though local_cork is set, but its precedence has changed since then. In the normal case, the last chunk with end_of_packet=1 would trigger the queue flush to accommodate possible outgoing bundling. In the input queue, sctp_inq_pop() seems to do the right thing in terms of discarding invalid chunks. So, above JUNK will not enter the state machine and instead be released and exit the sctp_assoc_bh_rcv() chunk processing loop. It's simply the flush point being missing at loop exit. Adding a try-flush approach on the output queue might not work as the underlying infrastructure might be long gone at this point due to the side-effect interpreter run. One possibility, albeit a bit of a kludge, would be to defer invalid chunk freeing into the state machine in order to possibly trigger packet discards and thus indirectly a queue flush on error. It would surely be better to discard chunks as in the current, perhaps better controlled environment, but going back and forth, it's simply architecturally not possible. I tried various trailing JUNK attack cases and it seems to look good now. Joint work with Vlad Yasevich. Fixes:2e3216cd54
("sctp: Follow security requirement of responding with 1 packet") Signed-off-by: Daniel Borkmann <dborkman@redhat.com> Signed-off-by: Vlad Yasevich <vyasevich@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
/* SCTP kernel implementation
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2002 International Business Machines, Corp.
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions are the methods for accessing the SCTP inqueue.
 *
 * An SCTP inqueue is a queue into which you push SCTP packets
 * (which might be bundles or fragments of chunks) and out of which you
 * pop SCTP whole chunks.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson <karl@athena.chicago.il.us>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
/* Initialize an SCTP inqueue. */
void sctp_inq_init(struct sctp_inq *queue)
{
        INIT_LIST_HEAD(&queue->in_chunk_list);
        queue->in_progress = NULL;

        /* Create a task for delivering data. */
        INIT_WORK(&queue->immediate, NULL);
}

/* Release the memory associated with an SCTP inqueue. */
void sctp_inq_free(struct sctp_inq *queue)
{
        struct sctp_chunk *chunk, *tmp;

        /* Empty the queue. */
        list_for_each_entry_safe(chunk, tmp, &queue->in_chunk_list, list) {
                list_del_init(&chunk->list);
                sctp_chunk_free(chunk);
        }

        /* If there is a packet which is currently being worked on,
         * free it as well.
         */
        if (queue->in_progress) {
                sctp_chunk_free(queue->in_progress);
                queue->in_progress = NULL;
        }
}
/* Put a new packet in an SCTP inqueue.
 * We assume that packet->sctp_hdr is set and in host byte order.
 */
void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk)
{
        /* Directly call the packet handling routine. */
        if (chunk->rcvr->dead) {
                sctp_chunk_free(chunk);
                return;
        }

        /* We are now calling this either from the soft interrupt
         * or from the backlog processing.
         * Eventually, we should clean up inqueue to not rely
         * on the BH related data structures.
         */
        list_add_tail(&chunk->list, &q->in_chunk_list);
        if (chunk->asoc)
                chunk->asoc->stats.ipackets++;
        q->immediate.func(&q->immediate);
}
/* Peek at the next chunk on the inqueue. */
struct sctp_chunkhdr *sctp_inq_peek(struct sctp_inq *queue)
{
        struct sctp_chunk *chunk;
        sctp_chunkhdr_t *ch = NULL;

        chunk = queue->in_progress;
        /* If there are no more chunks in this packet, say so. */
        if (chunk->singleton ||
            chunk->end_of_packet ||
            chunk->pdiscard)
                return NULL;

        ch = (sctp_chunkhdr_t *)chunk->chunk_end;

        return ch;
}
/* Extract a chunk from an SCTP inqueue.
 *
 * WARNING:  If you need to put the chunk on another queue, you need to
 * make a shallow copy (clone) of it.
 */
struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
{
        struct sctp_chunk *chunk;
        sctp_chunkhdr_t *ch = NULL;

        /* The assumption is that we are safe to process the chunks
         * at this time.
         */

        if ((chunk = queue->in_progress)) {
                /* There is a packet that we have been working on.
                 * Any post processing work to do before we move on?
                 */
                if (chunk->singleton ||
                    chunk->end_of_packet ||
                    chunk->pdiscard) {
                        sctp_chunk_free(chunk);
                        chunk = queue->in_progress = NULL;
                } else {
                        /* Nothing to do. Next chunk in the packet, please. */
                        ch = (sctp_chunkhdr_t *) chunk->chunk_end;
                        /* Force chunk->skb->data to chunk->chunk_end. */
                        skb_pull(chunk->skb, chunk->chunk_end - chunk->skb->data);
                        /* We are guaranteed to pull a SCTP header. */
                }
        }

        /* Do we need to take the next packet out of the queue to process? */
        if (!chunk) {
                struct list_head *entry;

                /* Is the queue empty? */
                if (list_empty(&queue->in_chunk_list))
                        return NULL;

                entry = queue->in_chunk_list.next;
                chunk = queue->in_progress =
                        list_entry(entry, struct sctp_chunk, list);
                list_del_init(entry);

                /* This is the first chunk in the packet. */
                chunk->singleton = 1;
                ch = (sctp_chunkhdr_t *) chunk->skb->data;
                chunk->data_accepted = 0;
        }

        chunk->chunk_hdr = ch;
        chunk->chunk_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
        /* In the unlikely case of an IP reassembly, the skb could be
         * non-linear. If so, update chunk_end so that it doesn't go past
         * the skb->tail.
         */
        if (unlikely(skb_is_nonlinear(chunk->skb))) {
                if (chunk->chunk_end > skb_tail_pointer(chunk->skb))
                        chunk->chunk_end = skb_tail_pointer(chunk->skb);
        }
        skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t));
        chunk->subh.v = NULL; /* Subheader is no longer valid. */

        if (chunk->chunk_end + sizeof(sctp_chunkhdr_t) <
            skb_tail_pointer(chunk->skb)) {
                /* This is not a singleton */
                chunk->singleton = 0;
        } else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) {
                /* Discard inside state machine. */
                chunk->pdiscard = 1;
                chunk->chunk_end = skb_tail_pointer(chunk->skb);
        } else {
                /* We are at the end of the packet, so mark the chunk
                 * in case we need to send a SACK.
                 */
                chunk->end_of_packet = 1;
        }

        pr_debug("+++sctp_inq_pop+++ chunk:%p[%s], length:%d, skb->len:%d\n",
                 chunk, sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)),
                 ntohs(chunk->chunk_hdr->length), chunk->skb->len);

        return chunk;
}
/* Set a top-half handler.
 *
 * Originally, the top-half handler was scheduled as a BH.  We now
 * call the handler directly in sctp_inq_push() at a time that
 * we know we are lock safe.
 * The intent is that this routine will pull stuff out of the
 * inqueue and process it.
 */
void sctp_inq_set_th_handler(struct sctp_inq *q, work_func_t callback)
{
        INIT_WORK(&q->immediate, callback);
}