Diffstat (limited to 'net/sctp')
-rw-r--r-- | net/sctp/Kconfig | 103
-rw-r--r-- | net/sctp/Makefile | 21
-rw-r--r-- | net/sctp/associola.c | 1691
-rw-r--r-- | net/sctp/auth.c | 953
-rw-r--r-- | net/sctp/bind_addr.c | 552
-rw-r--r-- | net/sctp/chunk.c | 365
-rw-r--r-- | net/sctp/debug.c | 172
-rw-r--r-- | net/sctp/endpointola.c | 501
-rw-r--r-- | net/sctp/input.c | 1141
-rw-r--r-- | net/sctp/inqueue.c | 214
-rw-r--r-- | net/sctp/ipv6.c | 1076
-rw-r--r-- | net/sctp/objcnt.c | 142
-rw-r--r-- | net/sctp/output.c | 788
-rw-r--r-- | net/sctp/outqueue.c | 1787
-rw-r--r-- | net/sctp/primitive.c | 213
-rw-r--r-- | net/sctp/probe.c | 243
-rw-r--r-- | net/sctp/proc.c | 555
-rw-r--r-- | net/sctp/protocol.c | 1550
-rw-r--r-- | net/sctp/sm_make_chunk.c | 3501
-rw-r--r-- | net/sctp/sm_sideeffect.c | 1751
-rw-r--r-- | net/sctp/sm_statefuns.c | 6301
-rw-r--r-- | net/sctp/sm_statetable.c | 933
-rw-r--r-- | net/sctp/socket.c | 7419
-rw-r--r-- | net/sctp/ssnmap.c | 125
-rw-r--r-- | net/sctp/sysctl.c | 501
-rw-r--r-- | net/sctp/transport.c | 656
-rw-r--r-- | net/sctp/tsnmap.c | 379
-rw-r--r-- | net/sctp/ulpevent.c | 1065
-rw-r--r-- | net/sctp/ulpqueue.c | 1144
29 files changed, 35842 insertions, 0 deletions
diff --git a/net/sctp/Kconfig b/net/sctp/Kconfig
new file mode 100644
index 000000000..71c1a598d
--- /dev/null
+++ b/net/sctp/Kconfig
@@ -0,0 +1,103 @@
+#
+# SCTP configuration
+#
+
+menuconfig IP_SCTP
+	tristate "The SCTP Protocol"
+	depends on INET
+	depends on IPV6 || IPV6=n
+	select CRYPTO
+	select CRYPTO_HMAC
+	select CRYPTO_SHA1
+	select LIBCRC32C
+	---help---
+	  Stream Control Transmission Protocol
+
+	  From RFC 2960 <http://www.ietf.org/rfc/rfc2960.txt>.
+
+	  "SCTP is a reliable transport protocol operating on top of a
+	  connectionless packet network such as IP.  It offers the following
+	  services to its users:
+
+	  -- acknowledged error-free non-duplicated transfer of user data,
+	  -- data fragmentation to conform to discovered path MTU size,
+	  -- sequenced delivery of user messages within multiple streams,
+	  with an option for order-of-arrival delivery of individual user
+	  messages,
+	  -- optional bundling of multiple user messages into a single SCTP
+	  packet, and
+	  -- network-level fault tolerance through supporting of multi-
+	  homing at either or both ends of an association."
+
+	  To compile this protocol support as a module, choose M here: the
+	  module will be called sctp. Debug messages are handled by the
+	  kernel's dynamic debugging framework.
+
+	  If in doubt, say N.
+
+if IP_SCTP
+
+config NET_SCTPPROBE
+	tristate "SCTP: Association probing"
+	depends on PROC_FS && KPROBES
+	---help---
+	  This module allows for capturing the changes to SCTP association
+	  state in response to incoming packets. It is used for debugging
+	  SCTP congestion control algorithms. If you don't understand
+	  what was just said, you don't need it: say N.
+
+	  To compile this code as a module, choose M here: the
+	  module will be called sctp_probe.
+
+config SCTP_DBG_OBJCNT
+	bool "SCTP: Debug object counts"
+	depends on PROC_FS
+	help
+	  If you say Y, this will enable debugging support for counting the
+	  type of objects that are currently allocated.  This is useful for
+	  identifying memory leaks. This debug information can be viewed by
+	  'cat /proc/net/sctp/sctp_dbg_objcnt'
+
+	  If unsure, say N.
+
+choice
+	prompt "Default SCTP cookie HMAC encoding"
+	default SCTP_DEFAULT_COOKIE_HMAC_MD5
+	help
+	  This option sets the default SCTP cookie HMAC algorithm;
+	  when in doubt, select 'md5'.
+
+config SCTP_DEFAULT_COOKIE_HMAC_MD5
+	bool "Enable optional MD5 hmac cookie generation"
+	help
+	  Enable optional MD5 hmac based SCTP cookie generation
+	select SCTP_COOKIE_HMAC_MD5
+
+config SCTP_DEFAULT_COOKIE_HMAC_SHA1
+	bool "Enable optional SHA1 hmac cookie generation"
+	help
+	  Enable optional SHA1 hmac based SCTP cookie generation
+	select SCTP_COOKIE_HMAC_SHA1
+
+config SCTP_DEFAULT_COOKIE_HMAC_NONE
+	bool "Use no hmac alg in SCTP cookie generation"
+	help
+	  Use no hmac algorithm in SCTP cookie generation
+
+endchoice
+
+config SCTP_COOKIE_HMAC_MD5
+	bool "Enable optional MD5 hmac cookie generation"
+	help
+	  Enable optional MD5 hmac based SCTP cookie generation
+	select CRYPTO_HMAC if SCTP_COOKIE_HMAC_MD5
+	select CRYPTO_MD5 if SCTP_COOKIE_HMAC_MD5
+
+config SCTP_COOKIE_HMAC_SHA1
+	bool "Enable optional SHA1 hmac cookie generation"
+	help
+	  Enable optional SHA1 hmac based SCTP cookie generation
+	select CRYPTO_HMAC if SCTP_COOKIE_HMAC_SHA1
+	select CRYPTO_SHA1 if SCTP_COOKIE_HMAC_SHA1
+
+endif # IP_SCTP
diff --git a/net/sctp/Makefile b/net/sctp/Makefile
new file mode 100644
index 000000000..3b4ffb021
--- /dev/null
+++ b/net/sctp/Makefile
@@ -0,0 +1,21 @@
+#
+# Makefile for SCTP support code.
+#
+
+obj-$(CONFIG_IP_SCTP) += sctp.o
+obj-$(CONFIG_NET_SCTPPROBE) += sctp_probe.o
+
+sctp-y := sm_statetable.o sm_statefuns.o sm_sideeffect.o \
+	  protocol.o endpointola.o associola.o \
+	  transport.o chunk.o sm_make_chunk.o ulpevent.o \
+	  inqueue.o outqueue.o ulpqueue.o \
+	  tsnmap.o bind_addr.o socket.o primitive.o \
+	  output.o input.o debug.o ssnmap.o auth.o
+
+sctp_probe-y := probe.o
+
+sctp-$(CONFIG_SCTP_DBG_OBJCNT) += objcnt.o
+sctp-$(CONFIG_PROC_FS) += proc.o
+sctp-$(CONFIG_SYSCTL) += sysctl.o
+
+sctp-$(subst m,y,$(CONFIG_IPV6)) += ipv6.o
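The cookie HMAC choice above only picks a boot-time default; the effective algorithm is a per-namespace runtime setting. A minimal, hypothetical userspace check, assuming the kernel exposes the knob as /proc/sys/net/sctp/cookie_hmac_alg (the procfs form of the net.sctp.cookie_hmac_alg sysctl):

#include <stdio.h>

int main(void)
{
	/* Path assumed from the net.sctp.cookie_hmac_alg sysctl; values
	 * mirror the three Kconfig entries: "md5", "sha1" or "none". */
	FILE *f = fopen("/proc/sys/net/sctp/cookie_hmac_alg", "r");
	char alg[16];

	if (!f || fscanf(f, "%15s", alg) != 1) {
		fprintf(stderr, "cookie_hmac_alg not available\n");
		return 1;
	}
	fclose(f);
	printf("SCTP cookie HMAC: %s\n", alg);
	return 0;
}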
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
new file mode 100644
index 000000000..197c3f59e
--- /dev/null
+++ b/net/sctp/associola.c
@@ -0,0 +1,1691 @@
+/* SCTP kernel implementation
+ * (C) Copyright IBM Corp. 2001, 2004
+ * Copyright (c) 1999-2000 Cisco, Inc.
+ * Copyright (c) 1999-2001 Motorola, Inc.
+ * Copyright (c) 2001 Intel Corp.
+ * Copyright (c) 2001 La Monte H.P. Yarroll
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * This module provides the abstraction for an SCTP association.
+ *
+ * This SCTP implementation is free software;
+ * you can redistribute it and/or modify it under the terms of
+ * the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This SCTP implementation is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ *                 ************************
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNU CC; see the file COPYING.  If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ *    lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ *    La Monte H.P. Yarroll <piggy@acm.org>
+ *    Karl Knutson <karl@athena.chicago.il.us>
+ *    Jon Grimm <jgrimm@us.ibm.com>
+ *    Xingang Guo <xingang.guo@intel.com>
+ *    Hui Huang <hui.huang@nokia.com>
+ *    Sridhar Samudrala <sri@us.ibm.com>
+ *    Daisy Chang <daisyc@us.ibm.com>
+ *    Ryan Layer <rmlayer@us.ibm.com>
+ *    Kevin Gao <kevin.gao@intel.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/poll.h>
+#include <linux/init.h>
+
+#include <linux/slab.h>
+#include <linux/in.h>
+#include <net/ipv6.h>
+#include <net/sctp/sctp.h>
+#include <net/sctp/sm.h>
+
+/* Forward declarations for internal functions. */
+static void sctp_select_active_and_retran_path(struct sctp_association *asoc);
+static void sctp_assoc_bh_rcv(struct work_struct *work);
+static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc);
+static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc);
+
+/* 1st Level Abstractions. */
+
+/* Initialize a new association from provided memory. */
+static struct sctp_association *sctp_association_init(
+					struct sctp_association *asoc,
+					const struct sctp_endpoint *ep,
+					const struct sock *sk,
+					sctp_scope_t scope, gfp_t gfp)
+{
+	struct net *net = sock_net(sk);
+	struct sctp_sock *sp;
+	int i;
+	sctp_paramhdr_t *p;
+	int err;
+
+	/* Retrieve the SCTP per socket area. */
+	sp = sctp_sk((struct sock *)sk);
+
+	/* Discarding const is appropriate here. */
+	asoc->ep = (struct sctp_endpoint *)ep;
+	asoc->base.sk = (struct sock *)sk;
+
+	sctp_endpoint_hold(asoc->ep);
+	sock_hold(asoc->base.sk);
+
+	/* Initialize the common base substructure. */
+	asoc->base.type = SCTP_EP_TYPE_ASSOCIATION;
+
+	/* Initialize the object handling fields. */
+	atomic_set(&asoc->base.refcnt, 1);
+
+	/* Initialize the bind addr area. */
+	sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port);
+
+	asoc->state = SCTP_STATE_CLOSED;
+	asoc->cookie_life = ms_to_ktime(sp->assocparams.sasoc_cookie_life);
+	asoc->user_frag = sp->user_frag;
+
+	/* Set the association max_retrans and RTO values from the
+	 * socket values.
+	 */
+	asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt;
+	asoc->pf_retrans = net->sctp.pf_retrans;
+
+	asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial);
+	asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max);
+	asoc->rto_min = msecs_to_jiffies(sp->rtoinfo.srto_min);
+
+	/* Initialize the association's heartbeat interval based on the
+	 * sock configured value.
+	 */
+	asoc->hbinterval = msecs_to_jiffies(sp->hbinterval);
+
+	/* Initialize path max retrans value. */
+	asoc->pathmaxrxt = sp->pathmaxrxt;
+
+	/* Initialize default path MTU. */
+	asoc->pathmtu = sp->pathmtu;
+
+	/* Set association default SACK delay */
+	asoc->sackdelay = msecs_to_jiffies(sp->sackdelay);
+	asoc->sackfreq = sp->sackfreq;
+
+	/* Set the association default flags controlling
+	 * Heartbeat, SACK delay, and Path MTU Discovery.
+	 */
+	asoc->param_flags = sp->param_flags;
+
+	/* Initialize the maximum number of new data packets that can be sent
+	 * in a burst.
+	 */
+	asoc->max_burst = sp->max_burst;
+
+	/* initialize association timers */
+	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = asoc->rto_initial;
+	asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = asoc->rto_initial;
+	asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = asoc->rto_initial;
+
+	/* sctpimpguide Section 2.12.2
+	 * If the 'T5-shutdown-guard' timer is used, it SHOULD be set to the
+	 * recommended value of 5 times 'RTO.Max'.
+	 */
+	asoc->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]
+		= 5 * asoc->rto_max;
+
+	asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
+	asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ;
+
+	/* Initializes the timers */
+	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
+		setup_timer(&asoc->timers[i], sctp_timer_events[i],
+			    (unsigned long)asoc);
+
+	/* Pull default initialization values from the sock options.
+	 * Note: This assumes that the values have already been
+	 * validated in the sock.
+	 */
+	asoc->c.sinit_max_instreams = sp->initmsg.sinit_max_instreams;
+	asoc->c.sinit_num_ostreams = sp->initmsg.sinit_num_ostreams;
+	asoc->max_init_attempts = sp->initmsg.sinit_max_attempts;
+
+	asoc->max_init_timeo =
+		msecs_to_jiffies(sp->initmsg.sinit_max_init_timeo);
+
+	/* Set the local window size for receive.
+	 * This is also the rcvbuf space per association.
+	 * RFC 6 - A SCTP receiver MUST be able to receive a minimum of
+	 * 1500 bytes in one SCTP packet.
+	 */
+	if ((sk->sk_rcvbuf/2) < SCTP_DEFAULT_MINWINDOW)
+		asoc->rwnd = SCTP_DEFAULT_MINWINDOW;
+	else
+		asoc->rwnd = sk->sk_rcvbuf/2;
+
+	asoc->a_rwnd = asoc->rwnd;
+
+	/* Use my own max window until I learn something better. */
+	asoc->peer.rwnd = SCTP_DEFAULT_MAXWINDOW;
+
+	/* Initialize the receive memory counter */
+	atomic_set(&asoc->rmem_alloc, 0);
+
+	init_waitqueue_head(&asoc->wait);
+
+	asoc->c.my_vtag = sctp_generate_tag(ep);
+	asoc->c.my_port = ep->base.bind_addr.port;
+
+	asoc->c.initial_tsn = sctp_generate_tsn(ep);
+
+	asoc->next_tsn = asoc->c.initial_tsn;
+
+	asoc->ctsn_ack_point = asoc->next_tsn - 1;
+	asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
+	asoc->highest_sacked = asoc->ctsn_ack_point;
+	asoc->last_cwr_tsn = asoc->ctsn_ack_point;
+
+	/* ADDIP Section 4.1 Asconf Chunk Procedures
+	 *
+	 * When an endpoint has an ASCONF signaled change to be sent to the
+	 * remote endpoint it should do the following:
+	 * ...
+	 * A2) a serial number should be assigned to the chunk. The serial
+	 * number SHOULD be a monotonically increasing number. The serial
+	 * numbers SHOULD be initialized at the start of the
+	 * association to the same value as the initial TSN.
+	 */
+	asoc->addip_serial = asoc->c.initial_tsn;
+
+	INIT_LIST_HEAD(&asoc->addip_chunk_list);
+	INIT_LIST_HEAD(&asoc->asconf_ack_list);
+
+	/* Make an empty list of remote transport addresses. */
+	INIT_LIST_HEAD(&asoc->peer.transport_addr_list);
+
+	/* RFC 2960 5.1 Normal Establishment of an Association
+	 *
+	 * After the reception of the first data chunk in an
+	 * association the endpoint must immediately respond with a
+	 * sack to acknowledge the data chunk.  Subsequent
+	 * acknowledgements should be done as described in Section
+	 * 6.2.
+	 *
+	 * [We implement this by telling a new association that it
+	 * already received one packet.]
+	 */
+	asoc->peer.sack_needed = 1;
+	asoc->peer.sack_generation = 1;
+
+	/* Assume that the peer will tell us if he recognizes ASCONF
+	 * as part of INIT exchange.
+	 * The sctp_addip_noauth option is there for backward compatibility
+	 * and will revert old behavior.
+	 */
+	if (net->sctp.addip_noauth)
+		asoc->peer.asconf_capable = 1;
+
+	/* Create an input queue. */
+	sctp_inq_init(&asoc->base.inqueue);
+	sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv);
+
+	/* Create an output queue. */
+	sctp_outq_init(asoc, &asoc->outqueue);
+
+	if (!sctp_ulpq_init(&asoc->ulpq, asoc))
+		goto fail_init;
+
+	/* Assume that peer would support both address types unless we are
+	 * told otherwise.
+	 */
+	asoc->peer.ipv4_address = 1;
+	if (asoc->base.sk->sk_family == PF_INET6)
+		asoc->peer.ipv6_address = 1;
+	INIT_LIST_HEAD(&asoc->asocs);
+
+	asoc->default_stream = sp->default_stream;
+	asoc->default_ppid = sp->default_ppid;
+	asoc->default_flags = sp->default_flags;
+	asoc->default_context = sp->default_context;
+	asoc->default_timetolive = sp->default_timetolive;
+	asoc->default_rcv_context = sp->default_rcv_context;
+
+	/* AUTH related initializations */
+	INIT_LIST_HEAD(&asoc->endpoint_shared_keys);
+	err = sctp_auth_asoc_copy_shkeys(ep, asoc, gfp);
+	if (err)
+		goto fail_init;
+
+	asoc->active_key_id = ep->active_key_id;
+
+	/* Save the hmacs and chunks list into this association */
+	if (ep->auth_hmacs_list)
+		memcpy(asoc->c.auth_hmacs, ep->auth_hmacs_list,
+		       ntohs(ep->auth_hmacs_list->param_hdr.length));
+	if (ep->auth_chunk_list)
+		memcpy(asoc->c.auth_chunks, ep->auth_chunk_list,
+		       ntohs(ep->auth_chunk_list->param_hdr.length));
+
+	/* Get the AUTH random number for this association */
+	p = (sctp_paramhdr_t *)asoc->c.auth_random;
+	p->type = SCTP_PARAM_RANDOM;
+	p->length = htons(sizeof(sctp_paramhdr_t) + SCTP_AUTH_RANDOM_LENGTH);
+	get_random_bytes(p+1, SCTP_AUTH_RANDOM_LENGTH);
+
+	return asoc;
+
+fail_init:
+	sock_put(asoc->base.sk);
+	sctp_endpoint_put(asoc->ep);
+	return NULL;
+}
+
+/* Allocate and initialize a new association */
+struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep,
+					      const struct sock *sk,
+					      sctp_scope_t scope, gfp_t gfp)
+{
+	struct sctp_association *asoc;
+
+	asoc = kzalloc(sizeof(*asoc), gfp);
+	if (!asoc)
+		goto fail;
+
+	if (!sctp_association_init(asoc, ep, sk, scope, gfp))
+		goto fail_init;
+
+	SCTP_DBG_OBJCNT_INC(assoc);
+
+	pr_debug("Created asoc %p\n", asoc);
+
+	return asoc;
+
+fail_init:
+	kfree(asoc);
+fail:
+	return NULL;
+}
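The receive-window setup in sctp_association_init() above boils down to rwnd = max(sk_rcvbuf/2, 1500): half the socket receive buffer, but never less than the minimum an SCTP receiver must be able to accept in one packet. A standalone model of that clamp (1500 standing in for SCTP_DEFAULT_MINWINDOW; buffer sizes illustrative):

#include <stdio.h>

#define MINWINDOW 1500	/* stand-in for SCTP_DEFAULT_MINWINDOW */

static unsigned int initial_rwnd(unsigned int sk_rcvbuf)
{
	unsigned int half = sk_rcvbuf / 2;

	return half < MINWINDOW ? MINWINDOW : half;
}

int main(void)
{
	printf("rcvbuf   2048 -> rwnd %u\n", initial_rwnd(2048));	/* clamped to 1500 */
	printf("rcvbuf 212992 -> rwnd %u\n", initial_rwnd(212992));	/* 106496 */
	return 0;
}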
+/* Free this association if possible.  There may still be users, so
+ * the actual deallocation may be delayed.
+ */
+void sctp_association_free(struct sctp_association *asoc)
+{
+	struct sock *sk = asoc->base.sk;
+	struct sctp_transport *transport;
+	struct list_head *pos, *temp;
+	int i;
+
+	/* Only real associations count against the endpoint, so
+	 * don't bother if this is a temporary association.
+	 */
+	if (!list_empty(&asoc->asocs)) {
+		list_del(&asoc->asocs);
+
+		/* Decrement the backlog value for a TCP-style listening
+		 * socket.
+		 */
+		if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
+			sk->sk_ack_backlog--;
+	}
+
+	/* Mark as dead, so other users can know this structure is
+	 * going away.
+	 */
+	asoc->base.dead = true;
+
+	/* Dispose of any data lying around in the outqueue. */
+	sctp_outq_free(&asoc->outqueue);
+
+	/* Dispose of any pending messages for the upper layer. */
+	sctp_ulpq_free(&asoc->ulpq);
+
+	/* Dispose of any pending chunks on the inqueue. */
+	sctp_inq_free(&asoc->base.inqueue);
+
+	sctp_tsnmap_free(&asoc->peer.tsn_map);
+
+	/* Free ssnmap storage. */
+	sctp_ssnmap_free(asoc->ssnmap);
+
+	/* Clean up the bound address list. */
+	sctp_bind_addr_free(&asoc->base.bind_addr);
+
+	/* Do we need to go through all of our timers and
+	 * delete them?  To be safe we will try to delete all, but we
+	 * should be able to go through and make a guess based
+	 * on our state.
+	 */
+	for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) {
+		if (del_timer(&asoc->timers[i]))
+			sctp_association_put(asoc);
+	}
+
+	/* Free peer's cached cookie. */
+	kfree(asoc->peer.cookie);
+	kfree(asoc->peer.peer_random);
+	kfree(asoc->peer.peer_chunks);
+	kfree(asoc->peer.peer_hmacs);
+
+	/* Release the transport structures. */
+	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
+		transport = list_entry(pos, struct sctp_transport, transports);
+		list_del_rcu(pos);
+		sctp_transport_free(transport);
+	}
+
+	asoc->peer.transport_count = 0;
+
+	sctp_asconf_queue_teardown(asoc);
+
+	/* Free pending address space being deleted */
+	kfree(asoc->asconf_addr_del_pending);
+
+	/* AUTH - Free the endpoint shared keys */
+	sctp_auth_destroy_keys(&asoc->endpoint_shared_keys);
+
+	/* AUTH - Free the association shared key */
+	sctp_auth_key_put(asoc->asoc_shared_key);
+
+	sctp_association_put(asoc);
+}
+
+/* Cleanup and free up an association. */
+static void sctp_association_destroy(struct sctp_association *asoc)
+{
+	if (unlikely(!asoc->base.dead)) {
+		WARN(1, "Attempt to destroy undead association %p!\n", asoc);
+		return;
+	}
+
+	sctp_endpoint_put(asoc->ep);
+	sock_put(asoc->base.sk);
+
+	if (asoc->assoc_id != 0) {
+		spin_lock_bh(&sctp_assocs_id_lock);
+		idr_remove(&sctp_assocs_id, asoc->assoc_id);
+		spin_unlock_bh(&sctp_assocs_id_lock);
+	}
+
+	WARN_ON(atomic_read(&asoc->rmem_alloc));
+
+	kfree(asoc);
+	SCTP_DBG_OBJCNT_DEC(assoc);
+}
+
+/* Change the primary destination address for the peer. */
+void sctp_assoc_set_primary(struct sctp_association *asoc,
+			    struct sctp_transport *transport)
+{
+	int changeover = 0;
+
+	/* it's a changeover only if we already have a primary path
+	 * that we are changing
+	 */
+	if (asoc->peer.primary_path != NULL &&
+	    asoc->peer.primary_path != transport)
+		changeover = 1;
+
+	asoc->peer.primary_path = transport;
+
+	/* Set a default msg_name for events. */
+	memcpy(&asoc->peer.primary_addr, &transport->ipaddr,
+	       sizeof(union sctp_addr));
+
+	/* If the primary path is changing, assume that the
+	 * user wants to use this new path.
+	 */
+	if ((transport->state == SCTP_ACTIVE) ||
+	    (transport->state == SCTP_UNKNOWN))
+		asoc->peer.active_path = transport;
+
+	/*
+	 * SFR-CACC algorithm:
+	 * Upon the receipt of a request to change the primary
+	 * destination address, on the data structure for the new
+	 * primary destination, the sender MUST do the following:
+	 *
+	 * 1) If CHANGEOVER_ACTIVE is set, then there was a switch
+	 * to this destination address earlier. The sender MUST set
+	 * CYCLING_CHANGEOVER to indicate that this switch is a
+	 * double switch to the same destination address.
+	 *
+	 * Really, only bother if we have data queued or outstanding on
+	 * the association.
+	 */
+	if (!asoc->outqueue.outstanding_bytes && !asoc->outqueue.out_qlen)
+		return;
+
+	if (transport->cacc.changeover_active)
+		transport->cacc.cycling_changeover = changeover;
+
+	/* 2) The sender MUST set CHANGEOVER_ACTIVE to indicate that
+	 * a changeover has occurred.
+	 */
+	transport->cacc.changeover_active = changeover;
+
+	/* 3) The sender MUST store the next TSN to be sent in
+	 * next_tsn_at_change.
+	 */
+	transport->cacc.next_tsn_at_change = asoc->next_tsn;
+}
+
+/* Remove a transport from an association. */
+void sctp_assoc_rm_peer(struct sctp_association *asoc,
+			struct sctp_transport *peer)
+{
+	struct list_head *pos;
+	struct sctp_transport *transport;
+
+	pr_debug("%s: association:%p addr:%pISpc\n",
+		 __func__, asoc, &peer->ipaddr.sa);
+
+	/* If we are to remove the current retran_path, update it
+	 * to the next peer before removing this peer from the list.
+	 */
+	if (asoc->peer.retran_path == peer)
+		sctp_assoc_update_retran_path(asoc);
+
+	/* Remove this peer from the list. */
+	list_del_rcu(&peer->transports);
+
+	/* Get the first transport of asoc. */
+	pos = asoc->peer.transport_addr_list.next;
+	transport = list_entry(pos, struct sctp_transport, transports);
+
+	/* Update any entries that match the peer to be deleted. */
+	if (asoc->peer.primary_path == peer)
+		sctp_assoc_set_primary(asoc, transport);
+	if (asoc->peer.active_path == peer)
+		asoc->peer.active_path = transport;
+	if (asoc->peer.retran_path == peer)
+		asoc->peer.retran_path = transport;
+	if (asoc->peer.last_data_from == peer)
+		asoc->peer.last_data_from = transport;
+
+	/* If we remove the transport an INIT was last sent to, set it to
+	 * NULL. Combined with the update of the retran path above, this
+	 * will cause the next INIT to be sent to the next available
+	 * transport, maintaining the cycle.
+	 */
+	if (asoc->init_last_sent_to == peer)
+		asoc->init_last_sent_to = NULL;
+
+	/* If we remove the transport an SHUTDOWN was last sent to, set it
+	 * to NULL. Combined with the update of the retran path above, this
+	 * will cause the next SHUTDOWN to be sent to the next available
+	 * transport, maintaining the cycle.
+	 */
+	if (asoc->shutdown_last_sent_to == peer)
+		asoc->shutdown_last_sent_to = NULL;
+
+	/* If we remove the transport an ASCONF was last sent to, set it to
+	 * NULL.
+	 */
+	if (asoc->addip_last_asconf &&
+	    asoc->addip_last_asconf->transport == peer)
+		asoc->addip_last_asconf->transport = NULL;
+
+	/* If we have something on the transmitted list, we have to
+	 * save it off.  The best place is the active path.
+	 */
+	if (!list_empty(&peer->transmitted)) {
+		struct sctp_transport *active = asoc->peer.active_path;
+		struct sctp_chunk *ch;
+
+		/* Reset the transport of each chunk on this list */
+		list_for_each_entry(ch, &peer->transmitted,
+				    transmitted_list) {
+			ch->transport = NULL;
+			ch->rtt_in_progress = 0;
+		}
+
+		list_splice_tail_init(&peer->transmitted,
+				      &active->transmitted);
+
+		/* Start a T3 timer here in case it wasn't running so
+		 * that these migrated packets have a chance to get
+		 * retransmitted.
+		 */
+		if (!timer_pending(&active->T3_rtx_timer))
+			if (!mod_timer(&active->T3_rtx_timer,
+				       jiffies + active->rto))
+				sctp_transport_hold(active);
+	}
+
+	asoc->peer.transport_count--;
+
+	sctp_transport_free(peer);
+}
+
+/* Add a transport address to an association. */
+struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
+					   const union sctp_addr *addr,
+					   const gfp_t gfp,
+					   const int peer_state)
+{
+	struct net *net = sock_net(asoc->base.sk);
+	struct sctp_transport *peer;
+	struct sctp_sock *sp;
+	unsigned short port;
+
+	sp = sctp_sk(asoc->base.sk);
+
+	/* AF_INET and AF_INET6 share common port field. */
+	port = ntohs(addr->v4.sin_port);
+
+	pr_debug("%s: association:%p addr:%pISpc state:%d\n", __func__,
+		 asoc, &addr->sa, peer_state);
+
+	/* Set the port if it has not been set yet. */
+	if (0 == asoc->peer.port)
+		asoc->peer.port = port;
+
+	/* Check to see if this is a duplicate. */
+	peer = sctp_assoc_lookup_paddr(asoc, addr);
+	if (peer) {
+		/* An UNKNOWN state is only set on transports added by
+		 * user in sctp_connectx() call.  Such transports should be
+		 * considered CONFIRMED per RFC 4960, Section 5.4.
+		 */
+		if (peer->state == SCTP_UNKNOWN) {
+			peer->state = SCTP_ACTIVE;
+		}
+		return peer;
+	}
+
+	peer = sctp_transport_new(net, addr, gfp);
+	if (!peer)
+		return NULL;
+
+	sctp_transport_set_owner(peer, asoc);
+
+	/* Initialize the peer's heartbeat interval based on the
+	 * association configured value.
+	 */
+	peer->hbinterval = asoc->hbinterval;
+
+	/* Set the path max_retrans. */
+	peer->pathmaxrxt = asoc->pathmaxrxt;
+
+	/* And the partial failure retrans threshold */
+	peer->pf_retrans = asoc->pf_retrans;
+
+	/* Initialize the peer's SACK delay timeout based on the
+	 * association configured value.
+	 */
+	peer->sackdelay = asoc->sackdelay;
+	peer->sackfreq = asoc->sackfreq;
+
+	/* Enable/disable heartbeat, SACK delay, and path MTU discovery
+	 * based on association setting.
+	 */
+	peer->param_flags = asoc->param_flags;
+
+	sctp_transport_route(peer, NULL, sp);
+
+	/* Initialize the pmtu of the transport. */
+	if (peer->param_flags & SPP_PMTUD_DISABLE) {
+		if (asoc->pathmtu)
+			peer->pathmtu = asoc->pathmtu;
+		else
+			peer->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
+	}
+
+	/* If this is the first transport addr on this association,
+	 * initialize the association PMTU to the peer's PMTU.
+	 * If not and the current association PMTU is higher than the new
+	 * peer's PMTU, reset the association PMTU to the new peer's PMTU.
+	 */
+	if (asoc->pathmtu)
+		asoc->pathmtu = min_t(int, peer->pathmtu, asoc->pathmtu);
+	else
+		asoc->pathmtu = peer->pathmtu;
+
+	pr_debug("%s: association:%p PMTU set to %d\n", __func__, asoc,
+		 asoc->pathmtu);
+
+	peer->pmtu_pending = 0;
+
+	asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu);
+
+	/* The asoc->peer.port might not be meaningful yet, but
+	 * initialize the packet structure anyway.
+	 */
+	sctp_packet_init(&peer->packet, peer, asoc->base.bind_addr.port,
+			 asoc->peer.port);
+
+	/* 7.2.1 Slow-Start
+	 *
+	 * o The initial cwnd before DATA transmission or after a sufficiently
+	 *   long idle period MUST be set to
+	 *   min(4*MTU, max(2*MTU, 4380 bytes))
+	 *
+	 * o The initial value of ssthresh MAY be arbitrarily high
+	 *   (for example, implementations MAY use the size of the
+	 *   receiver advertised window).
+	 */
+	peer->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
+
+	/* At this point, we may not have the receiver's advertised window,
+	 * so initialize ssthresh to the default value and it will be set
+	 * later when we process the INIT.
+	 */
+	peer->ssthresh = SCTP_DEFAULT_MAXWINDOW;
+
+	peer->partial_bytes_acked = 0;
+	peer->flight_size = 0;
+	peer->burst_limited = 0;
+
+	/* Set the transport's RTO.initial value */
+	peer->rto = asoc->rto_initial;
+	sctp_max_rto(asoc, peer);
+
+	/* Set the peer's active state. */
+	peer->state = peer_state;
+
+	/* Attach the remote transport to our asoc. */
+	list_add_tail_rcu(&peer->transports, &asoc->peer.transport_addr_list);
+	asoc->peer.transport_count++;
+
+	/* If we do not yet have a primary path, set one. */
+	if (!asoc->peer.primary_path) {
+		sctp_assoc_set_primary(asoc, peer);
+		asoc->peer.retran_path = peer;
+	}
+
+	if (asoc->peer.active_path == asoc->peer.retran_path &&
+	    peer->state != SCTP_UNCONFIRMED) {
+		asoc->peer.retran_path = peer;
+	}
+
+	return peer;
+}
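sctp_assoc_add_peer() above seeds the congestion window with the RFC 2960 7.2.1 rule min(4*MTU, max(2*MTU, 4380 bytes)). A small standalone calculation showing which term wins at a few path MTUs (values illustrative):

#include <stdio.h>

static unsigned int initial_cwnd(unsigned int pathmtu)
{
	unsigned int two = 2 * pathmtu, four = 4 * pathmtu;
	unsigned int lower = two > 4380 ? two : 4380;

	return four < lower ? four : lower;
}

int main(void)
{
	printf("MTU  512 -> cwnd %u\n", initial_cwnd(512));	/* 2048: 4*MTU wins */
	printf("MTU 1500 -> cwnd %u\n", initial_cwnd(1500));	/* 4380: fixed floor wins */
	printf("MTU 9000 -> cwnd %u\n", initial_cwnd(9000));	/* 18000: 2*MTU wins */
	return 0;
}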
+/* Delete a transport address from an association. */
+void sctp_assoc_del_peer(struct sctp_association *asoc,
+			 const union sctp_addr *addr)
+{
+	struct list_head *pos;
+	struct list_head *temp;
+	struct sctp_transport *transport;
+
+	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
+		transport = list_entry(pos, struct sctp_transport, transports);
+		if (sctp_cmp_addr_exact(addr, &transport->ipaddr)) {
+			/* Do book keeping for removing the peer and free it. */
+			sctp_assoc_rm_peer(asoc, transport);
+			break;
+		}
+	}
+}
+
+/* Lookup a transport by address. */
+struct sctp_transport *sctp_assoc_lookup_paddr(
+					const struct sctp_association *asoc,
+					const union sctp_addr *address)
+{
+	struct sctp_transport *t;
+
+	/* Cycle through all transports searching for a peer address. */
+
+	list_for_each_entry(t, &asoc->peer.transport_addr_list,
+			    transports) {
+		if (sctp_cmp_addr_exact(address, &t->ipaddr))
+			return t;
+	}
+
+	return NULL;
+}
+
+/* Remove all transports except a given one */
+void sctp_assoc_del_nonprimary_peers(struct sctp_association *asoc,
+				     struct sctp_transport *primary)
+{
+	struct sctp_transport *temp;
+	struct sctp_transport *t;
+
+	list_for_each_entry_safe(t, temp, &asoc->peer.transport_addr_list,
+				 transports) {
+		/* if the current transport is not the primary one, delete it */
+		if (t != primary)
+			sctp_assoc_rm_peer(asoc, t);
+	}
+}
+
+/* Engage in transport control operations.
+ * Mark the transport up or down and send a notification to the user.
+ * Select and update the new active and retran paths.
+ */
+void sctp_assoc_control_transport(struct sctp_association *asoc,
+				  struct sctp_transport *transport,
+				  sctp_transport_cmd_t command,
+				  sctp_sn_error_t error)
+{
+	struct sctp_ulpevent *event;
+	struct sockaddr_storage addr;
+	int spc_state = 0;
+	bool ulp_notify = true;
+
+	/* Record the transition on the transport. */
+	switch (command) {
+	case SCTP_TRANSPORT_UP:
+		/* If we are moving from UNCONFIRMED state due
+		 * to heartbeat success, report the SCTP_ADDR_CONFIRMED
+		 * state to the user, otherwise report SCTP_ADDR_AVAILABLE.
+		 */
+		if (SCTP_UNCONFIRMED == transport->state &&
+		    SCTP_HEARTBEAT_SUCCESS == error)
+			spc_state = SCTP_ADDR_CONFIRMED;
+		else
+			spc_state = SCTP_ADDR_AVAILABLE;
+		/* Don't inform ULP about transition from PF to
+		 * active state and set cwnd to 1 MTU, see SCTP
+		 * Quick failover draft section 5.1, point 5
+		 */
+		if (transport->state == SCTP_PF) {
+			ulp_notify = false;
+			transport->cwnd = asoc->pathmtu;
+		}
+		transport->state = SCTP_ACTIVE;
+		break;
+
+	case SCTP_TRANSPORT_DOWN:
+		/* If the transport was never confirmed, do not transition it
+		 * to inactive state.  Also, release the cached route since
+		 * there may be a better route next time.
+		 */
+		if (transport->state != SCTP_UNCONFIRMED)
+			transport->state = SCTP_INACTIVE;
+		else {
+			dst_release(transport->dst);
+			transport->dst = NULL;
+			ulp_notify = false;
+		}
+
+		spc_state = SCTP_ADDR_UNREACHABLE;
+		break;
+
+	case SCTP_TRANSPORT_PF:
+		transport->state = SCTP_PF;
+		ulp_notify = false;
+		break;
+
+	default:
+		return;
+	}
+
+	/* Generate and send a SCTP_PEER_ADDR_CHANGE notification
+	 * to the user.
+	 */
+	if (ulp_notify) {
+		memset(&addr, 0, sizeof(struct sockaddr_storage));
+		memcpy(&addr, &transport->ipaddr,
+		       transport->af_specific->sockaddr_len);
+
+		event = sctp_ulpevent_make_peer_addr_change(asoc, &addr,
+					0, spc_state, error, GFP_ATOMIC);
+		if (event)
+			sctp_ulpq_tail_event(&asoc->ulpq, event);
+	}
+
+	/* Select new active and retran paths. */
+	sctp_select_active_and_retran_path(asoc);
+}
+
+/* Hold a reference to an association. */
+void sctp_association_hold(struct sctp_association *asoc)
+{
+	atomic_inc(&asoc->base.refcnt);
+}
+
+/* Release a reference to an association and cleanup
+ * if there are no more references.
+ */
+void sctp_association_put(struct sctp_association *asoc)
+{
+	if (atomic_dec_and_test(&asoc->base.refcnt))
+		sctp_association_destroy(asoc);
+}
+
+/* Allocate the next TSN, Transmission Sequence Number, for the given
+ * association.
+ */
+__u32 sctp_association_get_next_tsn(struct sctp_association *asoc)
+{
+	/* From Section 1.6 Serial Number Arithmetic:
+	 * Transmission Sequence Numbers wrap around when they reach
+	 * 2**32 - 1.  That is, the next TSN a DATA chunk MUST use
+	 * after transmitting TSN = 2**32 - 1 is TSN = 0.
+	 */
+	__u32 retval = asoc->next_tsn;
+	asoc->next_tsn++;
+	asoc->unack_data++;
+
+	return retval;
+}
+
+/* Compare two addresses to see if they match.  Wildcard addresses
+ * only match themselves.
+ */
+int sctp_cmp_addr_exact(const union sctp_addr *ss1,
+			const union sctp_addr *ss2)
+{
+	struct sctp_af *af;
+
+	af = sctp_get_af_specific(ss1->sa.sa_family);
+	if (unlikely(!af))
+		return 0;
+
+	return af->cmp_addr(ss1, ss2);
+}
+
+/* Return an ecne chunk to get prepended to a packet.
+ * Note: We are sly and return a shared, prealloced chunk.  FIXME:
+ * No we don't, but we could/should.
+ */
+struct sctp_chunk *sctp_get_ecne_prepend(struct sctp_association *asoc)
+{
+	if (!asoc->need_ecne)
+		return NULL;
+
+	/* Send ECNE if needed.
+	 * Not being able to allocate a chunk here is not deadly.
+	 */
+	return sctp_make_ecne(asoc, asoc->last_ecne_tsn);
+}
+
+/*
+ * Find which transport this TSN was sent on.
+ */
+struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc,
+					     __u32 tsn)
+{
+	struct sctp_transport *active;
+	struct sctp_transport *match;
+	struct sctp_transport *transport;
+	struct sctp_chunk *chunk;
+	__be32 key = htonl(tsn);
+
+	match = NULL;
+
+	/*
+	 * FIXME: In general, find a more efficient data structure for
+	 * searching.
+	 */
+
+	/*
+	 * The general strategy is to search each transport's transmitted
+	 * list.  Return which transport this TSN lives on.
+	 *
+	 * Let's be hopeful and check the active_path first.
+	 * Another optimization would be to know if there is only one
+	 * outbound path and not have to look for the TSN at all.
+	 *
+	 */
+
+	active = asoc->peer.active_path;
+
+	list_for_each_entry(chunk, &active->transmitted,
+			    transmitted_list) {
+
+		if (key == chunk->subh.data_hdr->tsn) {
+			match = active;
+			goto out;
+		}
+	}
+
+	/* If not found, go search all the other transports. */
+	list_for_each_entry(transport, &asoc->peer.transport_addr_list,
+			    transports) {
+
+		if (transport == active)
+			continue;
+		list_for_each_entry(chunk, &transport->transmitted,
+				    transmitted_list) {
+			if (key == chunk->subh.data_hdr->tsn) {
+				match = transport;
+				goto out;
+			}
+		}
+	}
+out:
+	return match;
+}
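The comment in sctp_association_get_next_tsn() references RFC 2960 Section 1.6 serial number arithmetic: TSNs are unsigned 32-bit values that wrap from 2**32 - 1 back to 0, so ordering must be computed modulo 2**32 rather than with a plain comparison. A standalone sketch of the wrap and an "after" test (the helper name tsn_after is invented for illustration):

#include <stdint.h>
#include <stdio.h>

static int tsn_after(uint32_t a, uint32_t b)
{
	/* True if a is "after" b in serial-number order. */
	return (int32_t)(a - b) > 0;
}

int main(void)
{
	uint32_t next_tsn = 0xffffffff;	/* about to wrap */
	uint32_t tsn = next_tsn++;	/* allocate, as the kernel routine does */

	printf("allocated %u, next is %u\n", tsn, next_tsn);	/* 4294967295, then 0 */
	printf("0 after 0xffffffff? %d\n", tsn_after(0, 0xffffffff));	/* 1: wrapped */
	return 0;
}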
+/* Is this the association we are looking for? */
+struct sctp_transport *sctp_assoc_is_match(struct sctp_association *asoc,
+					   struct net *net,
+					   const union sctp_addr *laddr,
+					   const union sctp_addr *paddr)
+{
+	struct sctp_transport *transport;
+
+	if ((htons(asoc->base.bind_addr.port) == laddr->v4.sin_port) &&
+	    (htons(asoc->peer.port) == paddr->v4.sin_port) &&
+	    net_eq(sock_net(asoc->base.sk), net)) {
+		transport = sctp_assoc_lookup_paddr(asoc, paddr);
+		if (!transport)
+			goto out;
+
+		if (sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
+					 sctp_sk(asoc->base.sk)))
+			goto out;
+	}
+	transport = NULL;
+
+out:
+	return transport;
+}
+
+/* Do delayed input processing.  This is scheduled by sctp_rcv(). */
+static void sctp_assoc_bh_rcv(struct work_struct *work)
+{
+	struct sctp_association *asoc =
+		container_of(work, struct sctp_association,
+			     base.inqueue.immediate);
+	struct net *net = sock_net(asoc->base.sk);
+	struct sctp_endpoint *ep;
+	struct sctp_chunk *chunk;
+	struct sctp_inq *inqueue;
+	int state;
+	sctp_subtype_t subtype;
+	int error = 0;
+
+	/* The association should be held so we should be safe. */
+	ep = asoc->ep;
+
+	inqueue = &asoc->base.inqueue;
+	sctp_association_hold(asoc);
+	while (NULL != (chunk = sctp_inq_pop(inqueue))) {
+		state = asoc->state;
+		subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);
+
+		/* SCTP-AUTH, Section 6.3:
+		 *    The receiver has a list of chunk types which it expects
+		 *    to be received only after an AUTH-chunk.  This list has
+		 *    been sent to the peer during the association setup.  It
+		 *    MUST silently discard these chunks if they are not placed
+		 *    after an AUTH chunk in the packet.
+		 */
+		if (sctp_auth_recv_cid(subtype.chunk, asoc) && !chunk->auth)
+			continue;
+
+		/* Remember where the last DATA chunk came from so we
+		 * know where to send the SACK.
+		 */
+		if (sctp_chunk_is_data(chunk))
+			asoc->peer.last_data_from = chunk->transport;
+		else {
+			SCTP_INC_STATS(net, SCTP_MIB_INCTRLCHUNKS);
+			asoc->stats.ictrlchunks++;
+			if (chunk->chunk_hdr->type == SCTP_CID_SACK)
+				asoc->stats.isacks++;
+		}
+
+		if (chunk->transport)
+			chunk->transport->last_time_heard = ktime_get();
+
+		/* Run through the state machine. */
+		error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype,
+				   state, ep, asoc, chunk, GFP_ATOMIC);
+
+		/* Check to see if the association is freed in response to
+		 * the incoming chunk.  If so, get out of the while loop.
+		 */
+		if (asoc->base.dead)
+			break;
+
+		/* If there is an error on chunk, discard this packet. */
+		if (error && chunk)
+			chunk->pdiscard = 1;
+	}
+	sctp_association_put(asoc);
+}
+
+/* This routine moves an association from its old sk to a new sk. */
+void sctp_assoc_migrate(struct sctp_association *assoc, struct sock *newsk)
+{
+	struct sctp_sock *newsp = sctp_sk(newsk);
+	struct sock *oldsk = assoc->base.sk;
+
+	/* Delete the association from the old endpoint's list of
+	 * associations.
+	 */
+	list_del_init(&assoc->asocs);
+
+	/* Decrement the backlog value for a TCP-style socket. */
+	if (sctp_style(oldsk, TCP))
+		oldsk->sk_ack_backlog--;
+
+	/* Release references to the old endpoint and the sock. */
+	sctp_endpoint_put(assoc->ep);
+	sock_put(assoc->base.sk);
+
+	/* Get a reference to the new endpoint. */
+	assoc->ep = newsp->ep;
+	sctp_endpoint_hold(assoc->ep);
+
+	/* Get a reference to the new sock. */
+	assoc->base.sk = newsk;
+	sock_hold(assoc->base.sk);
+
+	/* Add the association to the new endpoint's list of associations. */
+	sctp_endpoint_add_asoc(newsp->ep, assoc);
+}
+
+/* Update an association (possibly from unexpected COOKIE-ECHO processing). */
+void sctp_assoc_update(struct sctp_association *asoc,
+		       struct sctp_association *new)
+{
+	struct sctp_transport *trans;
+	struct list_head *pos, *temp;
+
+	/* Copy in new parameters of peer. */
+	asoc->c = new->c;
+	asoc->peer.rwnd = new->peer.rwnd;
+	asoc->peer.sack_needed = new->peer.sack_needed;
+	asoc->peer.auth_capable = new->peer.auth_capable;
+	asoc->peer.i = new->peer.i;
+	sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
+			 asoc->peer.i.initial_tsn, GFP_ATOMIC);
+
+	/* Remove any peer addresses not present in the new association. */
+	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
+		trans = list_entry(pos, struct sctp_transport, transports);
+		if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) {
+			sctp_assoc_rm_peer(asoc, trans);
+			continue;
+		}
+
+		if (asoc->state >= SCTP_STATE_ESTABLISHED)
+			sctp_transport_reset(trans);
+	}
+
+	/* If the case is A (association restart), use
+	 * initial_tsn as next_tsn. If the case is B, use
+	 * current next_tsn in case data sent to peer
+	 * has been discarded and needs retransmission.
+	 */
+	if (asoc->state >= SCTP_STATE_ESTABLISHED) {
+		asoc->next_tsn = new->next_tsn;
+		asoc->ctsn_ack_point = new->ctsn_ack_point;
+		asoc->adv_peer_ack_point = new->adv_peer_ack_point;
+
+		/* Reinitialize SSN for both local streams
+		 * and peer's streams.
+		 */
+		sctp_ssnmap_clear(asoc->ssnmap);
+
+		/* Flush the ULP reassembly and ordered queue.
+		 * Any data there will now be stale and will
+		 * cause problems.
+		 */
+		sctp_ulpq_flush(&asoc->ulpq);
+
+		/* reset the overall association error count so
+		 * that the restarted association doesn't get torn
+		 * down on the next retransmission timer.
+		 */
+		asoc->overall_error_count = 0;
+
+	} else {
+		/* Add any peer addresses from the new association. */
+		list_for_each_entry(trans, &new->peer.transport_addr_list,
+				    transports) {
+			if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr))
+				sctp_assoc_add_peer(asoc, &trans->ipaddr,
+						    GFP_ATOMIC, trans->state);
+		}
+
+		asoc->ctsn_ack_point = asoc->next_tsn - 1;
+		asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
+		if (!asoc->ssnmap) {
+			/* Move the ssnmap. */
+			asoc->ssnmap = new->ssnmap;
+			new->ssnmap = NULL;
+		}
+
+		if (!asoc->assoc_id) {
+			/* get a new association id since we don't have one
+			 * yet.
+			 */
+			sctp_assoc_set_id(asoc, GFP_ATOMIC);
+		}
+	}
+
+	/* SCTP-AUTH: Save the peer parameters from the new associations
+	 * and also move the association shared keys over
+	 */
+	kfree(asoc->peer.peer_random);
+	asoc->peer.peer_random = new->peer.peer_random;
+	new->peer.peer_random = NULL;
+
+	kfree(asoc->peer.peer_chunks);
+	asoc->peer.peer_chunks = new->peer.peer_chunks;
+	new->peer.peer_chunks = NULL;
+
+	kfree(asoc->peer.peer_hmacs);
+	asoc->peer.peer_hmacs = new->peer.peer_hmacs;
+	new->peer.peer_hmacs = NULL;
+
+	sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
+}
+
+/* Update the retran path for sending a retransmitted packet.
+ * See also RFC4960, 6.4. Multi-Homed SCTP Endpoints:
+ *
+ *   When there is outbound data to send and the primary path
+ *   becomes inactive (e.g., due to failures), or where the
+ *   SCTP user explicitly requests to send data to an
+ *   inactive destination transport address, before reporting
+ *   an error to its ULP, the SCTP endpoint should try to send
+ *   the data to an alternate active destination transport
+ *   address if one exists.
+ *
+ *   When retransmitting data that timed out, if the endpoint
+ *   is multihomed, it should consider each source-destination
+ *   address pair in its retransmission selection policy.
+ *   When retransmitting timed-out data, the endpoint should
+ *   attempt to pick the most divergent source-destination
+ *   pair from the original source-destination pair to which
+ *   the packet was transmitted.
+ *
+ *   Note: Rules for picking the most divergent source-destination
+ *   pair are an implementation decision and are not specified
+ *   within this document.
+ *
+ * Our basic strategy is to round-robin transports in priorities
+ * according to sctp_trans_state_to_prio_map[] e.g., if no such
+ * transport with state SCTP_ACTIVE exists, round-robin through
+ * SCTP_UNKNOWN, etc. You get the picture.
+ */
+static const u8 sctp_trans_state_to_prio_map[] = {
+	[SCTP_ACTIVE]	= 3,	/* best case */
+	[SCTP_UNKNOWN]	= 2,
+	[SCTP_PF]	= 1,
+	[SCTP_INACTIVE] = 0,	/* worst case */
+};
+
+static u8 sctp_trans_score(const struct sctp_transport *trans)
+{
+	return sctp_trans_state_to_prio_map[trans->state];
+}
+
+static struct sctp_transport *sctp_trans_elect_tie(struct sctp_transport *trans1,
+						   struct sctp_transport *trans2)
+{
+	if (trans1->error_count > trans2->error_count) {
+		return trans2;
+	} else if (trans1->error_count == trans2->error_count &&
+		   ktime_after(trans2->last_time_heard,
+			       trans1->last_time_heard)) {
+		return trans2;
+	} else {
+		return trans1;
+	}
+}
+
+static struct sctp_transport *sctp_trans_elect_best(struct sctp_transport *curr,
+						    struct sctp_transport *best)
+{
+	u8 score_curr, score_best;
+
+	if (best == NULL || curr == best)
+		return curr;
+
+	score_curr = sctp_trans_score(curr);
+	score_best = sctp_trans_score(best);
+
+	/* First, try a score-based selection if both transport states
+	 * differ. If we're in a tie, lets try to make a more clever
+	 * decision here based on error counts and last time heard.
+	 */
+	if (score_curr > score_best)
+		return curr;
+	else if (score_curr == score_best)
+		return sctp_trans_elect_tie(curr, best);
+	else
+		return best;
+}
+
+void sctp_assoc_update_retran_path(struct sctp_association *asoc)
+{
+	struct sctp_transport *trans = asoc->peer.retran_path;
+	struct sctp_transport *trans_next = NULL;
+
+	/* We're done as we only have the one and only path. */
+	if (asoc->peer.transport_count == 1)
+		return;
+	/* If active_path and retran_path are the same and active,
+	 * then this is the only active path. Use it.
+	 */
+	if (asoc->peer.active_path == asoc->peer.retran_path &&
+	    asoc->peer.active_path->state == SCTP_ACTIVE)
+		return;
+
+	/* Iterate from retran_path's successor back to retran_path. */
+	for (trans = list_next_entry(trans, transports); 1;
+	     trans = list_next_entry(trans, transports)) {
+		/* Manually skip the head element. */
+		if (&trans->transports == &asoc->peer.transport_addr_list)
+			continue;
+		if (trans->state == SCTP_UNCONFIRMED)
+			continue;
+		trans_next = sctp_trans_elect_best(trans, trans_next);
+		/* Active is good enough for immediate return. */
+		if (trans_next->state == SCTP_ACTIVE)
+			break;
+		/* We've reached the end, time to update path. */
+		if (trans == asoc->peer.retran_path)
+			break;
+	}
+
+	asoc->peer.retran_path = trans_next;
+
+	pr_debug("%s: association:%p updated new path to addr:%pISpc\n",
+		 __func__, asoc, &asoc->peer.retran_path->ipaddr.sa);
+}
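sctp_trans_elect_best() above ranks candidate transports by state score and breaks ties on the lower error count, then on the most recent last_time_heard. A simplified standalone model of that ordering (structure and values invented for the example):

#include <stdio.h>

enum state { INACTIVE, PF, UNKNOWN, ACTIVE };	/* ascending priority */

struct trans {
	const char *name;
	enum state state;
	int error_count;
	long last_heard;	/* higher = more recently heard */
};

static const struct trans *elect(const struct trans *a, const struct trans *b)
{
	if (!b)
		return a;
	if (a->state != b->state)
		return a->state > b->state ? a : b;	/* score decides */
	if (a->error_count != b->error_count)
		return a->error_count < b->error_count ? a : b;
	return a->last_heard >= b->last_heard ? a : b;	/* freshest wins */
}

int main(void)
{
	struct trans t1 = { "t1", PF, 2, 100 }, t2 = { "t2", PF, 0, 50 };

	printf("winner: %s\n", elect(&t1, &t2)->name);	/* t2: same state, fewer errors */
	return 0;
}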
+static void sctp_select_active_and_retran_path(struct sctp_association *asoc)
+{
+	struct sctp_transport *trans, *trans_pri = NULL, *trans_sec = NULL;
+	struct sctp_transport *trans_pf = NULL;
+
+	/* Look for the two most recently used active transports. */
+	list_for_each_entry(trans, &asoc->peer.transport_addr_list,
+			    transports) {
+		/* Skip uninteresting transports. */
+		if (trans->state == SCTP_INACTIVE ||
+		    trans->state == SCTP_UNCONFIRMED)
+			continue;
+		/* Keep track of the best PF transport from our
+		 * list in case we don't find an active one.
+		 */
+		if (trans->state == SCTP_PF) {
+			trans_pf = sctp_trans_elect_best(trans, trans_pf);
+			continue;
+		}
+		/* For active transports, pick the most recent ones. */
+		if (trans_pri == NULL ||
+		    ktime_after(trans->last_time_heard,
+				trans_pri->last_time_heard)) {
+			trans_sec = trans_pri;
+			trans_pri = trans;
+		} else if (trans_sec == NULL ||
+			   ktime_after(trans->last_time_heard,
+				       trans_sec->last_time_heard)) {
+			trans_sec = trans;
+		}
+	}
+
+	/* RFC 2960 6.4 Multi-Homed SCTP Endpoints
+	 *
+	 * By default, an endpoint should always transmit to the primary
+	 * path, unless the SCTP user explicitly specifies the
+	 * destination transport address (and possibly source transport
+	 * address) to use. [If the primary is active but not most recent,
+	 * bump the most recently used transport.]
+	 */
+	if ((asoc->peer.primary_path->state == SCTP_ACTIVE ||
+	     asoc->peer.primary_path->state == SCTP_UNKNOWN) &&
+	    asoc->peer.primary_path != trans_pri) {
+		trans_sec = trans_pri;
+		trans_pri = asoc->peer.primary_path;
+	}
+
+	/* We did not find anything useful for a possible retransmission
+	 * path; either primary path that we found is the same as
+	 * the current one, or we didn't generally find an active one.
+	 */
+	if (trans_sec == NULL)
+		trans_sec = trans_pri;
+
+	/* If we failed to find a usable transport, just camp on the
+	 * active or pick a PF iff it's the better choice.
+	 */
+	if (trans_pri == NULL) {
+		trans_pri = sctp_trans_elect_best(asoc->peer.active_path, trans_pf);
+		trans_sec = trans_pri;
+	}
+
+	/* Set the active and retran transports. */
+	asoc->peer.active_path = trans_pri;
+	asoc->peer.retran_path = trans_sec;
+}
+
+struct sctp_transport *
+sctp_assoc_choose_alter_transport(struct sctp_association *asoc,
+				  struct sctp_transport *last_sent_to)
+{
+	/* If this is the first time packet is sent, use the active path,
+	 * else use the retran path. If the last packet was sent over the
+	 * retran path, update the retran path and use it.
+	 */
+	if (last_sent_to == NULL) {
+		return asoc->peer.active_path;
+	} else {
+		if (last_sent_to == asoc->peer.retran_path)
+			sctp_assoc_update_retran_path(asoc);
+
+		return asoc->peer.retran_path;
+	}
+}
+
+/* Update the association's pmtu and frag_point by going through all the
+ * transports. This routine is called when a transport's PMTU has changed.
+ */
+void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc)
+{
+	struct sctp_transport *t;
+	__u32 pmtu = 0;
+
+	if (!asoc)
+		return;
+
+	/* Get the lowest pmtu of all the transports. */
+	list_for_each_entry(t, &asoc->peer.transport_addr_list,
+			    transports) {
+		if (t->pmtu_pending && t->dst) {
+			sctp_transport_update_pmtu(sk, t, dst_mtu(t->dst));
+			t->pmtu_pending = 0;
+		}
+		if (!pmtu || (t->pathmtu < pmtu))
+			pmtu = t->pathmtu;
+	}
+
+	if (pmtu) {
+		asoc->pathmtu = pmtu;
+		asoc->frag_point = sctp_frag_point(asoc, pmtu);
+	}
+
+	pr_debug("%s: asoc:%p, pmtu:%d, frag_point:%d\n", __func__, asoc,
+		 asoc->pathmtu, asoc->frag_point);
+}
+
+/* Should we send a SACK to update our peer? */
+static inline bool sctp_peer_needs_update(struct sctp_association *asoc)
+{
+	struct net *net = sock_net(asoc->base.sk);
+	switch (asoc->state) {
+	case SCTP_STATE_ESTABLISHED:
+	case SCTP_STATE_SHUTDOWN_PENDING:
+	case SCTP_STATE_SHUTDOWN_RECEIVED:
+	case SCTP_STATE_SHUTDOWN_SENT:
+		if ((asoc->rwnd > asoc->a_rwnd) &&
+		    ((asoc->rwnd - asoc->a_rwnd) >= max_t(__u32,
+			   (asoc->base.sk->sk_rcvbuf >> net->sctp.rwnd_upd_shift),
+			   asoc->pathmtu)))
+			return true;
+		break;
+	default:
+		break;
+	}
+	return false;
+}
+
+/* Increase asoc's rwnd by len and send any window update SACK if needed. */
+void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
+{
+	struct sctp_chunk *sack;
+	struct timer_list *timer;
+
+	if (asoc->rwnd_over) {
+		if (asoc->rwnd_over >= len) {
+			asoc->rwnd_over -= len;
+		} else {
+			asoc->rwnd += (len - asoc->rwnd_over);
+			asoc->rwnd_over = 0;
+		}
+	} else {
+		asoc->rwnd += len;
+	}
+
+	/* If we had window pressure, start recovering it
+	 * once our rwnd had reached the accumulated pressure
+	 * threshold.  The idea is to recover slowly, but up
+	 * to the initial advertised window.
+	 */
+	if (asoc->rwnd_press && asoc->rwnd >= asoc->rwnd_press) {
+		int change = min(asoc->pathmtu, asoc->rwnd_press);
+		asoc->rwnd += change;
+		asoc->rwnd_press -= change;
+	}
+
+	pr_debug("%s: asoc:%p rwnd increased by %d to (%u, %u) - %u\n",
+		 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
+		 asoc->a_rwnd);
+
+	/* Send a window update SACK if the rwnd has increased by at least the
+	 * minimum of the association's PMTU and half of the receive buffer.
+	 * The algorithm used is similar to the one described in
+	 * Section 4.2.3.3 of RFC 1122.
+	 */
+	if (sctp_peer_needs_update(asoc)) {
+		asoc->a_rwnd = asoc->rwnd;
+
+		pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u "
+			 "a_rwnd:%u\n", __func__, asoc, asoc->rwnd,
+			 asoc->a_rwnd);
+
+		sack = sctp_make_sack(asoc);
+		if (!sack)
+			return;
+
+		asoc->peer.sack_needed = 0;
+
+		sctp_outq_tail(&asoc->outqueue, sack);
+
+		/* Stop the SACK timer. */
+		timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
+		if (del_timer(timer))
+			sctp_association_put(asoc);
+	}
+}
+
+/* Decrease asoc's rwnd by len. */
+void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
+{
+	int rx_count;
+	int over = 0;
+
+	if (unlikely(!asoc->rwnd || asoc->rwnd_over))
+		pr_debug("%s: association:%p has asoc->rwnd:%u, "
+			 "asoc->rwnd_over:%u!\n", __func__, asoc,
+			 asoc->rwnd, asoc->rwnd_over);
+
+	if (asoc->ep->rcvbuf_policy)
+		rx_count = atomic_read(&asoc->rmem_alloc);
+	else
+		rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);
+
+	/* If we've reached or overflowed our receive buffer, announce
+	 * a 0 rwnd if rwnd would still be positive.  Store the
+	 * potential pressure overflow so that the window can be restored
+	 * back to original value.
+	 */
+	if (rx_count >= asoc->base.sk->sk_rcvbuf)
+		over = 1;
+
+	if (asoc->rwnd >= len) {
+		asoc->rwnd -= len;
+		if (over) {
+			asoc->rwnd_press += asoc->rwnd;
+			asoc->rwnd = 0;
+		}
+	} else {
+		asoc->rwnd_over = len - asoc->rwnd;
+		asoc->rwnd = 0;
+	}
+
+	pr_debug("%s: asoc:%p rwnd decreased by %d to (%u, %u, %u)\n",
+		 __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
+		 asoc->rwnd_press);
+}
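sctp_peer_needs_update() above only triggers a window-update SACK once rwnd has grown past the last advertised a_rwnd by at least max(sk_rcvbuf >> rwnd_upd_shift, pathmtu). A worked standalone example (the shift default is 4 in the kernel via SCTP_DEFAULT_RWND_SHIFT, to the best of my knowledge; treat all numbers as illustrative):

#include <stdio.h>

static int needs_update(unsigned rwnd, unsigned a_rwnd,
			unsigned rcvbuf, unsigned shift, unsigned pmtu)
{
	unsigned thresh = rcvbuf >> shift;

	if (thresh < pmtu)
		thresh = pmtu;
	return rwnd > a_rwnd && rwnd - a_rwnd >= thresh;
}

int main(void)
{
	/* 212992-byte rcvbuf, shift 4 -> threshold max(13312, 1500) = 13312 */
	printf("%d\n", needs_update(110000, 100000, 212992, 4, 1500));	/* 0: +10000 < 13312 */
	printf("%d\n", needs_update(114000, 100000, 212992, 4, 1500));	/* 1: +14000 >= 13312 */
	return 0;
}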
+/* Build the bind address list for the association based on info from the
+ * local endpoint and the remote peer.
+ */
+int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
+				     sctp_scope_t scope, gfp_t gfp)
+{
+	int flags;
+
+	/* Use scoping rules to determine the subset of addresses from
+	 * the endpoint.
+	 */
+	flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
+	if (asoc->peer.ipv4_address)
+		flags |= SCTP_ADDR4_PEERSUPP;
+	if (asoc->peer.ipv6_address)
+		flags |= SCTP_ADDR6_PEERSUPP;
+
+	return sctp_bind_addr_copy(sock_net(asoc->base.sk),
+				   &asoc->base.bind_addr,
+				   &asoc->ep->base.bind_addr,
+				   scope, gfp, flags);
+}
+
+/* Build the association's bind address list from the cookie. */
+int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *asoc,
+					 struct sctp_cookie *cookie,
+					 gfp_t gfp)
+{
+	int var_size2 = ntohs(cookie->peer_init->chunk_hdr.length);
+	int var_size3 = cookie->raw_addr_list_len;
+	__u8 *raw = (__u8 *)cookie->peer_init + var_size2;
+
+	return sctp_raw_to_bind_addrs(&asoc->base.bind_addr, raw, var_size3,
+				      asoc->ep->base.bind_addr.port, gfp);
+}
+
+/* Lookup laddr in the bind address list of an association. */
+int sctp_assoc_lookup_laddr(struct sctp_association *asoc,
+			    const union sctp_addr *laddr)
+{
+	int found = 0;
+
+	if ((asoc->base.bind_addr.port == ntohs(laddr->v4.sin_port)) &&
+	    sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
+				 sctp_sk(asoc->base.sk)))
+		found = 1;
+
+	return found;
+}
+
+/* Set an association id for a given association */
+int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp)
+{
+	bool preload = !!(gfp & __GFP_WAIT);
+	int ret;
+
+	/* If the id is already assigned, keep it. */
+	if (asoc->assoc_id)
+		return 0;
+
+	if (preload)
+		idr_preload(gfp);
+	spin_lock_bh(&sctp_assocs_id_lock);
+	/* 0 is not a valid assoc_id, must be >= 1 */
+	ret = idr_alloc_cyclic(&sctp_assocs_id, asoc, 1, 0, GFP_NOWAIT);
+	spin_unlock_bh(&sctp_assocs_id_lock);
+	if (preload)
+		idr_preload_end();
+	if (ret < 0)
+		return ret;
+
+	asoc->assoc_id = (sctp_assoc_t)ret;
+	return 0;
+}
+
+/* Free the ASCONF queue */
+static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc)
+{
+	struct sctp_chunk *asconf;
+	struct sctp_chunk *tmp;
+
+	list_for_each_entry_safe(asconf, tmp, &asoc->addip_chunk_list, list) {
+		list_del_init(&asconf->list);
+		sctp_chunk_free(asconf);
+	}
+}
+
+/* Free asconf_ack cache */
+static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc)
+{
+	struct sctp_chunk *ack;
+	struct sctp_chunk *tmp;
+
+	list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
+				 transmitted_list) {
+		list_del_init(&ack->transmitted_list);
+		sctp_chunk_free(ack);
+	}
+}
+
+/* Clean up the ASCONF_ACK queue */
+void sctp_assoc_clean_asconf_ack_cache(const struct sctp_association *asoc)
+{
+	struct sctp_chunk *ack;
+	struct sctp_chunk *tmp;
+
+	/* We can remove all the entries from the queue up to
+	 * the "Peer-Sequence-Number".
+	 */
+	list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
+				 transmitted_list) {
+		if (ack->subh.addip_hdr->serial ==
+		    htonl(asoc->peer.addip_serial))
+			break;
+
+		list_del_init(&ack->transmitted_list);
+		sctp_chunk_free(ack);
+	}
+}
+
+/* Find the ASCONF_ACK whose serial number matches ASCONF */
+struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
+					const struct sctp_association *asoc,
+					__be32 serial)
+{
+	struct sctp_chunk *ack;
+
+	/* Walk through the list of cached ASCONF-ACKs and find the
+	 * ack chunk whose serial number matches that of the request.
+	 */
+	list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) {
+		if (sctp_chunk_pending(ack))
+			continue;
+		if (ack->subh.addip_hdr->serial == serial) {
+			sctp_chunk_hold(ack);
+			return ack;
+		}
+	}
+
+	return NULL;
+}
+
+void sctp_asconf_queue_teardown(struct sctp_association *asoc)
+{
+	/* Free any cached ASCONF_ACK chunk. */
+	sctp_assoc_free_asconf_acks(asoc);
+
+	/* Free the ASCONF queue. */
+	sctp_assoc_free_asconf_queue(asoc);
+
+	/* Free any cached ASCONF chunk. */
+	if (asoc->addip_last_asconf)
+		sctp_chunk_free(asoc->addip_last_asconf);
+}
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
new file mode 100644
index 000000000..4f15b7d73
--- /dev/null
+++ b/net/sctp/auth.c
@@ -0,0 +1,953 @@
+/* SCTP kernel implementation
+ * (C) Copyright 2007 Hewlett-Packard Development Company, L.P.
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * This SCTP implementation is free software;
+ * you can redistribute it and/or modify it under the terms of
+ * the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This SCTP implementation is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ *                 ************************
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNU CC; see the file COPYING.  If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ *    lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ *    Vlad Yasevich <vladislav.yasevich@hp.com>
+ */
+
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/crypto.h>
+#include <linux/scatterlist.h>
+#include <net/sctp/sctp.h>
+#include <net/sctp/auth.h>
+
+static struct sctp_hmac sctp_hmac_list[SCTP_AUTH_NUM_HMACS] = {
+	{
+		/* id 0 is reserved.  as all 0 */
+		.hmac_id = SCTP_AUTH_HMAC_ID_RESERVED_0,
+	},
+	{
+		.hmac_id = SCTP_AUTH_HMAC_ID_SHA1,
+		.hmac_name = "hmac(sha1)",
+		.hmac_len = SCTP_SHA1_SIG_SIZE,
+	},
+	{
+		/* id 2 is reserved as well */
+		.hmac_id = SCTP_AUTH_HMAC_ID_RESERVED_2,
+	},
+#if defined (CONFIG_CRYPTO_SHA256) || defined (CONFIG_CRYPTO_SHA256_MODULE)
+	{
+		.hmac_id = SCTP_AUTH_HMAC_ID_SHA256,
+		.hmac_name = "hmac(sha256)",
+		.hmac_len = SCTP_SHA256_SIG_SIZE,
+	}
+#endif
+};
+
+
+void sctp_auth_key_put(struct sctp_auth_bytes *key)
+{
+	if (!key)
+		return;
+
+	if (atomic_dec_and_test(&key->refcnt)) {
+		kzfree(key);
+		SCTP_DBG_OBJCNT_DEC(keys);
+	}
+}
+
+/* Create a new key structure of a given length */
+static struct sctp_auth_bytes *sctp_auth_create_key(__u32 key_len, gfp_t gfp)
+{
+	struct sctp_auth_bytes *key;
+
+	/* Verify that we are not going to overflow INT_MAX */
+	if (key_len > (INT_MAX - sizeof(struct sctp_auth_bytes)))
+		return NULL;
+
+	/* Allocate the shared key */
+	key = kmalloc(sizeof(struct sctp_auth_bytes) + key_len, gfp);
+	if (!key)
+		return NULL;
+
+	key->len = key_len;
+	atomic_set(&key->refcnt, 1);
+	SCTP_DBG_OBJCNT_INC(keys);
+
+	return key;
+}
+
+/* Create a new shared key container with a given key id */
+struct sctp_shared_key *sctp_auth_shkey_create(__u16 key_id, gfp_t gfp)
+{
+	struct sctp_shared_key *new;
+
+	/* Allocate the shared key container */
+	new = kzalloc(sizeof(struct sctp_shared_key), gfp);
+	if (!new)
+		return NULL;
+
+	INIT_LIST_HEAD(&new->key_list);
+	new->key_id = key_id;
+
+	return new;
+}
+
+/* Free the shared key structure */
+static void sctp_auth_shkey_free(struct sctp_shared_key *sh_key)
+{
+	BUG_ON(!list_empty(&sh_key->key_list));
+	sctp_auth_key_put(sh_key->key);
+	sh_key->key = NULL;
+	kfree(sh_key);
+}
+
+/* Destroy the entire key list.  This is done during the
+ * association and endpoint free process.
+ */
+void sctp_auth_destroy_keys(struct list_head *keys)
+{
+	struct sctp_shared_key *ep_key;
+	struct sctp_shared_key *tmp;
+
+	if (list_empty(keys))
+		return;
+
+	key_for_each_safe(ep_key, tmp, keys) {
+		list_del_init(&ep_key->key_list);
+		sctp_auth_shkey_free(ep_key);
+	}
+}
+
+/* Compare two byte vectors as numbers.  Return values
+ * are:
+ *	  0 - vectors are equal
+ *	< 0 - vector 1 is smaller than vector2
+ *	> 0 - vector 1 is greater than vector2
+ *
+ * Algorithm is:
+ *	This is performed by selecting the numerically smaller key vector...
+ *	If the key vectors are equal as numbers but differ in length ...
+ *	the shorter vector is considered smaller
+ *
+ * Examples (with small values):
+ *	000123456789 > 123456789 (first number is longer)
+ *	000123456789 < 234567891 (second number is larger numerically)
+ *	123456789 > 2345678 (first number is both larger & longer)
+ */
+static int sctp_auth_compare_vectors(struct sctp_auth_bytes *vector1,
+				     struct sctp_auth_bytes *vector2)
+{
+	int diff;
+	int i;
+	const __u8 *longer;
+
+	diff = vector1->len - vector2->len;
+	if (diff) {
+		longer = (diff > 0) ? vector1->data : vector2->data;
+
+		/* Check to see if the longer number is
+		 * lead-zero padded.  If it is not, it
+		 * is automatically larger numerically.
+		 */
+		for (i = 0; i < abs(diff); i++) {
+			if (longer[i] != 0)
+				return diff;
+		}
+	}
+
+	/* lengths are the same, compare numbers */
+	return memcmp(vector1->data, vector2->data, vector1->len);
+}
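A simplified standalone model of the comparison rules documented above, exercising the examples from the comment (the in-kernel routine reaches the same ordering with a different structure: a leading-zero scan followed by memcmp):

#include <stdio.h>
#include <string.h>

/* Compare two byte strings as big-endian numbers; equal numbers of
 * different lengths order the shorter one first. */
static int cmp_vec(const unsigned char *a, int alen,
		   const unsigned char *b, int blen)
{
	int as = 0, bs = 0, c;

	while (as < alen && a[as] == 0)	/* skip leading zero bytes */
		as++;
	while (bs < blen && b[bs] == 0)
		bs++;

	if (alen - as != blen - bs)	/* more significant bytes wins */
		return (alen - as) - (blen - bs);

	c = memcmp(a + as, b + bs, alen - as);
	return c ? c : alen - blen;	/* equal as numbers: shorter is smaller */
}

int main(void)
{
	unsigned char a[] = { 0, 0, 0, 1, 2, 3 };	/* "000123" */
	unsigned char b[] = { 1, 2, 3 };		/* "123" */
	unsigned char c[] = { 2, 3, 4, 5, 6, 7 };	/* longer and larger */

	printf("a vs b: %d\n", cmp_vec(a, 6, b, 3));	/* > 0: equal number, a longer */
	printf("a vs c: %d\n", cmp_vec(a, 6, c, 6));	/* < 0: 123 < 234567 */
	return 0;
}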
+ * These parameters include the parameter type, parameter length, and + * the parameter value, but padding is omitted; all padding MUST be + * removed from this concatenation before proceeding with further + * computation of keys. Parameters which were not sent are simply + * omitted from the concatenation process. The resulting two vectors + * are called the two key vectors. + */ +static struct sctp_auth_bytes *sctp_auth_make_key_vector( + sctp_random_param_t *random, + sctp_chunks_param_t *chunks, + sctp_hmac_algo_param_t *hmacs, + gfp_t gfp) +{ + struct sctp_auth_bytes *new; + __u32 len; + __u32 offset = 0; + __u16 random_len, hmacs_len, chunks_len = 0; + + random_len = ntohs(random->param_hdr.length); + hmacs_len = ntohs(hmacs->param_hdr.length); + if (chunks) + chunks_len = ntohs(chunks->param_hdr.length); + + len = random_len + hmacs_len + chunks_len; + + new = sctp_auth_create_key(len, gfp); + if (!new) + return NULL; + + memcpy(new->data, random, random_len); + offset += random_len; + + if (chunks) { + memcpy(new->data + offset, chunks, chunks_len); + offset += chunks_len; + } + + memcpy(new->data + offset, hmacs, hmacs_len); + + return new; +} + + +/* Make a key vector based on our local parameters */ +static struct sctp_auth_bytes *sctp_auth_make_local_vector( + const struct sctp_association *asoc, + gfp_t gfp) +{ + return sctp_auth_make_key_vector( + (sctp_random_param_t *)asoc->c.auth_random, + (sctp_chunks_param_t *)asoc->c.auth_chunks, + (sctp_hmac_algo_param_t *)asoc->c.auth_hmacs, + gfp); +} + +/* Make a key vector based on peer's parameters */ +static struct sctp_auth_bytes *sctp_auth_make_peer_vector( + const struct sctp_association *asoc, + gfp_t gfp) +{ + return sctp_auth_make_key_vector(asoc->peer.peer_random, + asoc->peer.peer_chunks, + asoc->peer.peer_hmacs, + gfp); +} + + +/* Set the value of the association shared key base on the parameters + * given. The algorithm is: + * From the endpoint pair shared keys and the key vectors the + * association shared keys are computed. This is performed by selecting + * the numerically smaller key vector and concatenating it to the + * endpoint pair shared key, and then concatenating the numerically + * larger key vector to that. The result of the concatenation is the + * association shared key. + */ +static struct sctp_auth_bytes *sctp_auth_asoc_set_secret( + struct sctp_shared_key *ep_key, + struct sctp_auth_bytes *first_vector, + struct sctp_auth_bytes *last_vector, + gfp_t gfp) +{ + struct sctp_auth_bytes *secret; + __u32 offset = 0; + __u32 auth_len; + + auth_len = first_vector->len + last_vector->len; + if (ep_key->key) + auth_len += ep_key->key->len; + + secret = sctp_auth_create_key(auth_len, gfp); + if (!secret) + return NULL; + + if (ep_key->key) { + memcpy(secret->data, ep_key->key->data, ep_key->key->len); + offset += ep_key->key->len; + } + + memcpy(secret->data + offset, first_vector->data, first_vector->len); + offset += first_vector->len; + + memcpy(secret->data + offset, last_vector->data, last_vector->len); + + return secret; +} + +/* Create an association shared key. 
Follow the algorithm + * described in SCTP-AUTH, Section 6.1 + */ +static struct sctp_auth_bytes *sctp_auth_asoc_create_secret( + const struct sctp_association *asoc, + struct sctp_shared_key *ep_key, + gfp_t gfp) +{ + struct sctp_auth_bytes *local_key_vector; + struct sctp_auth_bytes *peer_key_vector; + struct sctp_auth_bytes *first_vector, + *last_vector; + struct sctp_auth_bytes *secret = NULL; + int cmp; + + + /* Now we need to build the key vectors + * SCTP-AUTH , Section 6.1 + * The RANDOM parameter, the CHUNKS parameter and the HMAC-ALGO + * parameter sent by each endpoint are concatenated as byte vectors. + * These parameters include the parameter type, parameter length, and + * the parameter value, but padding is omitted; all padding MUST be + * removed from this concatenation before proceeding with further + * computation of keys. Parameters which were not sent are simply + * omitted from the concatenation process. The resulting two vectors + * are called the two key vectors. + */ + + local_key_vector = sctp_auth_make_local_vector(asoc, gfp); + peer_key_vector = sctp_auth_make_peer_vector(asoc, gfp); + + if (!peer_key_vector || !local_key_vector) + goto out; + + /* Figure out the order in which the key_vectors will be + * added to the endpoint shared key. + * SCTP-AUTH, Section 6.1: + * This is performed by selecting the numerically smaller key + * vector and concatenating it to the endpoint pair shared + * key, and then concatenating the numerically larger key + * vector to that. If the key vectors are equal as numbers + * but differ in length, then the concatenation order is the + * endpoint shared key, followed by the shorter key vector, + * followed by the longer key vector. Otherwise, the key + * vectors are identical, and may be concatenated to the + * endpoint pair key in any order. + */ + cmp = sctp_auth_compare_vectors(local_key_vector, + peer_key_vector); + if (cmp < 0) { + first_vector = local_key_vector; + last_vector = peer_key_vector; + } else { + first_vector = peer_key_vector; + last_vector = local_key_vector; + } + + secret = sctp_auth_asoc_set_secret(ep_key, first_vector, last_vector, + gfp); +out: + sctp_auth_key_put(local_key_vector); + sctp_auth_key_put(peer_key_vector); + + return secret; +} + +/* + * Populate the association overlay list with the list + * from the endpoint. + */ +int sctp_auth_asoc_copy_shkeys(const struct sctp_endpoint *ep, + struct sctp_association *asoc, + gfp_t gfp) +{ + struct sctp_shared_key *sh_key; + struct sctp_shared_key *new; + + BUG_ON(!list_empty(&asoc->endpoint_shared_keys)); + + key_for_each(sh_key, &ep->endpoint_shared_keys) { + new = sctp_auth_shkey_create(sh_key->key_id, gfp); + if (!new) + goto nomem; + + new->key = sh_key->key; + sctp_auth_key_hold(new->key); + list_add(&new->key_list, &asoc->endpoint_shared_keys); + } + + return 0; + +nomem: + sctp_auth_destroy_keys(&asoc->endpoint_shared_keys); + return -ENOMEM; +} + + +/* Public interface to create the association shared key. + * See code above for the algorithm. + */ +int sctp_auth_asoc_init_active_key(struct sctp_association *asoc, gfp_t gfp) +{ + struct sctp_auth_bytes *secret; + struct sctp_shared_key *ep_key; + struct sctp_chunk *chunk; + + /* If we don't support AUTH, or peer is not capable + * we don't need to do anything. + */ + if (!asoc->ep->auth_enable || !asoc->peer.auth_capable) + return 0; + + /* If the key_id is non-zero and we couldn't find an + * endpoint pair shared key, we can't compute the + * secret. 
+ * For key_id 0, endpoint pair shared key is a NULL key.
+	 */
+	ep_key = sctp_auth_get_shkey(asoc, asoc->active_key_id);
+	BUG_ON(!ep_key);
+
+	secret = sctp_auth_asoc_create_secret(asoc, ep_key, gfp);
+	if (!secret)
+		return -ENOMEM;
+
+	sctp_auth_key_put(asoc->asoc_shared_key);
+	asoc->asoc_shared_key = secret;
+
+	/* Update send queue in case any chunk already in there now
+	 * needs authenticating
+	 */
+	list_for_each_entry(chunk, &asoc->outqueue.out_chunk_list, list) {
+		if (sctp_auth_send_cid(chunk->chunk_hdr->type, asoc))
+			chunk->auth = 1;
+	}
+
+	return 0;
+}
+
+
+/* Find the endpoint pair shared key based on the key_id */
+struct sctp_shared_key *sctp_auth_get_shkey(
+				const struct sctp_association *asoc,
+				__u16 key_id)
+{
+	struct sctp_shared_key *key;
+
+	/* First search the association's set of endpoint pair shared keys */
+	key_for_each(key, &asoc->endpoint_shared_keys) {
+		if (key->key_id == key_id)
+			return key;
+	}
+
+	return NULL;
+}
+
+/*
+ * Initialize all the possible digest transforms that we can use.  Right
+ * now, the supported digests are SHA1 and SHA256.  We do this here once
+ * because of the restriction that transforms may only be allocated in
+ * user context.  This forces us to pre-allocate all possible transforms
+ * at the endpoint init time.
+ */
+int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp)
+{
+	struct crypto_hash *tfm = NULL;
+	__u16   id;
+
+	/* If AUTH extension is disabled, we are done */
+	if (!ep->auth_enable) {
+		ep->auth_hmacs = NULL;
+		return 0;
+	}
+
+	/* If the transforms are already allocated, we are done */
+	if (ep->auth_hmacs)
+		return 0;
+
+	/* Allocate the array of pointers to transforms */
+	ep->auth_hmacs = kzalloc(
+			    sizeof(struct crypto_hash *) * SCTP_AUTH_NUM_HMACS,
+			    gfp);
+	if (!ep->auth_hmacs)
+		return -ENOMEM;
+
+	for (id = 0; id < SCTP_AUTH_NUM_HMACS; id++) {
+
+		/* See if we support the id.  Supported IDs have name and
+		 * length fields set, so that we can allocate and use
+		 * them.  We can safely just check for name, for without the
+		 * name, we can't allocate the TFM.
+		 */
+		if (!sctp_hmac_list[id].hmac_name)
+			continue;
+
+		/* If this TFM has been allocated, we are all set */
+		if (ep->auth_hmacs[id])
+			continue;
+
+		/* Allocate the ID */
+		tfm = crypto_alloc_hash(sctp_hmac_list[id].hmac_name, 0,
+					CRYPTO_ALG_ASYNC);
+		if (IS_ERR(tfm))
+			goto out_err;
+
+		ep->auth_hmacs[id] = tfm;
+	}
+
+	return 0;
+
+out_err:
+	/* Clean up any successful allocations */
+	sctp_auth_destroy_hmacs(ep->auth_hmacs);
+	return -ENOMEM;
+}
+
+/* Destroy the hmac tfm array */
+void sctp_auth_destroy_hmacs(struct crypto_hash *auth_hmacs[])
+{
+	int i;
+
+	if (!auth_hmacs)
+		return;
+
+	for (i = 0; i < SCTP_AUTH_NUM_HMACS; i++) {
+		if (auth_hmacs[i])
+			crypto_free_hash(auth_hmacs[i]);
+	}
+	kfree(auth_hmacs);
+}
+
+
+struct sctp_hmac *sctp_auth_get_hmac(__u16 hmac_id)
+{
+	return &sctp_hmac_list[hmac_id];
+}
+
+/* Get an hmac description information that we can use to build
+ * the AUTH chunk
+ */
+struct sctp_hmac *sctp_auth_asoc_get_hmac(const struct sctp_association *asoc)
+{
+	struct sctp_hmac_algo_param *hmacs;
+	__u16 n_elt;
+	__u16 id = 0;
+	int i;
+
+	/* If we have a default entry, use it */
+	if (asoc->default_hmac_id)
+		return &sctp_hmac_list[asoc->default_hmac_id];
+
+	/* Since we do not have a default entry, find the first entry
+	 * we support and return that.  Do not cache that id.
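+	 *
+	 * Worked example (hypothetical peer list, for illustration): if
+	 * the peer's HMAC-ALGO parameter advertised { SHA-256, SHA-1 } in
+	 * that order, the loop below returns SHA-256 on kernels built
+	 * with CONFIG_CRYPTO_SHA256 and falls back to SHA-1 otherwise,
+	 * because ids without an hmac_name in sctp_hmac_list[] (unknown
+	 * or not built in) are skipped.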
+ */ + hmacs = asoc->peer.peer_hmacs; + if (!hmacs) + return NULL; + + n_elt = (ntohs(hmacs->param_hdr.length) - sizeof(sctp_paramhdr_t)) >> 1; + for (i = 0; i < n_elt; i++) { + id = ntohs(hmacs->hmac_ids[i]); + + /* Check the id is in the supported range. And + * see if we support the id. Supported IDs have name and + * length fields set, so that we can allocate and use + * them. We can safely just check for name, for without the + * name, we can't allocate the TFM. + */ + if (id > SCTP_AUTH_HMAC_ID_MAX || + !sctp_hmac_list[id].hmac_name) { + id = 0; + continue; + } + + break; + } + + if (id == 0) + return NULL; + + return &sctp_hmac_list[id]; +} + +static int __sctp_auth_find_hmacid(__be16 *hmacs, int n_elts, __be16 hmac_id) +{ + int found = 0; + int i; + + for (i = 0; i < n_elts; i++) { + if (hmac_id == hmacs[i]) { + found = 1; + break; + } + } + + return found; +} + +/* See if the HMAC_ID is one that we claim as supported */ +int sctp_auth_asoc_verify_hmac_id(const struct sctp_association *asoc, + __be16 hmac_id) +{ + struct sctp_hmac_algo_param *hmacs; + __u16 n_elt; + + if (!asoc) + return 0; + + hmacs = (struct sctp_hmac_algo_param *)asoc->c.auth_hmacs; + n_elt = (ntohs(hmacs->param_hdr.length) - sizeof(sctp_paramhdr_t)) >> 1; + + return __sctp_auth_find_hmacid(hmacs->hmac_ids, n_elt, hmac_id); +} + + +/* Cache the default HMAC id. This to follow this text from SCTP-AUTH: + * Section 6.1: + * The receiver of a HMAC-ALGO parameter SHOULD use the first listed + * algorithm it supports. + */ +void sctp_auth_asoc_set_default_hmac(struct sctp_association *asoc, + struct sctp_hmac_algo_param *hmacs) +{ + struct sctp_endpoint *ep; + __u16 id; + int i; + int n_params; + + /* if the default id is already set, use it */ + if (asoc->default_hmac_id) + return; + + n_params = (ntohs(hmacs->param_hdr.length) + - sizeof(sctp_paramhdr_t)) >> 1; + ep = asoc->ep; + for (i = 0; i < n_params; i++) { + id = ntohs(hmacs->hmac_ids[i]); + + /* Check the id is in the supported range */ + if (id > SCTP_AUTH_HMAC_ID_MAX) + continue; + + /* If this TFM has been allocated, use this id */ + if (ep->auth_hmacs[id]) { + asoc->default_hmac_id = id; + break; + } + } +} + + +/* Check to see if the given chunk is supposed to be authenticated */ +static int __sctp_auth_cid(sctp_cid_t chunk, struct sctp_chunks_param *param) +{ + unsigned short len; + int found = 0; + int i; + + if (!param || param->param_hdr.length == 0) + return 0; + + len = ntohs(param->param_hdr.length) - sizeof(sctp_paramhdr_t); + + /* SCTP-AUTH, Section 3.2 + * The chunk types for INIT, INIT-ACK, SHUTDOWN-COMPLETE and AUTH + * chunks MUST NOT be listed in the CHUNKS parameter. However, if + * a CHUNKS parameter is received then the types for INIT, INIT-ACK, + * SHUTDOWN-COMPLETE and AUTH chunks MUST be ignored. + */ + for (i = 0; !found && i < len; i++) { + switch (param->chunks[i]) { + case SCTP_CID_INIT: + case SCTP_CID_INIT_ACK: + case SCTP_CID_SHUTDOWN_COMPLETE: + case SCTP_CID_AUTH: + break; + + default: + if (param->chunks[i] == chunk) + found = 1; + break; + } + } + + return found; +} + +/* Check if peer requested that this chunk is authenticated */ +int sctp_auth_send_cid(sctp_cid_t chunk, const struct sctp_association *asoc) +{ + if (!asoc) + return 0; + + if (!asoc->ep->auth_enable || !asoc->peer.auth_capable) + return 0; + + return __sctp_auth_cid(chunk, asoc->peer.peer_chunks); +} + +/* Check if we requested that peer authenticate this chunk. 
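+ *
+ * Note the symmetry with sctp_auth_send_cid() above: that helper
+ * consults the peer's CHUNKS parameter (chunk types the peer asked us
+ * to sign), while this one consults our own auth_chunks list (types we
+ * asked the peer to sign).  For example, if our CHUNKS parameter
+ * listed DATA, an inbound DATA chunk must be covered by a verified
+ * AUTH chunk to be accepted.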
*/ +int sctp_auth_recv_cid(sctp_cid_t chunk, const struct sctp_association *asoc) +{ + if (!asoc) + return 0; + + if (!asoc->ep->auth_enable) + return 0; + + return __sctp_auth_cid(chunk, + (struct sctp_chunks_param *)asoc->c.auth_chunks); +} + +/* SCTP-AUTH: Section 6.2: + * The sender MUST calculate the MAC as described in RFC2104 [2] using + * the hash function H as described by the MAC Identifier and the shared + * association key K based on the endpoint pair shared key described by + * the shared key identifier. The 'data' used for the computation of + * the AUTH-chunk is given by the AUTH chunk with its HMAC field set to + * zero (as shown in Figure 6) followed by all chunks that are placed + * after the AUTH chunk in the SCTP packet. + */ +void sctp_auth_calculate_hmac(const struct sctp_association *asoc, + struct sk_buff *skb, + struct sctp_auth_chunk *auth, + gfp_t gfp) +{ + struct scatterlist sg; + struct hash_desc desc; + struct sctp_auth_bytes *asoc_key; + __u16 key_id, hmac_id; + __u8 *digest; + unsigned char *end; + int free_key = 0; + + /* Extract the info we need: + * - hmac id + * - key id + */ + key_id = ntohs(auth->auth_hdr.shkey_id); + hmac_id = ntohs(auth->auth_hdr.hmac_id); + + if (key_id == asoc->active_key_id) + asoc_key = asoc->asoc_shared_key; + else { + struct sctp_shared_key *ep_key; + + ep_key = sctp_auth_get_shkey(asoc, key_id); + if (!ep_key) + return; + + asoc_key = sctp_auth_asoc_create_secret(asoc, ep_key, gfp); + if (!asoc_key) + return; + + free_key = 1; + } + + /* set up scatter list */ + end = skb_tail_pointer(skb); + sg_init_one(&sg, auth, end - (unsigned char *)auth); + + desc.tfm = asoc->ep->auth_hmacs[hmac_id]; + desc.flags = 0; + + digest = auth->auth_hdr.hmac; + if (crypto_hash_setkey(desc.tfm, &asoc_key->data[0], asoc_key->len)) + goto free; + + crypto_hash_digest(&desc, &sg, sg.length, digest); + +free: + if (free_key) + sctp_auth_key_put(asoc_key); +} + +/* API Helpers */ + +/* Add a chunk to the endpoint authenticated chunk list */ +int sctp_auth_ep_add_chunkid(struct sctp_endpoint *ep, __u8 chunk_id) +{ + struct sctp_chunks_param *p = ep->auth_chunk_list; + __u16 nchunks; + __u16 param_len; + + /* If this chunk is already specified, we are done */ + if (__sctp_auth_cid(chunk_id, p)) + return 0; + + /* Check if we can add this chunk to the array */ + param_len = ntohs(p->param_hdr.length); + nchunks = param_len - sizeof(sctp_paramhdr_t); + if (nchunks == SCTP_NUM_CHUNK_TYPES) + return -EINVAL; + + p->chunks[nchunks] = chunk_id; + p->param_hdr.length = htons(param_len + 1); + return 0; +} + +/* Add hmac identifires to the endpoint list of supported hmac ids */ +int sctp_auth_ep_set_hmacs(struct sctp_endpoint *ep, + struct sctp_hmacalgo *hmacs) +{ + int has_sha1 = 0; + __u16 id; + int i; + + /* Scan the list looking for unsupported id. Also make sure that + * SHA1 is specified. + */ + for (i = 0; i < hmacs->shmac_num_idents; i++) { + id = hmacs->shmac_idents[i]; + + if (id > SCTP_AUTH_HMAC_ID_MAX) + return -EOPNOTSUPP; + + if (SCTP_AUTH_HMAC_ID_SHA1 == id) + has_sha1 = 1; + + if (!sctp_hmac_list[id].hmac_name) + return -EOPNOTSUPP; + } + + if (!has_sha1) + return -EINVAL; + + memcpy(ep->auth_hmacs_list->hmac_ids, &hmacs->shmac_idents[0], + hmacs->shmac_num_idents * sizeof(__u16)); + ep->auth_hmacs_list->param_hdr.length = htons(sizeof(sctp_paramhdr_t) + + hmacs->shmac_num_idents * sizeof(__u16)); + return 0; +} + +/* Set a new shared key on either endpoint or association. 
If the + * the key with a same ID already exists, replace the key (remove the + * old key and add a new one). + */ +int sctp_auth_set_key(struct sctp_endpoint *ep, + struct sctp_association *asoc, + struct sctp_authkey *auth_key) +{ + struct sctp_shared_key *cur_key = NULL; + struct sctp_auth_bytes *key; + struct list_head *sh_keys; + int replace = 0; + + /* Try to find the given key id to see if + * we are doing a replace, or adding a new key + */ + if (asoc) + sh_keys = &asoc->endpoint_shared_keys; + else + sh_keys = &ep->endpoint_shared_keys; + + key_for_each(cur_key, sh_keys) { + if (cur_key->key_id == auth_key->sca_keynumber) { + replace = 1; + break; + } + } + + /* If we are not replacing a key id, we need to allocate + * a shared key. + */ + if (!replace) { + cur_key = sctp_auth_shkey_create(auth_key->sca_keynumber, + GFP_KERNEL); + if (!cur_key) + return -ENOMEM; + } + + /* Create a new key data based on the info passed in */ + key = sctp_auth_create_key(auth_key->sca_keylength, GFP_KERNEL); + if (!key) + goto nomem; + + memcpy(key->data, &auth_key->sca_key[0], auth_key->sca_keylength); + + /* If we are replacing, remove the old keys data from the + * key id. If we are adding new key id, add it to the + * list. + */ + if (replace) + sctp_auth_key_put(cur_key->key); + else + list_add(&cur_key->key_list, sh_keys); + + cur_key->key = key; + return 0; +nomem: + if (!replace) + sctp_auth_shkey_free(cur_key); + + return -ENOMEM; +} + +int sctp_auth_set_active_key(struct sctp_endpoint *ep, + struct sctp_association *asoc, + __u16 key_id) +{ + struct sctp_shared_key *key; + struct list_head *sh_keys; + int found = 0; + + /* The key identifier MUST correst to an existing key */ + if (asoc) + sh_keys = &asoc->endpoint_shared_keys; + else + sh_keys = &ep->endpoint_shared_keys; + + key_for_each(key, sh_keys) { + if (key->key_id == key_id) { + found = 1; + break; + } + } + + if (!found) + return -EINVAL; + + if (asoc) { + asoc->active_key_id = key_id; + sctp_auth_asoc_init_active_key(asoc, GFP_KERNEL); + } else + ep->active_key_id = key_id; + + return 0; +} + +int sctp_auth_del_key_id(struct sctp_endpoint *ep, + struct sctp_association *asoc, + __u16 key_id) +{ + struct sctp_shared_key *key; + struct list_head *sh_keys; + int found = 0; + + /* The key identifier MUST NOT be the current active key + * The key identifier MUST correst to an existing key + */ + if (asoc) { + if (asoc->active_key_id == key_id) + return -EINVAL; + + sh_keys = &asoc->endpoint_shared_keys; + } else { + if (ep->active_key_id == key_id) + return -EINVAL; + + sh_keys = &ep->endpoint_shared_keys; + } + + key_for_each(key, sh_keys) { + if (key->key_id == key_id) { + found = 1; + break; + } + } + + if (!found) + return -EINVAL; + + /* Delete the shared key */ + list_del_init(&key->key_list); + sctp_auth_shkey_free(key); + + return 0; +} diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c new file mode 100644 index 000000000..871cdf956 --- /dev/null +++ b/net/sctp/bind_addr.c @@ -0,0 +1,552 @@ +/* SCTP kernel implementation + * (C) Copyright IBM Corp. 2001, 2003 + * Copyright (c) Cisco 1999,2000 + * Copyright (c) Motorola 1999,2000,2001 + * Copyright (c) La Monte H.P. Yarroll 2001 + * + * This file is part of the SCTP kernel implementation. + * + * A collection class to handle the storage of transport addresses. 
+ *
+ * This SCTP implementation is free software;
+ * you can redistribute it and/or modify it under the terms of
+ * the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This SCTP implementation is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ *                 ************************
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNU CC; see the file COPYING.  If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ *    lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ *    La Monte H.P. Yarroll <piggy@acm.org>
+ *    Karl Knutson          <karl@athena.chicago.il.us>
+ *    Jon Grimm             <jgrimm@us.ibm.com>
+ *    Daisy Chang           <daisyc@us.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/in.h>
+#include <net/sock.h>
+#include <net/ipv6.h>
+#include <net/if_inet6.h>
+#include <net/sctp/sctp.h>
+#include <net/sctp/sm.h>
+
+/* Forward declarations for internal helpers. */
+static int sctp_copy_one_addr(struct net *, struct sctp_bind_addr *,
+			      union sctp_addr *, sctp_scope_t scope, gfp_t gfp,
+			      int flags);
+static void sctp_bind_addr_clean(struct sctp_bind_addr *);
+
+/* First Level Abstractions. */
+
+/* Copy 'src' to 'dest' taking 'scope' into account.  Omit addresses
+ * in 'src' which have a broader scope than 'scope'.
+ */
+int sctp_bind_addr_copy(struct net *net, struct sctp_bind_addr *dest,
+			const struct sctp_bind_addr *src,
+			sctp_scope_t scope, gfp_t gfp,
+			int flags)
+{
+	struct sctp_sockaddr_entry *addr;
+	int error = 0;
+
+	/* All addresses share the same port.  */
+	dest->port = src->port;
+
+	/* Extract the addresses which are relevant for this scope. */
+	list_for_each_entry(addr, &src->address_list, list) {
+		error = sctp_copy_one_addr(net, dest, &addr->a, scope,
+					   gfp, flags);
+		if (error < 0)
+			goto out;
+	}
+
+	/* If there are no addresses matching the scope and
+	 * this is global scope, try to get a link scope address, with
+	 * the assumption that we must be sitting behind a NAT.
+	 */
+	if (list_empty(&dest->address_list) && (SCTP_SCOPE_GLOBAL == scope)) {
+		list_for_each_entry(addr, &src->address_list, list) {
+			error = sctp_copy_one_addr(net, dest, &addr->a,
+						   SCTP_SCOPE_LINK, gfp,
+						   flags);
+			if (error < 0)
+				goto out;
+		}
+	}
+
+out:
+	if (error)
+		sctp_bind_addr_clean(dest);
+
+	return error;
+}
+
+/* Exactly duplicate the address lists.  This is necessary when doing
+ * peel-offs and accepts.  We don't want to put all the current system
+ * addresses into the endpoint.  That's useless.  But we do want to
+ * duplicate the list of bound addresses that the older endpoint used.
+ */
+int sctp_bind_addr_dup(struct sctp_bind_addr *dest,
+			const struct sctp_bind_addr *src,
+			gfp_t gfp)
+{
+	struct sctp_sockaddr_entry *addr;
+	int error = 0;
+
+	/* All addresses share the same port.  */
+	dest->port = src->port;
+
+	list_for_each_entry(addr, &src->address_list, list) {
+		error = sctp_add_bind_addr(dest, &addr->a, 1, gfp);
+		if (error < 0)
+			break;
+	}
+
+	return error;
+}
+
+/* Initialize the SCTP_bind_addr structure for either an endpoint or
+ * an association.
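+ *
+ * Typical usage (as in the endpoint init code later in this patch):
+ *
+ *	sctp_bind_addr_init(&ep->base.bind_addr, 0);
+ *
+ * i.e. start with an empty address list and fill the port in later,
+ * once one has been chosen or bound.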
+ */ +void sctp_bind_addr_init(struct sctp_bind_addr *bp, __u16 port) +{ + INIT_LIST_HEAD(&bp->address_list); + bp->port = port; +} + +/* Dispose of the address list. */ +static void sctp_bind_addr_clean(struct sctp_bind_addr *bp) +{ + struct sctp_sockaddr_entry *addr, *temp; + + /* Empty the bind address list. */ + list_for_each_entry_safe(addr, temp, &bp->address_list, list) { + list_del_rcu(&addr->list); + kfree_rcu(addr, rcu); + SCTP_DBG_OBJCNT_DEC(addr); + } +} + +/* Dispose of an SCTP_bind_addr structure */ +void sctp_bind_addr_free(struct sctp_bind_addr *bp) +{ + /* Empty the bind address list. */ + sctp_bind_addr_clean(bp); +} + +/* Add an address to the bind address list in the SCTP_bind_addr structure. */ +int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new, + __u8 addr_state, gfp_t gfp) +{ + struct sctp_sockaddr_entry *addr; + + /* Add the address to the bind address list. */ + addr = kzalloc(sizeof(*addr), gfp); + if (!addr) + return -ENOMEM; + + memcpy(&addr->a, new, sizeof(*new)); + + /* Fix up the port if it has not yet been set. + * Both v4 and v6 have the port at the same offset. + */ + if (!addr->a.v4.sin_port) + addr->a.v4.sin_port = htons(bp->port); + + addr->state = addr_state; + addr->valid = 1; + + INIT_LIST_HEAD(&addr->list); + + /* We always hold a socket lock when calling this function, + * and that acts as a writer synchronizing lock. + */ + list_add_tail_rcu(&addr->list, &bp->address_list); + SCTP_DBG_OBJCNT_INC(addr); + + return 0; +} + +/* Delete an address from the bind address list in the SCTP_bind_addr + * structure. + */ +int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr) +{ + struct sctp_sockaddr_entry *addr, *temp; + int found = 0; + + /* We hold the socket lock when calling this function, + * and that acts as a writer synchronizing lock. + */ + list_for_each_entry_safe(addr, temp, &bp->address_list, list) { + if (sctp_cmp_addr_exact(&addr->a, del_addr)) { + /* Found the exact match. */ + found = 1; + addr->valid = 0; + list_del_rcu(&addr->list); + break; + } + } + + if (found) { + kfree_rcu(addr, rcu); + SCTP_DBG_OBJCNT_DEC(addr); + return 0; + } + + return -EINVAL; +} + +/* Create a network byte-order representation of all the addresses + * formated as SCTP parameters. + * + * The second argument is the return value for the length. + */ +union sctp_params sctp_bind_addrs_to_raw(const struct sctp_bind_addr *bp, + int *addrs_len, + gfp_t gfp) +{ + union sctp_params addrparms; + union sctp_params retval; + int addrparms_len; + union sctp_addr_param rawaddr; + int len; + struct sctp_sockaddr_entry *addr; + struct list_head *pos; + struct sctp_af *af; + + addrparms_len = 0; + len = 0; + + /* Allocate enough memory at once. */ + list_for_each(pos, &bp->address_list) { + len += sizeof(union sctp_addr_param); + } + + /* Don't even bother embedding an address if there + * is only one. + */ + if (len == sizeof(union sctp_addr_param)) { + retval.v = NULL; + goto end_raw; + } + + retval.v = kmalloc(len, gfp); + if (!retval.v) + goto end_raw; + + addrparms = retval; + + list_for_each_entry(addr, &bp->address_list, list) { + af = sctp_get_af_specific(addr->a.v4.sin_family); + len = af->to_addr_param(&addr->a, &rawaddr); + memcpy(addrparms.v, &rawaddr, len); + addrparms.v += len; + addrparms_len += len; + } + +end_raw: + *addrs_len = addrparms_len; + return retval; +} + +/* + * Create an address list out of the raw address list format (IPv4 and IPv6 + * address parameters). 
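+ *
+ * For reference, each raw parameter parsed below is an RFC 4960 TLV:
+ * an IPv4 Address parameter is type 5, length 8 (4-byte parameter
+ * header plus the 4-byte address), and an IPv6 Address parameter is
+ * type 6, length 20 (4-byte header plus the 16-byte address).
+ * param_type2af() maps those types to the matching address family
+ * helper.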
+ */ +int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw_addr_list, + int addrs_len, __u16 port, gfp_t gfp) +{ + union sctp_addr_param *rawaddr; + struct sctp_paramhdr *param; + union sctp_addr addr; + int retval = 0; + int len; + struct sctp_af *af; + + /* Convert the raw address to standard address format */ + while (addrs_len) { + param = (struct sctp_paramhdr *)raw_addr_list; + rawaddr = (union sctp_addr_param *)raw_addr_list; + + af = sctp_get_af_specific(param_type2af(param->type)); + if (unlikely(!af)) { + retval = -EINVAL; + sctp_bind_addr_clean(bp); + break; + } + + af->from_addr_param(&addr, rawaddr, htons(port), 0); + retval = sctp_add_bind_addr(bp, &addr, SCTP_ADDR_SRC, gfp); + if (retval) { + /* Can't finish building the list, clean up. */ + sctp_bind_addr_clean(bp); + break; + } + + len = ntohs(param->length); + addrs_len -= len; + raw_addr_list += len; + } + + return retval; +} + +/******************************************************************** + * 2nd Level Abstractions + ********************************************************************/ + +/* Does this contain a specified address? Allow wildcarding. */ +int sctp_bind_addr_match(struct sctp_bind_addr *bp, + const union sctp_addr *addr, + struct sctp_sock *opt) +{ + struct sctp_sockaddr_entry *laddr; + int match = 0; + + rcu_read_lock(); + list_for_each_entry_rcu(laddr, &bp->address_list, list) { + if (!laddr->valid) + continue; + if (opt->pf->cmp_addr(&laddr->a, addr, opt)) { + match = 1; + break; + } + } + rcu_read_unlock(); + + return match; +} + +/* Does the address 'addr' conflict with any addresses in + * the bp. + */ +int sctp_bind_addr_conflict(struct sctp_bind_addr *bp, + const union sctp_addr *addr, + struct sctp_sock *bp_sp, + struct sctp_sock *addr_sp) +{ + struct sctp_sockaddr_entry *laddr; + int conflict = 0; + struct sctp_sock *sp; + + /* Pick the IPv6 socket as the basis of comparison + * since it's usually a superset of the IPv4. + * If there is no IPv6 socket, then default to bind_addr. + */ + if (sctp_opt2sk(bp_sp)->sk_family == AF_INET6) + sp = bp_sp; + else if (sctp_opt2sk(addr_sp)->sk_family == AF_INET6) + sp = addr_sp; + else + sp = bp_sp; + + rcu_read_lock(); + list_for_each_entry_rcu(laddr, &bp->address_list, list) { + if (!laddr->valid) + continue; + + conflict = sp->pf->cmp_addr(&laddr->a, addr, sp); + if (conflict) + break; + } + rcu_read_unlock(); + + return conflict; +} + +/* Get the state of the entry in the bind_addr_list */ +int sctp_bind_addr_state(const struct sctp_bind_addr *bp, + const union sctp_addr *addr) +{ + struct sctp_sockaddr_entry *laddr; + struct sctp_af *af; + int state = -1; + + af = sctp_get_af_specific(addr->sa.sa_family); + if (unlikely(!af)) + return state; + + rcu_read_lock(); + list_for_each_entry_rcu(laddr, &bp->address_list, list) { + if (!laddr->valid) + continue; + if (af->cmp_addr(&laddr->a, addr)) { + state = laddr->state; + break; + } + } + rcu_read_unlock(); + + return state; +} + +/* Find the first address in the bind address list that is not present in + * the addrs packed array. + */ +union sctp_addr *sctp_find_unmatch_addr(struct sctp_bind_addr *bp, + const union sctp_addr *addrs, + int addrcnt, + struct sctp_sock *opt) +{ + struct sctp_sockaddr_entry *laddr; + union sctp_addr *addr; + void *addr_buf; + struct sctp_af *af; + int i; + + /* This is only called sctp_send_asconf_del_ip() and we hold + * the socket lock in that code patch, so that address list + * can't change. 
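+	 *
+	 * The addrs argument is a packed array of mixed sockaddrs, so
+	 * the inner walk advances by af->sockaddr_len each step: for
+	 * illustration, 16 bytes per struct sockaddr_in and 28 per
+	 * struct sockaddr_in6 on the usual Linux ABI.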
+ */ + list_for_each_entry(laddr, &bp->address_list, list) { + addr_buf = (union sctp_addr *)addrs; + for (i = 0; i < addrcnt; i++) { + addr = addr_buf; + af = sctp_get_af_specific(addr->v4.sin_family); + if (!af) + break; + + if (opt->pf->cmp_addr(&laddr->a, addr, opt)) + break; + + addr_buf += af->sockaddr_len; + } + if (i == addrcnt) + return &laddr->a; + } + + return NULL; +} + +/* Copy out addresses from the global local address list. */ +static int sctp_copy_one_addr(struct net *net, struct sctp_bind_addr *dest, + union sctp_addr *addr, + sctp_scope_t scope, gfp_t gfp, + int flags) +{ + int error = 0; + + if (sctp_is_any(NULL, addr)) { + error = sctp_copy_local_addr_list(net, dest, scope, gfp, flags); + } else if (sctp_in_scope(net, addr, scope)) { + /* Now that the address is in scope, check to see if + * the address type is supported by local sock as + * well as the remote peer. + */ + if ((((AF_INET == addr->sa.sa_family) && + (flags & SCTP_ADDR4_PEERSUPP))) || + (((AF_INET6 == addr->sa.sa_family) && + (flags & SCTP_ADDR6_ALLOWED) && + (flags & SCTP_ADDR6_PEERSUPP)))) + error = sctp_add_bind_addr(dest, addr, SCTP_ADDR_SRC, + gfp); + } + + return error; +} + +/* Is this a wildcard address? */ +int sctp_is_any(struct sock *sk, const union sctp_addr *addr) +{ + unsigned short fam = 0; + struct sctp_af *af; + + /* Try to get the right address family */ + if (addr->sa.sa_family != AF_UNSPEC) + fam = addr->sa.sa_family; + else if (sk) + fam = sk->sk_family; + + af = sctp_get_af_specific(fam); + if (!af) + return 0; + + return af->is_any(addr); +} + +/* Is 'addr' valid for 'scope'? */ +int sctp_in_scope(struct net *net, const union sctp_addr *addr, sctp_scope_t scope) +{ + sctp_scope_t addr_scope = sctp_scope(addr); + + /* The unusable SCTP addresses will not be considered with + * any defined scopes. + */ + if (SCTP_SCOPE_UNUSABLE == addr_scope) + return 0; + /* + * For INIT and INIT-ACK address list, let L be the level of + * of requested destination address, sender and receiver + * SHOULD include all of its addresses with level greater + * than or equal to L. + * + * Address scoping can be selectively controlled via sysctl + * option + */ + switch (net->sctp.scope_policy) { + case SCTP_SCOPE_POLICY_DISABLE: + return 1; + case SCTP_SCOPE_POLICY_ENABLE: + if (addr_scope <= scope) + return 1; + break; + case SCTP_SCOPE_POLICY_PRIVATE: + if (addr_scope <= scope || SCTP_SCOPE_PRIVATE == addr_scope) + return 1; + break; + case SCTP_SCOPE_POLICY_LINK: + if (addr_scope <= scope || SCTP_SCOPE_LINK == addr_scope) + return 1; + break; + default: + break; + } + + return 0; +} + +int sctp_is_ep_boundall(struct sock *sk) +{ + struct sctp_bind_addr *bp; + struct sctp_sockaddr_entry *addr; + + bp = &sctp_sk(sk)->ep->base.bind_addr; + if (sctp_list_single_entry(&bp->address_list)) { + addr = list_entry(bp->address_list.next, + struct sctp_sockaddr_entry, list); + if (sctp_is_any(sk, &addr->a)) + return 1; + } + return 0; +} + +/******************************************************************** + * 3rd Level Abstractions + ********************************************************************/ + +/* What is the scope of 'addr'? 
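+ *
+ * For IPv4, for example, this classifies 127.0.0.0/8 as loopback
+ * scope, the RFC 1918 blocks (10/8, 172.16/12, 192.168/16) as private
+ * scope, and 169.254/16 as link scope; the per-AF .scope helpers hold
+ * the details.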
 */
+sctp_scope_t sctp_scope(const union sctp_addr *addr)
+{
+	struct sctp_af *af;
+
+	af = sctp_get_af_specific(addr->sa.sa_family);
+	if (!af)
+		return SCTP_SCOPE_UNUSABLE;
+
+	return af->scope((union sctp_addr *)addr);
+}
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
new file mode 100644
index 000000000..a3380917f
--- /dev/null
+++ b/net/sctp/chunk.c
@@ -0,0 +1,365 @@
+/* SCTP kernel implementation
+ * (C) Copyright IBM Corp. 2003, 2004
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * This file contains the code relating to the chunk abstraction.
+ *
+ * This SCTP implementation is free software;
+ * you can redistribute it and/or modify it under the terms of
+ * the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This SCTP implementation is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ *                 ************************
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNU CC; see the file COPYING.  If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ *    lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ *    Jon Grimm             <jgrimm@us.ibm.com>
+ *    Sridhar Samudrala     <sri@us.ibm.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/net.h>
+#include <linux/inet.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <net/sock.h>
+#include <net/sctp/sctp.h>
+#include <net/sctp/sm.h>
+
+/* This file is mostly in anticipation of future work, but initially
+ * populate with fragment tracking for an outbound message.
+ */
+
+/* Initialize datamsg from memory. */
+static void sctp_datamsg_init(struct sctp_datamsg *msg)
+{
+	atomic_set(&msg->refcnt, 1);
+	msg->send_failed = 0;
+	msg->send_error = 0;
+	msg->can_abandon = 0;
+	msg->can_delay = 1;
+	msg->expires_at = 0;
+	INIT_LIST_HEAD(&msg->chunks);
+}
+
+/* Allocate and initialize datamsg. */
+static struct sctp_datamsg *sctp_datamsg_new(gfp_t gfp)
+{
+	struct sctp_datamsg *msg;
+
+	msg = kmalloc(sizeof(struct sctp_datamsg), gfp);
+	if (msg) {
+		sctp_datamsg_init(msg);
+		SCTP_DBG_OBJCNT_INC(datamsg);
+	}
+	return msg;
+}
+
+void sctp_datamsg_free(struct sctp_datamsg *msg)
+{
+	struct sctp_chunk *chunk;
+
+	/* This doesn't have to be a _safe variant because
+	 * sctp_chunk_free() only drops the refs.
+	 */
+	list_for_each_entry(chunk, &msg->chunks, frag_list)
+		sctp_chunk_free(chunk);
+
+	sctp_datamsg_put(msg);
+}
+
+/* Final destruction of datamsg memory. */
+static void sctp_datamsg_destroy(struct sctp_datamsg *msg)
+{
+	struct list_head *pos, *temp;
+	struct sctp_chunk *chunk;
+	struct sctp_sock *sp;
+	struct sctp_ulpevent *ev;
+	struct sctp_association *asoc = NULL;
+	int error = 0, notify;
+
+	/* If we failed, we may need to notify. */
+	notify = msg->send_failed ? -1 : 0;
+
+	/* Release all references. */
+	list_for_each_safe(pos, temp, &msg->chunks) {
+		list_del_init(pos);
+		chunk = list_entry(pos, struct sctp_chunk, frag_list);
+		/* Check whether we _really_ need to notify.
*/ + if (notify < 0) { + asoc = chunk->asoc; + if (msg->send_error) + error = msg->send_error; + else + error = asoc->outqueue.error; + + sp = sctp_sk(asoc->base.sk); + notify = sctp_ulpevent_type_enabled(SCTP_SEND_FAILED, + &sp->subscribe); + } + + /* Generate a SEND FAILED event only if enabled. */ + if (notify > 0) { + int sent; + if (chunk->has_tsn) + sent = SCTP_DATA_SENT; + else + sent = SCTP_DATA_UNSENT; + + ev = sctp_ulpevent_make_send_failed(asoc, chunk, sent, + error, GFP_ATOMIC); + if (ev) + sctp_ulpq_tail_event(&asoc->ulpq, ev); + } + + sctp_chunk_put(chunk); + } + + SCTP_DBG_OBJCNT_DEC(datamsg); + kfree(msg); +} + +/* Hold a reference. */ +static void sctp_datamsg_hold(struct sctp_datamsg *msg) +{ + atomic_inc(&msg->refcnt); +} + +/* Release a reference. */ +void sctp_datamsg_put(struct sctp_datamsg *msg) +{ + if (atomic_dec_and_test(&msg->refcnt)) + sctp_datamsg_destroy(msg); +} + +/* Assign a chunk to this datamsg. */ +static void sctp_datamsg_assign(struct sctp_datamsg *msg, struct sctp_chunk *chunk) +{ + sctp_datamsg_hold(msg); + chunk->msg = msg; +} + + +/* A data chunk can have a maximum payload of (2^16 - 20). Break + * down any such message into smaller chunks. Opportunistically, fragment + * the chunks down to the current MTU constraints. We may get refragmented + * later if the PMTU changes, but it is _much better_ to fragment immediately + * with a reasonable guess than always doing our fragmentation on the + * soft-interrupt. + */ +struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc, + struct sctp_sndrcvinfo *sinfo, + struct iov_iter *from) +{ + int max, whole, i, offset, over, err; + int len, first_len; + int max_data; + struct sctp_chunk *chunk; + struct sctp_datamsg *msg; + struct list_head *pos, *temp; + size_t msg_len = iov_iter_count(from); + __u8 frag; + + msg = sctp_datamsg_new(GFP_KERNEL); + if (!msg) + return ERR_PTR(-ENOMEM); + + /* Note: Calculate this outside of the loop, so that all fragments + * have the same expiration. + */ + if (sinfo->sinfo_timetolive) { + /* sinfo_timetolive is in milliseconds */ + msg->expires_at = jiffies + + msecs_to_jiffies(sinfo->sinfo_timetolive); + msg->can_abandon = 1; + + pr_debug("%s: msg:%p expires_at:%ld jiffies:%ld\n", __func__, + msg, msg->expires_at, jiffies); + } + + /* This is the biggest possible DATA chunk that can fit into + * the packet + */ + max_data = (asoc->pathmtu - + sctp_sk(asoc->base.sk)->pf->af->net_header_len - + sizeof(struct sctphdr) - sizeof(struct sctp_data_chunk)) & ~3; + + max = asoc->frag_point; + /* If the the peer requested that we authenticate DATA chunks + * we need to account for bundling of the AUTH chunks along with + * DATA. + */ + if (sctp_auth_send_cid(SCTP_CID_DATA, asoc)) { + struct sctp_hmac *hmac_desc = sctp_auth_asoc_get_hmac(asoc); + + if (hmac_desc) + max_data -= WORD_ROUND(sizeof(sctp_auth_chunk_t) + + hmac_desc->hmac_len); + } + + /* Now, check if we need to reduce our max */ + if (max > max_data) + max = max_data; + + whole = 0; + first_len = max; + + /* Check to see if we have a pending SACK and try to let it be bundled + * with this message. Do this if we don't have any data queued already. + * To check that, look at out_qlen and retransmit list. + * NOTE: we will not reduce to account for SACK, if the message would + * not have been fragmented. 
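+	 *
+	 * Worked example of the sizing above, assuming IPv4, none of
+	 * the AUTH/SACK/COOKIE-ECHO adjustments, and a frag_point that
+	 * does not clamp it further: with a 1500-byte path MTU,
+	 *
+	 *	max_data = (1500 - 20 - 12 - 16) & ~3 = 1452
+	 *
+	 * (20 IPv4 header, 12 SCTP common header, 16 DATA chunk header),
+	 * so a 4000-byte message becomes fragments of 1452, 1452 and
+	 * 1096 bytes carrying the FIRST/MIDDLE/LAST flags set below.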
+ */ + if (timer_pending(&asoc->timers[SCTP_EVENT_TIMEOUT_SACK]) && + asoc->outqueue.out_qlen == 0 && + list_empty(&asoc->outqueue.retransmit) && + msg_len > max) + max_data -= WORD_ROUND(sizeof(sctp_sack_chunk_t)); + + /* Encourage Cookie-ECHO bundling. */ + if (asoc->state < SCTP_STATE_COOKIE_ECHOED) + max_data -= SCTP_ARBITRARY_COOKIE_ECHO_LEN; + + /* Now that we adjusted completely, reset first_len */ + if (first_len > max_data) + first_len = max_data; + + /* Account for a different sized first fragment */ + if (msg_len >= first_len) { + msg_len -= first_len; + whole = 1; + msg->can_delay = 0; + } + + /* How many full sized? How many bytes leftover? */ + whole += msg_len / max; + over = msg_len % max; + offset = 0; + + if ((whole > 1) || (whole && over)) + SCTP_INC_STATS_USER(sock_net(asoc->base.sk), SCTP_MIB_FRAGUSRMSGS); + + /* Create chunks for all the full sized DATA chunks. */ + for (i = 0, len = first_len; i < whole; i++) { + frag = SCTP_DATA_MIDDLE_FRAG; + + if (0 == i) + frag |= SCTP_DATA_FIRST_FRAG; + + if ((i == (whole - 1)) && !over) { + frag |= SCTP_DATA_LAST_FRAG; + + /* The application requests to set the I-bit of the + * last DATA chunk of a user message when providing + * the user message to the SCTP implementation. + */ + if ((sinfo->sinfo_flags & SCTP_EOF) || + (sinfo->sinfo_flags & SCTP_SACK_IMMEDIATELY)) + frag |= SCTP_DATA_SACK_IMM; + } + + chunk = sctp_make_datafrag_empty(asoc, sinfo, len, frag, 0); + + if (!chunk) { + err = -ENOMEM; + goto errout; + } + + err = sctp_user_addto_chunk(chunk, len, from); + if (err < 0) + goto errout_chunk_free; + + /* Put the chunk->skb back into the form expected by send. */ + __skb_pull(chunk->skb, (__u8 *)chunk->chunk_hdr + - (__u8 *)chunk->skb->data); + + sctp_datamsg_assign(msg, chunk); + list_add_tail(&chunk->frag_list, &msg->chunks); + + /* The first chunk, the first chunk was likely short + * to allow bundling, so reset to full size. + */ + if (0 == i) + len = max; + } + + /* .. now the leftover bytes. */ + if (over) { + if (!whole) + frag = SCTP_DATA_NOT_FRAG; + else + frag = SCTP_DATA_LAST_FRAG; + + if ((sinfo->sinfo_flags & SCTP_EOF) || + (sinfo->sinfo_flags & SCTP_SACK_IMMEDIATELY)) + frag |= SCTP_DATA_SACK_IMM; + + chunk = sctp_make_datafrag_empty(asoc, sinfo, over, frag, 0); + + if (!chunk) { + err = -ENOMEM; + goto errout; + } + + err = sctp_user_addto_chunk(chunk, over, from); + + /* Put the chunk->skb back into the form expected by send. */ + __skb_pull(chunk->skb, (__u8 *)chunk->chunk_hdr + - (__u8 *)chunk->skb->data); + if (err < 0) + goto errout_chunk_free; + + sctp_datamsg_assign(msg, chunk); + list_add_tail(&chunk->frag_list, &msg->chunks); + } + + return msg; + +errout_chunk_free: + sctp_chunk_free(chunk); + +errout: + list_for_each_safe(pos, temp, &msg->chunks) { + list_del_init(pos); + chunk = list_entry(pos, struct sctp_chunk, frag_list); + sctp_chunk_free(chunk); + } + sctp_datamsg_put(msg); + return ERR_PTR(err); +} + +/* Check whether this message has expired. */ +int sctp_chunk_abandoned(struct sctp_chunk *chunk) +{ + struct sctp_datamsg *msg = chunk->msg; + + if (!msg->can_abandon) + return 0; + + if (time_after(jiffies, msg->expires_at)) + return 1; + + return 0; +} + +/* This chunk (and consequently entire message) has failed in its sending. 
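+ *
+ * sctp_datamsg_destroy() above picks the error recorded here up and,
+ * if the socket subscribed to SCTP_SEND_FAILED events, reports it per
+ * chunk as SCTP_DATA_SENT or SCTP_DATA_UNSENT depending on whether a
+ * TSN was ever assigned.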
*/ +void sctp_chunk_fail(struct sctp_chunk *chunk, int error) +{ + chunk->msg->send_failed = 1; + chunk->msg->send_error = error; +} diff --git a/net/sctp/debug.c b/net/sctp/debug.c new file mode 100644 index 000000000..95d7b15da --- /dev/null +++ b/net/sctp/debug.c @@ -0,0 +1,172 @@ +/* SCTP kernel implementation + * (C) Copyright IBM Corp. 2001, 2004 + * Copyright (c) 1999-2000 Cisco, Inc. + * Copyright (c) 1999-2001 Motorola, Inc. + * Copyright (c) 2001 Intel Corp. + * + * This file is part of the SCTP kernel implementation + * + * This file converts numerical ID value to alphabetical names for SCTP + * terms such as chunk type, parameter time, event type, etc. + * + * This SCTP implementation is free software; + * you can redistribute it and/or modify it under the terms of + * the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This SCTP implementation is distributed in the hope that it + * will be useful, but WITHOUT ANY WARRANTY; without even the implied + * ************************ + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with GNU CC; see the file COPYING. If not, see + * <http://www.gnu.org/licenses/>. + * + * Please send any bug reports or fixes you make to the + * email address(es): + * lksctp developers <linux-sctp@vger.kernel.org> + * + * Written or modified by: + * La Monte H.P. Yarroll <piggy@acm.org> + * Karl Knutson <karl@athena.chicago.il.us> + * Xingang Guo <xingang.guo@intel.com> + * Jon Grimm <jgrimm@us.ibm.com> + * Daisy Chang <daisyc@us.ibm.com> + * Sridhar Samudrala <sri@us.ibm.com> + */ + +#include <net/sctp/sctp.h> + +/* These are printable forms of Chunk ID's from section 3.1. */ +static const char *const sctp_cid_tbl[SCTP_NUM_BASE_CHUNK_TYPES] = { + "DATA", + "INIT", + "INIT_ACK", + "SACK", + "HEARTBEAT", + "HEARTBEAT_ACK", + "ABORT", + "SHUTDOWN", + "SHUTDOWN_ACK", + "ERROR", + "COOKIE_ECHO", + "COOKIE_ACK", + "ECN_ECNE", + "ECN_CWR", + "SHUTDOWN_COMPLETE", +}; + +/* Lookup "chunk type" debug name. */ +const char *sctp_cname(const sctp_subtype_t cid) +{ + if (cid.chunk <= SCTP_CID_BASE_MAX) + return sctp_cid_tbl[cid.chunk]; + + switch (cid.chunk) { + case SCTP_CID_ASCONF: + return "ASCONF"; + + case SCTP_CID_ASCONF_ACK: + return "ASCONF_ACK"; + + case SCTP_CID_FWD_TSN: + return "FWD_TSN"; + + case SCTP_CID_AUTH: + return "AUTH"; + + default: + break; + } + + return "unknown chunk"; +} + +/* These are printable forms of the states. */ +const char *const sctp_state_tbl[SCTP_STATE_NUM_STATES] = { + "STATE_CLOSED", + "STATE_COOKIE_WAIT", + "STATE_COOKIE_ECHOED", + "STATE_ESTABLISHED", + "STATE_SHUTDOWN_PENDING", + "STATE_SHUTDOWN_SENT", + "STATE_SHUTDOWN_RECEIVED", + "STATE_SHUTDOWN_ACK_SENT", +}; + +/* Events that could change the state of an association. 
*/ +const char *const sctp_evttype_tbl[] = { + "EVENT_T_unknown", + "EVENT_T_CHUNK", + "EVENT_T_TIMEOUT", + "EVENT_T_OTHER", + "EVENT_T_PRIMITIVE" +}; + +/* Return value of a state function */ +const char *const sctp_status_tbl[] = { + "DISPOSITION_DISCARD", + "DISPOSITION_CONSUME", + "DISPOSITION_NOMEM", + "DISPOSITION_DELETE_TCB", + "DISPOSITION_ABORT", + "DISPOSITION_VIOLATION", + "DISPOSITION_NOT_IMPL", + "DISPOSITION_ERROR", + "DISPOSITION_BUG" +}; + +/* Printable forms of primitives */ +static const char *const sctp_primitive_tbl[SCTP_NUM_PRIMITIVE_TYPES] = { + "PRIMITIVE_ASSOCIATE", + "PRIMITIVE_SHUTDOWN", + "PRIMITIVE_ABORT", + "PRIMITIVE_SEND", + "PRIMITIVE_REQUESTHEARTBEAT", + "PRIMITIVE_ASCONF", +}; + +/* Lookup primitive debug name. */ +const char *sctp_pname(const sctp_subtype_t id) +{ + if (id.primitive <= SCTP_EVENT_PRIMITIVE_MAX) + return sctp_primitive_tbl[id.primitive]; + return "unknown_primitive"; +} + +static const char *const sctp_other_tbl[] = { + "NO_PENDING_TSN", + "ICMP_PROTO_UNREACH", +}; + +/* Lookup "other" debug name. */ +const char *sctp_oname(const sctp_subtype_t id) +{ + if (id.other <= SCTP_EVENT_OTHER_MAX) + return sctp_other_tbl[id.other]; + return "unknown 'other' event"; +} + +static const char *const sctp_timer_tbl[] = { + "TIMEOUT_NONE", + "TIMEOUT_T1_COOKIE", + "TIMEOUT_T1_INIT", + "TIMEOUT_T2_SHUTDOWN", + "TIMEOUT_T3_RTX", + "TIMEOUT_T4_RTO", + "TIMEOUT_T5_SHUTDOWN_GUARD", + "TIMEOUT_HEARTBEAT", + "TIMEOUT_SACK", + "TIMEOUT_AUTOCLOSE", +}; + +/* Lookup timer debug name. */ +const char *sctp_tname(const sctp_subtype_t id) +{ + if (id.timeout <= SCTP_EVENT_TIMEOUT_MAX) + return sctp_timer_tbl[id.timeout]; + return "unknown_timer"; +} diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c new file mode 100644 index 000000000..9da76ba4d --- /dev/null +++ b/net/sctp/endpointola.c @@ -0,0 +1,501 @@ +/* SCTP kernel implementation + * Copyright (c) 1999-2000 Cisco, Inc. + * Copyright (c) 1999-2001 Motorola, Inc. + * Copyright (c) 2001-2002 International Business Machines, Corp. + * Copyright (c) 2001 Intel Corp. + * Copyright (c) 2001 Nokia, Inc. + * Copyright (c) 2001 La Monte H.P. Yarroll + * + * This file is part of the SCTP kernel implementation + * + * This abstraction represents an SCTP endpoint. + * + * The SCTP implementation is free software; + * you can redistribute it and/or modify it under the terms of + * the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * The SCTP implementation is distributed in the hope that it + * will be useful, but WITHOUT ANY WARRANTY; without even the implied + * ************************ + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with GNU CC; see the file COPYING. If not, see + * <http://www.gnu.org/licenses/>. + * + * Please send any bug reports or fixes you make to the + * email address(es): + * lksctp developers <linux-sctp@vger.kernel.org> + * + * Written or modified by: + * La Monte H.P. 
Yarroll <piggy@acm.org> + * Karl Knutson <karl@athena.chicago.il.us> + * Jon Grimm <jgrimm@austin.ibm.com> + * Daisy Chang <daisyc@us.ibm.com> + * Dajiang Zhang <dajiang.zhang@nokia.com> + */ + +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/in.h> +#include <linux/random.h> /* get_random_bytes() */ +#include <linux/crypto.h> +#include <net/sock.h> +#include <net/ipv6.h> +#include <net/sctp/sctp.h> +#include <net/sctp/sm.h> + +/* Forward declarations for internal helpers. */ +static void sctp_endpoint_bh_rcv(struct work_struct *work); + +/* + * Initialize the base fields of the endpoint structure. + */ +static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep, + struct sock *sk, + gfp_t gfp) +{ + struct net *net = sock_net(sk); + struct sctp_hmac_algo_param *auth_hmacs = NULL; + struct sctp_chunks_param *auth_chunks = NULL; + struct sctp_shared_key *null_key; + int err; + + ep->digest = kzalloc(SCTP_SIGNATURE_SIZE, gfp); + if (!ep->digest) + return NULL; + + ep->auth_enable = net->sctp.auth_enable; + if (ep->auth_enable) { + /* Allocate space for HMACS and CHUNKS authentication + * variables. There are arrays that we encode directly + * into parameters to make the rest of the operations easier. + */ + auth_hmacs = kzalloc(sizeof(sctp_hmac_algo_param_t) + + sizeof(__u16) * SCTP_AUTH_NUM_HMACS, gfp); + if (!auth_hmacs) + goto nomem; + + auth_chunks = kzalloc(sizeof(sctp_chunks_param_t) + + SCTP_NUM_CHUNK_TYPES, gfp); + if (!auth_chunks) + goto nomem; + + /* Initialize the HMACS parameter. + * SCTP-AUTH: Section 3.3 + * Every endpoint supporting SCTP chunk authentication MUST + * support the HMAC based on the SHA-1 algorithm. + */ + auth_hmacs->param_hdr.type = SCTP_PARAM_HMAC_ALGO; + auth_hmacs->param_hdr.length = + htons(sizeof(sctp_paramhdr_t) + 2); + auth_hmacs->hmac_ids[0] = htons(SCTP_AUTH_HMAC_ID_SHA1); + + /* Initialize the CHUNKS parameter */ + auth_chunks->param_hdr.type = SCTP_PARAM_CHUNKS; + auth_chunks->param_hdr.length = htons(sizeof(sctp_paramhdr_t)); + + /* If the Add-IP functionality is enabled, we must + * authenticate, ASCONF and ASCONF-ACK chunks + */ + if (net->sctp.addip_enable) { + auth_chunks->chunks[0] = SCTP_CID_ASCONF; + auth_chunks->chunks[1] = SCTP_CID_ASCONF_ACK; + auth_chunks->param_hdr.length = + htons(sizeof(sctp_paramhdr_t) + 2); + } + } + + /* Initialize the base structure. */ + /* What type of endpoint are we? */ + ep->base.type = SCTP_EP_TYPE_SOCKET; + + /* Initialize the basic object fields. */ + atomic_set(&ep->base.refcnt, 1); + ep->base.dead = false; + + /* Create an input queue. */ + sctp_inq_init(&ep->base.inqueue); + + /* Set its top-half handler */ + sctp_inq_set_th_handler(&ep->base.inqueue, sctp_endpoint_bh_rcv); + + /* Initialize the bind addr area */ + sctp_bind_addr_init(&ep->base.bind_addr, 0); + + /* Remember who we are attached to. */ + ep->base.sk = sk; + sock_hold(ep->base.sk); + + /* Create the lists of associations. */ + INIT_LIST_HEAD(&ep->asocs); + + /* Use SCTP specific send buffer space queues. */ + ep->sndbuf_policy = net->sctp.sndbuf_policy; + + sk->sk_data_ready = sctp_data_ready; + sk->sk_write_space = sctp_write_space; + sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); + + /* Get the receive buffer policy for this endpoint */ + ep->rcvbuf_policy = net->sctp.rcvbuf_policy; + + /* Initialize the secret key used with cookie. 
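+	 *
+	 * This secret never goes on the wire itself: it keys the MAC
+	 * guarding the state cookie sent in INIT-ACK, letting the
+	 * endpoint validate a returned COOKIE-ECHO without keeping
+	 * per-INIT state (RFC 4960, Section 5.1.3).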
*/ + get_random_bytes(ep->secret_key, sizeof(ep->secret_key)); + + /* SCTP-AUTH extensions*/ + INIT_LIST_HEAD(&ep->endpoint_shared_keys); + null_key = sctp_auth_shkey_create(0, gfp); + if (!null_key) + goto nomem; + + list_add(&null_key->key_list, &ep->endpoint_shared_keys); + + /* Allocate and initialize transorms arrays for supported HMACs. */ + err = sctp_auth_init_hmacs(ep, gfp); + if (err) + goto nomem_hmacs; + + /* Add the null key to the endpoint shared keys list and + * set the hmcas and chunks pointers. + */ + ep->auth_hmacs_list = auth_hmacs; + ep->auth_chunk_list = auth_chunks; + + return ep; + +nomem_hmacs: + sctp_auth_destroy_keys(&ep->endpoint_shared_keys); +nomem: + /* Free all allocations */ + kfree(auth_hmacs); + kfree(auth_chunks); + kfree(ep->digest); + return NULL; + +} + +/* Create a sctp_endpoint with all that boring stuff initialized. + * Returns NULL if there isn't enough memory. + */ +struct sctp_endpoint *sctp_endpoint_new(struct sock *sk, gfp_t gfp) +{ + struct sctp_endpoint *ep; + + /* Build a local endpoint. */ + ep = kzalloc(sizeof(*ep), gfp); + if (!ep) + goto fail; + + if (!sctp_endpoint_init(ep, sk, gfp)) + goto fail_init; + + SCTP_DBG_OBJCNT_INC(ep); + return ep; + +fail_init: + kfree(ep); +fail: + return NULL; +} + +/* Add an association to an endpoint. */ +void sctp_endpoint_add_asoc(struct sctp_endpoint *ep, + struct sctp_association *asoc) +{ + struct sock *sk = ep->base.sk; + + /* If this is a temporary association, don't bother + * since we'll be removing it shortly and don't + * want anyone to find it anyway. + */ + if (asoc->temp) + return; + + /* Now just add it to our list of asocs */ + list_add_tail(&asoc->asocs, &ep->asocs); + + /* Increment the backlog value for a TCP-style listening socket. */ + if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) + sk->sk_ack_backlog++; +} + +/* Free the endpoint structure. Delay cleanup until + * all users have released their reference count on this structure. + */ +void sctp_endpoint_free(struct sctp_endpoint *ep) +{ + ep->base.dead = true; + + ep->base.sk->sk_state = SCTP_SS_CLOSED; + + /* Unlink this endpoint, so we can't find it again! */ + sctp_unhash_endpoint(ep); + + sctp_endpoint_put(ep); +} + +/* Final destructor for endpoint. */ +static void sctp_endpoint_destroy(struct sctp_endpoint *ep) +{ + struct sock *sk; + + if (unlikely(!ep->base.dead)) { + WARN(1, "Attempt to destroy undead endpoint %p!\n", ep); + return; + } + + /* Free the digest buffer */ + kfree(ep->digest); + + /* SCTP-AUTH: Free up AUTH releated data such as shared keys + * chunks and hmacs arrays that were allocated + */ + sctp_auth_destroy_keys(&ep->endpoint_shared_keys); + kfree(ep->auth_hmacs_list); + kfree(ep->auth_chunk_list); + + /* AUTH - Free any allocated HMAC transform containers */ + sctp_auth_destroy_hmacs(ep->auth_hmacs); + + /* Cleanup. */ + sctp_inq_free(&ep->base.inqueue); + sctp_bind_addr_free(&ep->base.bind_addr); + + memset(ep->secret_key, 0, sizeof(ep->secret_key)); + + /* Give up our hold on the sock. */ + sk = ep->base.sk; + if (sk != NULL) { + /* Remove and free the port */ + if (sctp_sk(sk)->bind_hash) + sctp_put_port(sk); + + sock_put(sk); + } + + kfree(ep); + SCTP_DBG_OBJCNT_DEC(ep); +} + +/* Hold a reference to an endpoint. */ +void sctp_endpoint_hold(struct sctp_endpoint *ep) +{ + atomic_inc(&ep->base.refcnt); +} + +/* Release a reference to an endpoint and clean up if there are + * no more references. 
+ */ +void sctp_endpoint_put(struct sctp_endpoint *ep) +{ + if (atomic_dec_and_test(&ep->base.refcnt)) + sctp_endpoint_destroy(ep); +} + +/* Is this the endpoint we are looking for? */ +struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *ep, + struct net *net, + const union sctp_addr *laddr) +{ + struct sctp_endpoint *retval = NULL; + + if ((htons(ep->base.bind_addr.port) == laddr->v4.sin_port) && + net_eq(sock_net(ep->base.sk), net)) { + if (sctp_bind_addr_match(&ep->base.bind_addr, laddr, + sctp_sk(ep->base.sk))) + retval = ep; + } + + return retval; +} + +/* Find the association that goes with this chunk. + * We do a linear search of the associations for this endpoint. + * We return the matching transport address too. + */ +static struct sctp_association *__sctp_endpoint_lookup_assoc( + const struct sctp_endpoint *ep, + const union sctp_addr *paddr, + struct sctp_transport **transport) +{ + struct sctp_association *asoc = NULL; + struct sctp_association *tmp; + struct sctp_transport *t = NULL; + struct sctp_hashbucket *head; + struct sctp_ep_common *epb; + int hash; + int rport; + + *transport = NULL; + + /* If the local port is not set, there can't be any associations + * on this endpoint. + */ + if (!ep->base.bind_addr.port) + goto out; + + rport = ntohs(paddr->v4.sin_port); + + hash = sctp_assoc_hashfn(sock_net(ep->base.sk), ep->base.bind_addr.port, + rport); + head = &sctp_assoc_hashtable[hash]; + read_lock(&head->lock); + sctp_for_each_hentry(epb, &head->chain) { + tmp = sctp_assoc(epb); + if (tmp->ep != ep || rport != tmp->peer.port) + continue; + + t = sctp_assoc_lookup_paddr(tmp, paddr); + if (t) { + asoc = tmp; + *transport = t; + break; + } + } + read_unlock(&head->lock); +out: + return asoc; +} + +/* Lookup association on an endpoint based on a peer address. BH-safe. */ +struct sctp_association *sctp_endpoint_lookup_assoc( + const struct sctp_endpoint *ep, + const union sctp_addr *paddr, + struct sctp_transport **transport) +{ + struct sctp_association *asoc; + + local_bh_disable(); + asoc = __sctp_endpoint_lookup_assoc(ep, paddr, transport); + local_bh_enable(); + + return asoc; +} + +/* Look for any peeled off association from the endpoint that matches the + * given peer address. + */ +int sctp_endpoint_is_peeled_off(struct sctp_endpoint *ep, + const union sctp_addr *paddr) +{ + struct sctp_sockaddr_entry *addr; + struct sctp_bind_addr *bp; + struct net *net = sock_net(ep->base.sk); + + bp = &ep->base.bind_addr; + /* This function is called with the socket lock held, + * so the address_list can not change. + */ + list_for_each_entry(addr, &bp->address_list, list) { + if (sctp_has_association(net, &addr->a, paddr)) + return 1; + } + + return 0; +} + +/* Do delayed input processing. This is scheduled by sctp_rcv(). + * This may be called on BH or task time. 
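+ * It is wired up as the inqueue's immediate work callback by
+ * sctp_inq_set_th_handler() in sctp_endpoint_init().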
+ */ +static void sctp_endpoint_bh_rcv(struct work_struct *work) +{ + struct sctp_endpoint *ep = + container_of(work, struct sctp_endpoint, + base.inqueue.immediate); + struct sctp_association *asoc; + struct sock *sk; + struct net *net; + struct sctp_transport *transport; + struct sctp_chunk *chunk; + struct sctp_inq *inqueue; + sctp_subtype_t subtype; + sctp_state_t state; + int error = 0; + int first_time = 1; /* is this the first time through the loop */ + + if (ep->base.dead) + return; + + asoc = NULL; + inqueue = &ep->base.inqueue; + sk = ep->base.sk; + net = sock_net(sk); + + while (NULL != (chunk = sctp_inq_pop(inqueue))) { + subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type); + + /* If the first chunk in the packet is AUTH, do special + * processing specified in Section 6.3 of SCTP-AUTH spec + */ + if (first_time && (subtype.chunk == SCTP_CID_AUTH)) { + struct sctp_chunkhdr *next_hdr; + + next_hdr = sctp_inq_peek(inqueue); + if (!next_hdr) + goto normal; + + /* If the next chunk is COOKIE-ECHO, skip the AUTH + * chunk while saving a pointer to it so we can do + * Authentication later (during cookie-echo + * processing). + */ + if (next_hdr->type == SCTP_CID_COOKIE_ECHO) { + chunk->auth_chunk = skb_clone(chunk->skb, + GFP_ATOMIC); + chunk->auth = 1; + continue; + } + } +normal: + /* We might have grown an association since last we + * looked, so try again. + * + * This happens when we've just processed our + * COOKIE-ECHO chunk. + */ + if (NULL == chunk->asoc) { + asoc = sctp_endpoint_lookup_assoc(ep, + sctp_source(chunk), + &transport); + chunk->asoc = asoc; + chunk->transport = transport; + } + + state = asoc ? asoc->state : SCTP_STATE_CLOSED; + if (sctp_auth_recv_cid(subtype.chunk, asoc) && !chunk->auth) + continue; + + /* Remember where the last DATA chunk came from so we + * know where to send the SACK. + */ + if (asoc && sctp_chunk_is_data(chunk)) + asoc->peer.last_data_from = chunk->transport; + else { + SCTP_INC_STATS(sock_net(ep->base.sk), SCTP_MIB_INCTRLCHUNKS); + if (asoc) + asoc->stats.ictrlchunks++; + } + + if (chunk->transport) + chunk->transport->last_time_heard = ktime_get(); + + error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype, state, + ep, asoc, chunk, GFP_ATOMIC); + + if (error && chunk) + chunk->pdiscard = 1; + + /* Check to see if the endpoint is freed in response to + * the incoming chunk. If so, get out of the while loop. + */ + if (!sctp_sk(sk)->ep) + break; + + if (first_time) + first_time = 0; + } +} diff --git a/net/sctp/input.c b/net/sctp/input.c new file mode 100644 index 000000000..b6493b3f1 --- /dev/null +++ b/net/sctp/input.c @@ -0,0 +1,1141 @@ +/* SCTP kernel implementation + * Copyright (c) 1999-2000 Cisco, Inc. + * Copyright (c) 1999-2001 Motorola, Inc. + * Copyright (c) 2001-2003 International Business Machines, Corp. + * Copyright (c) 2001 Intel Corp. + * Copyright (c) 2001 Nokia, Inc. + * Copyright (c) 2001 La Monte H.P. Yarroll + * + * This file is part of the SCTP kernel implementation + * + * These functions handle all input from the IP layer into SCTP. + * + * This SCTP implementation is free software; + * you can redistribute it and/or modify it under the terms of + * the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This SCTP implementation is distributed in the hope that it + * will be useful, but WITHOUT ANY WARRANTY; without even the implied + * ************************ + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with GNU CC; see the file COPYING. If not, see + * <http://www.gnu.org/licenses/>. + * + * Please send any bug reports or fixes you make to the + * email address(es): + * lksctp developers <linux-sctp@vger.kernel.org> + * + * Written or modified by: + * La Monte H.P. Yarroll <piggy@acm.org> + * Karl Knutson <karl@athena.chicago.il.us> + * Xingang Guo <xingang.guo@intel.com> + * Jon Grimm <jgrimm@us.ibm.com> + * Hui Huang <hui.huang@nokia.com> + * Daisy Chang <daisyc@us.ibm.com> + * Sridhar Samudrala <sri@us.ibm.com> + * Ardelle Fan <ardelle.fan@intel.com> + */ + +#include <linux/types.h> +#include <linux/list.h> /* For struct list_head */ +#include <linux/socket.h> +#include <linux/ip.h> +#include <linux/time.h> /* For struct timeval */ +#include <linux/slab.h> +#include <net/ip.h> +#include <net/icmp.h> +#include <net/snmp.h> +#include <net/sock.h> +#include <net/xfrm.h> +#include <net/sctp/sctp.h> +#include <net/sctp/sm.h> +#include <net/sctp/checksum.h> +#include <net/net_namespace.h> + +/* Forward declarations for internal helpers. */ +static int sctp_rcv_ootb(struct sk_buff *); +static struct sctp_association *__sctp_rcv_lookup(struct net *net, + struct sk_buff *skb, + const union sctp_addr *paddr, + const union sctp_addr *laddr, + struct sctp_transport **transportp); +static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(struct net *net, + const union sctp_addr *laddr); +static struct sctp_association *__sctp_lookup_association( + struct net *net, + const union sctp_addr *local, + const union sctp_addr *peer, + struct sctp_transport **pt); + +static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb); + + +/* Calculate the SCTP checksum of an SCTP packet. */ +static inline int sctp_rcv_checksum(struct net *net, struct sk_buff *skb) +{ + struct sctphdr *sh = sctp_hdr(skb); + __le32 cmp = sh->checksum; + __le32 val = sctp_compute_cksum(skb, 0); + + if (val != cmp) { + /* CRC failure, dump it. */ + SCTP_INC_STATS_BH(net, SCTP_MIB_CHECKSUMERRORS); + return -1; + } + return 0; +} + +struct sctp_input_cb { + union { + struct inet_skb_parm h4; +#if IS_ENABLED(CONFIG_IPV6) + struct inet6_skb_parm h6; +#endif + } header; + struct sctp_chunk *chunk; +}; +#define SCTP_INPUT_CB(__skb) ((struct sctp_input_cb *)&((__skb)->cb[0])) + +/* + * This is the routine which IP calls when receiving an SCTP packet. + */ +int sctp_rcv(struct sk_buff *skb) +{ + struct sock *sk; + struct sctp_association *asoc; + struct sctp_endpoint *ep = NULL; + struct sctp_ep_common *rcvr; + struct sctp_transport *transport = NULL; + struct sctp_chunk *chunk; + struct sctphdr *sh; + union sctp_addr src; + union sctp_addr dest; + int family; + struct sctp_af *af; + struct net *net = dev_net(skb->dev); + + if (skb->pkt_type != PACKET_HOST) + goto discard_it; + + SCTP_INC_STATS_BH(net, SCTP_MIB_INSCTPPACKS); + + if (skb_linearize(skb)) + goto discard_it; + + sh = sctp_hdr(skb); + + /* Pull up the IP and SCTP headers. */ + __skb_pull(skb, skb_transport_offset(skb)); + if (skb->len < sizeof(struct sctphdr)) + goto discard_it; + + skb->csum_valid = 0; /* Previous value not applicable */ + if (skb_csum_unnecessary(skb)) + __skb_decr_checksum_unnecessary(skb); + else if (!sctp_checksum_disable && sctp_rcv_checksum(net, skb) < 0) + goto discard_it; + skb->csum_valid = 1; + + skb_pull(skb, sizeof(struct sctphdr)); + + /* Make sure we at least have chunk headers worth of data left. 
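+	 * A packet shorter than one chunk header cannot carry any valid
+	 * chunk and is silently dropped.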
 */
+	if (skb->len < sizeof(struct sctp_chunkhdr))
+		goto discard_it;
+
+	family = ipver2af(ip_hdr(skb)->version);
+	af = sctp_get_af_specific(family);
+	if (unlikely(!af))
+		goto discard_it;
+
+	/* Initialize local addresses for lookups. */
+	af->from_skb(&src, skb, 1);
+	af->from_skb(&dest, skb, 0);
+
+	/* If the packet is to or from a non-unicast address,
+	 * silently discard the packet.
+	 *
+	 * This is not clearly defined in the RFC except in section
+	 * 8.4 - OOTB handling.  However, based on the book "Stream Control
+	 * Transmission Protocol" 2.1, "It is important to note that the
+	 * IP address of an SCTP transport address must be a routable
+	 * unicast address.  In other words, IP multicast addresses and
+	 * IP broadcast addresses cannot be used in an SCTP transport
+	 * address."
+	 */
+	if (!af->addr_valid(&src, NULL, skb) ||
+	    !af->addr_valid(&dest, NULL, skb))
+		goto discard_it;
+
+	asoc = __sctp_rcv_lookup(net, skb, &src, &dest, &transport);
+
+	if (!asoc)
+		ep = __sctp_rcv_lookup_endpoint(net, &dest);
+
+	/* Retrieve the common input handling substructure. */
+	rcvr = asoc ? &asoc->base : &ep->base;
+	sk = rcvr->sk;
+
+	/*
+	 * If a frame arrives on an interface and the receiving socket is
+	 * bound to another interface, via SO_BINDTODEVICE, treat it as OOTB.
+	 */
+	if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb))) {
+		if (asoc) {
+			sctp_association_put(asoc);
+			asoc = NULL;
+		} else {
+			sctp_endpoint_put(ep);
+			ep = NULL;
+		}
+		sk = net->sctp.ctl_sock;
+		ep = sctp_sk(sk)->ep;
+		sctp_endpoint_hold(ep);
+		rcvr = &ep->base;
+	}
+
+	/*
+	 * RFC 2960, 8.4 - Handle "Out of the blue" Packets.
+	 * An SCTP packet is called an "out of the blue" (OOTB)
+	 * packet if it is correctly formed, i.e., passed the
+	 * receiver's checksum check, but the receiver is not
+	 * able to identify the association to which this
+	 * packet belongs.
+	 */
+	if (!asoc) {
+		if (sctp_rcv_ootb(skb)) {
+			SCTP_INC_STATS_BH(net, SCTP_MIB_OUTOFBLUES);
+			goto discard_release;
+		}
+	}
+
+	if (!xfrm_policy_check(sk, XFRM_POLICY_IN, skb, family))
+		goto discard_release;
+	nf_reset(skb);
+
+	if (sk_filter(sk, skb))
+		goto discard_release;
+
+	/* Create an SCTP packet structure. */
+	chunk = sctp_chunkify(skb, asoc, sk);
+	if (!chunk)
+		goto discard_release;
+	SCTP_INPUT_CB(skb)->chunk = chunk;
+
+	/* Remember what endpoint is to handle this packet. */
+	chunk->rcvr = rcvr;
+
+	/* Remember the SCTP header. */
+	chunk->sctp_hdr = sh;
+
+	/* Set the source and destination addresses of the incoming chunk. */
+	sctp_init_addrs(chunk, &src, &dest);
+
+	/* Remember where we came from. */
+	chunk->transport = transport;
+
+	/* Acquire access to the sock lock. Note: We are safe from other
+	 * bottom halves on this lock, but a user may be in the lock too,
+	 * so check if it is busy.
+	 */
+	bh_lock_sock(sk);
+
+	if (sk != rcvr->sk) {
+		/* Our cached sk is different from the rcvr->sk.  This is
+		 * because migrate()/accept() may have moved the association
+		 * to a new socket and released all the sockets.  So now we
+		 * are holding a lock on the old socket while the user may
+		 * be doing something with the new socket.  Switch our view
+		 * of the current sk.
+		 */
+		bh_unlock_sock(sk);
+		sk = rcvr->sk;
+		bh_lock_sock(sk);
+	}
+
+	if (sock_owned_by_user(sk)) {
+		if (sctp_add_backlog(sk, skb)) {
+			bh_unlock_sock(sk);
+			sctp_chunk_free(chunk);
+			skb = NULL; /* sctp_chunk_free already freed the skb */
+			goto discard_release;
+		}
+		SCTP_INC_STATS_BH(net, SCTP_MIB_IN_PKT_BACKLOG);
+	} else {
+		SCTP_INC_STATS_BH(net, SCTP_MIB_IN_PKT_SOFTIRQ);
+		sctp_inq_push(&chunk->rcvr->inqueue, chunk);
+	}
+
+	bh_unlock_sock(sk);
+
+	/* Release the asoc/ep ref we took in the lookup calls. */
+	if (asoc)
+		sctp_association_put(asoc);
+	else
+		sctp_endpoint_put(ep);
+
+	return 0;
+
+discard_it:
+	SCTP_INC_STATS_BH(net, SCTP_MIB_IN_PKT_DISCARDS);
+	kfree_skb(skb);
+	return 0;
+
+discard_release:
+	/* Release the asoc/ep ref we took in the lookup calls. */
+	if (asoc)
+		sctp_association_put(asoc);
+	else
+		sctp_endpoint_put(ep);
+
+	goto discard_it;
+}
+
+/* Process the backlog queue of the socket.  Every skb on
+ * the backlog holds a ref on an association or endpoint.
+ * We hold this ref throughout the state machine to make
+ * sure that the structure we need is still around.
+ */
+int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
+{
+	struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
+	struct sctp_inq *inqueue = &chunk->rcvr->inqueue;
+	struct sctp_ep_common *rcvr = NULL;
+	int backloged = 0;
+
+	rcvr = chunk->rcvr;
+
+	/* If the rcvr is dead then the association or endpoint
+	 * has been deleted and we can safely drop the chunk
+	 * and refs that we are holding.
+	 */
+	if (rcvr->dead) {
+		sctp_chunk_free(chunk);
+		goto done;
+	}
+
+	if (unlikely(rcvr->sk != sk)) {
+		/* In this case, the association moved from one socket to
+		 * another.  We are currently sitting on the backlog of the
+		 * old socket, so we need to move.
+		 * However, since we are here in the process context we
+		 * need to make sure that the user doesn't own
+		 * the new socket when we process the packet.
+		 * If the new socket is user-owned, queue the chunk to the
+		 * backlog of the new socket without dropping any refs.
+		 * Otherwise, we can safely push the chunk on the inqueue.
+		 */
+
+		sk = rcvr->sk;
+		bh_lock_sock(sk);
+
+		if (sock_owned_by_user(sk)) {
+			if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
+				sctp_chunk_free(chunk);
+			else
+				backloged = 1;
+		} else
+			sctp_inq_push(inqueue, chunk);
+
+		bh_unlock_sock(sk);
+
+		/* If the chunk was backloged again, don't drop refs */
+		if (backloged)
+			return 0;
+	} else {
+		sctp_inq_push(inqueue, chunk);
+	}
+
+done:
+	/* Release the refs we took in sctp_add_backlog */
+	if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
+		sctp_association_put(sctp_assoc(rcvr));
+	else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
+		sctp_endpoint_put(sctp_ep(rcvr));
+	else
+		BUG();
+
+	return 0;
+}
+
+static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
+{
+	struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk;
+	struct sctp_ep_common *rcvr = chunk->rcvr;
+	int ret;
+
+	ret = sk_add_backlog(sk, skb, sk->sk_rcvbuf);
+	if (!ret) {
+		/* Hold the assoc/ep while hanging on the backlog queue.
+		 * This way, we know structures we need will not disappear
+		 * from us.
+		 */
+		if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type)
+			sctp_association_hold(sctp_assoc(rcvr));
+		else if (SCTP_EP_TYPE_SOCKET == rcvr->type)
+			sctp_endpoint_hold(sctp_ep(rcvr));
+		else
+			BUG();
+	}
+	return ret;
+}
+
+/* Handle icmp frag needed error.
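+ * 'pmtu' is the next-hop MTU reported in the ICMP message.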
*/ +void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc, + struct sctp_transport *t, __u32 pmtu) +{ + if (!t || (t->pathmtu <= pmtu)) + return; + + if (sock_owned_by_user(sk)) { + asoc->pmtu_pending = 1; + t->pmtu_pending = 1; + return; + } + + if (t->param_flags & SPP_PMTUD_ENABLE) { + /* Update transports view of the MTU */ + sctp_transport_update_pmtu(sk, t, pmtu); + + /* Update association pmtu. */ + sctp_assoc_sync_pmtu(sk, asoc); + } + + /* Retransmit with the new pmtu setting. + * Normally, if PMTU discovery is disabled, an ICMP Fragmentation + * Needed will never be sent, but if a message was sent before + * PMTU discovery was disabled that was larger than the PMTU, it + * would not be fragmented, so it must be re-transmitted fragmented. + */ + sctp_retransmit(&asoc->outqueue, t, SCTP_RTXR_PMTUD); +} + +void sctp_icmp_redirect(struct sock *sk, struct sctp_transport *t, + struct sk_buff *skb) +{ + struct dst_entry *dst; + + if (!t) + return; + dst = sctp_transport_dst_check(t); + if (dst) + dst->ops->redirect(dst, sk, skb); +} + +/* + * SCTP Implementer's Guide, 2.37 ICMP handling procedures + * + * ICMP8) If the ICMP code is a "Unrecognized next header type encountered" + * or a "Protocol Unreachable" treat this message as an abort + * with the T bit set. + * + * This function sends an event to the state machine, which will abort the + * association. + * + */ +void sctp_icmp_proto_unreachable(struct sock *sk, + struct sctp_association *asoc, + struct sctp_transport *t) +{ + if (sock_owned_by_user(sk)) { + if (timer_pending(&t->proto_unreach_timer)) + return; + else { + if (!mod_timer(&t->proto_unreach_timer, + jiffies + (HZ/20))) + sctp_association_hold(asoc); + } + } else { + struct net *net = sock_net(sk); + + pr_debug("%s: unrecognized next header type " + "encountered!\n", __func__); + + if (del_timer(&t->proto_unreach_timer)) + sctp_association_put(asoc); + + sctp_do_sm(net, SCTP_EVENT_T_OTHER, + SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH), + asoc->state, asoc->ep, asoc, t, + GFP_ATOMIC); + } +} + +/* Common lookup code for icmp/icmpv6 error handler. */ +struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb, + struct sctphdr *sctphdr, + struct sctp_association **app, + struct sctp_transport **tpp) +{ + union sctp_addr saddr; + union sctp_addr daddr; + struct sctp_af *af; + struct sock *sk = NULL; + struct sctp_association *asoc; + struct sctp_transport *transport = NULL; + struct sctp_init_chunk *chunkhdr; + __u32 vtag = ntohl(sctphdr->vtag); + int len = skb->len - ((void *)sctphdr - (void *)skb->data); + + *app = NULL; *tpp = NULL; + + af = sctp_get_af_specific(family); + if (unlikely(!af)) { + return NULL; + } + + /* Initialize local addresses for lookups. */ + af->from_skb(&saddr, skb, 1); + af->from_skb(&daddr, skb, 0); + + /* Look for an association that matches the incoming ICMP error + * packet. + */ + asoc = __sctp_lookup_association(net, &saddr, &daddr, &transport); + if (!asoc) + return NULL; + + sk = asoc->base.sk; + + /* RFC 4960, Appendix C. ICMP Handling + * + * ICMP6) An implementation MUST validate that the Verification Tag + * contained in the ICMP message matches the Verification Tag of + * the peer. If the Verification Tag is not 0 and does NOT + * match, discard the ICMP message. If it is 0 and the ICMP + * message contains enough bytes to verify that the chunk type is + * an INIT chunk and that the Initiate Tag matches the tag of the + * peer, continue with ICMP7. 
If the ICMP message is too short + * or the chunk type or the Initiate Tag does not match, silently + * discard the packet. + */ + if (vtag == 0) { + chunkhdr = (void *)sctphdr + sizeof(struct sctphdr); + if (len < sizeof(struct sctphdr) + sizeof(sctp_chunkhdr_t) + + sizeof(__be32) || + chunkhdr->chunk_hdr.type != SCTP_CID_INIT || + ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag) { + goto out; + } + } else if (vtag != asoc->c.peer_vtag) { + goto out; + } + + bh_lock_sock(sk); + + /* If too many ICMPs get dropped on busy + * servers this needs to be solved differently. + */ + if (sock_owned_by_user(sk)) + NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS); + + *app = asoc; + *tpp = transport; + return sk; + +out: + sctp_association_put(asoc); + return NULL; +} + +/* Common cleanup code for icmp/icmpv6 error handler. */ +void sctp_err_finish(struct sock *sk, struct sctp_association *asoc) +{ + bh_unlock_sock(sk); + sctp_association_put(asoc); +} + +/* + * This routine is called by the ICMP module when it gets some + * sort of error condition. If err < 0 then the socket should + * be closed and the error returned to the user. If err > 0 + * it's just the icmp type << 8 | icmp code. After adjustment + * header points to the first 8 bytes of the sctp header. We need + * to find the appropriate port. + * + * The locking strategy used here is very "optimistic". When + * someone else accesses the socket the ICMP is just dropped + * and for some paths there is no check at all. + * A more general error queue to queue errors for later handling + * is probably better. + * + */ +void sctp_v4_err(struct sk_buff *skb, __u32 info) +{ + const struct iphdr *iph = (const struct iphdr *)skb->data; + const int ihlen = iph->ihl * 4; + const int type = icmp_hdr(skb)->type; + const int code = icmp_hdr(skb)->code; + struct sock *sk; + struct sctp_association *asoc = NULL; + struct sctp_transport *transport; + struct inet_sock *inet; + __u16 saveip, savesctp; + int err; + struct net *net = dev_net(skb->dev); + + /* Fix up skb to look at the embedded net header. */ + saveip = skb->network_header; + savesctp = skb->transport_header; + skb_reset_network_header(skb); + skb_set_transport_header(skb, ihlen); + sk = sctp_err_lookup(net, AF_INET, skb, sctp_hdr(skb), &asoc, &transport); + /* Put back, the original values. */ + skb->network_header = saveip; + skb->transport_header = savesctp; + if (!sk) { + ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS); + return; + } + /* Warning: The sock lock is held. Remember to call + * sctp_err_finish! + */ + + switch (type) { + case ICMP_PARAMETERPROB: + err = EPROTO; + break; + case ICMP_DEST_UNREACH: + if (code > NR_ICMP_UNREACH) + goto out_unlock; + + /* PMTU discovery (RFC1191) */ + if (ICMP_FRAG_NEEDED == code) { + sctp_icmp_frag_needed(sk, asoc, transport, info); + goto out_unlock; + } else { + if (ICMP_PROT_UNREACH == code) { + sctp_icmp_proto_unreachable(sk, asoc, + transport); + goto out_unlock; + } + } + err = icmp_err_convert[code].errno; + break; + case ICMP_TIME_EXCEEDED: + /* Ignore any time exceeded errors due to fragment reassembly + * timeouts. + */ + if (ICMP_EXC_FRAGTIME == code) + goto out_unlock; + + err = EHOSTUNREACH; + break; + case ICMP_REDIRECT: + sctp_icmp_redirect(sk, transport, skb); + /* Fall through to out_unlock. 
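+		 * No error is reported to the user for a redirect.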
 */
+	default:
+		goto out_unlock;
+	}
+
+	inet = inet_sk(sk);
+	if (!sock_owned_by_user(sk) && inet->recverr) {
+		sk->sk_err = err;
+		sk->sk_error_report(sk);
+	} else {	/* Only an error on timeout */
+		sk->sk_err_soft = err;
+	}
+
+out_unlock:
+	sctp_err_finish(sk, asoc);
+}
+
+/*
+ * RFC 2960, 8.4 - Handle "Out of the blue" Packets.
+ *
+ * This function scans all the chunks in the OOTB packet to determine if
+ * the packet should be discarded right away.  If a response might be needed
+ * for this packet, or, if further processing is possible, the packet will
+ * be queued to a proper inqueue for the next phase of handling.
+ *
+ * Output:
+ * Return 0 - If further processing is needed.
+ * Return 1 - If the packet can be discarded right away.
+ */
+static int sctp_rcv_ootb(struct sk_buff *skb)
+{
+	sctp_chunkhdr_t *ch;
+	__u8 *ch_end;
+
+	ch = (sctp_chunkhdr_t *) skb->data;
+
+	/* Scan through all the chunks in the packet. */
+	do {
+		/* Break out if chunk length is less than minimal. */
+		if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t))
+			break;
+
+		ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
+		if (ch_end > skb_tail_pointer(skb))
+			break;
+
+		/* RFC 8.4, 2) If the OOTB packet contains an ABORT chunk, the
+		 * receiver MUST silently discard the OOTB packet and take no
+		 * further action.
+		 */
+		if (SCTP_CID_ABORT == ch->type)
+			goto discard;
+
+		/* RFC 8.4, 6) If the packet contains a SHUTDOWN COMPLETE
+		 * chunk, the receiver should silently discard the packet
+		 * and take no further action.
+		 */
+		if (SCTP_CID_SHUTDOWN_COMPLETE == ch->type)
+			goto discard;
+
+		/* RFC 4460, 2.11.2
+		 * This will discard packets with INIT chunk bundled as
+		 * subsequent chunks in the packet.  When INIT is first,
+		 * the normal INIT processing will discard the chunk.
+		 */
+		if (SCTP_CID_INIT == ch->type && (void *)ch != skb->data)
+			goto discard;
+
+		ch = (sctp_chunkhdr_t *) ch_end;
+	} while (ch_end < skb_tail_pointer(skb));
+
+	return 0;
+
+discard:
+	return 1;
+}
+
+/* Insert endpoint into the hash table. */
+static void __sctp_hash_endpoint(struct sctp_endpoint *ep)
+{
+	struct net *net = sock_net(ep->base.sk);
+	struct sctp_ep_common *epb;
+	struct sctp_hashbucket *head;
+
+	epb = &ep->base;
+
+	epb->hashent = sctp_ep_hashfn(net, epb->bind_addr.port);
+	head = &sctp_ep_hashtable[epb->hashent];
+
+	write_lock(&head->lock);
+	hlist_add_head(&epb->node, &head->chain);
+	write_unlock(&head->lock);
+}
+
+/* Add an endpoint to the hash. Local BH-safe. */
+void sctp_hash_endpoint(struct sctp_endpoint *ep)
+{
+	local_bh_disable();
+	__sctp_hash_endpoint(ep);
+	local_bh_enable();
+}
+
+/* Remove endpoint from the hash table. */
+static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
+{
+	struct net *net = sock_net(ep->base.sk);
+	struct sctp_hashbucket *head;
+	struct sctp_ep_common *epb;
+
+	epb = &ep->base;
+
+	epb->hashent = sctp_ep_hashfn(net, epb->bind_addr.port);
+
+	head = &sctp_ep_hashtable[epb->hashent];
+
+	write_lock(&head->lock);
+	hlist_del_init(&epb->node);
+	write_unlock(&head->lock);
+}
+
+/* Remove endpoint from the hash. Local BH-safe. */
+void sctp_unhash_endpoint(struct sctp_endpoint *ep)
+{
+	local_bh_disable();
+	__sctp_unhash_endpoint(ep);
+	local_bh_enable();
+}
+
+/* Look up an endpoint.
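+ * Falls back to the control socket's endpoint when no bound endpoint
+ * matches the address.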
*/ +static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(struct net *net, + const union sctp_addr *laddr) +{ + struct sctp_hashbucket *head; + struct sctp_ep_common *epb; + struct sctp_endpoint *ep; + int hash; + + hash = sctp_ep_hashfn(net, ntohs(laddr->v4.sin_port)); + head = &sctp_ep_hashtable[hash]; + read_lock(&head->lock); + sctp_for_each_hentry(epb, &head->chain) { + ep = sctp_ep(epb); + if (sctp_endpoint_is_match(ep, net, laddr)) + goto hit; + } + + ep = sctp_sk(net->sctp.ctl_sock)->ep; + +hit: + sctp_endpoint_hold(ep); + read_unlock(&head->lock); + return ep; +} + +/* Insert association into the hash table. */ +static void __sctp_hash_established(struct sctp_association *asoc) +{ + struct net *net = sock_net(asoc->base.sk); + struct sctp_ep_common *epb; + struct sctp_hashbucket *head; + + epb = &asoc->base; + + /* Calculate which chain this entry will belong to. */ + epb->hashent = sctp_assoc_hashfn(net, epb->bind_addr.port, + asoc->peer.port); + + head = &sctp_assoc_hashtable[epb->hashent]; + + write_lock(&head->lock); + hlist_add_head(&epb->node, &head->chain); + write_unlock(&head->lock); +} + +/* Add an association to the hash. Local BH-safe. */ +void sctp_hash_established(struct sctp_association *asoc) +{ + if (asoc->temp) + return; + + local_bh_disable(); + __sctp_hash_established(asoc); + local_bh_enable(); +} + +/* Remove association from the hash table. */ +static void __sctp_unhash_established(struct sctp_association *asoc) +{ + struct net *net = sock_net(asoc->base.sk); + struct sctp_hashbucket *head; + struct sctp_ep_common *epb; + + epb = &asoc->base; + + epb->hashent = sctp_assoc_hashfn(net, epb->bind_addr.port, + asoc->peer.port); + + head = &sctp_assoc_hashtable[epb->hashent]; + + write_lock(&head->lock); + hlist_del_init(&epb->node); + write_unlock(&head->lock); +} + +/* Remove association from the hash table. Local BH-safe. */ +void sctp_unhash_established(struct sctp_association *asoc) +{ + if (asoc->temp) + return; + + local_bh_disable(); + __sctp_unhash_established(asoc); + local_bh_enable(); +} + +/* Look up an association. */ +static struct sctp_association *__sctp_lookup_association( + struct net *net, + const union sctp_addr *local, + const union sctp_addr *peer, + struct sctp_transport **pt) +{ + struct sctp_hashbucket *head; + struct sctp_ep_common *epb; + struct sctp_association *asoc; + struct sctp_transport *transport; + int hash; + + /* Optimize here for direct hit, only listening connections can + * have wildcards anyways. + */ + hash = sctp_assoc_hashfn(net, ntohs(local->v4.sin_port), + ntohs(peer->v4.sin_port)); + head = &sctp_assoc_hashtable[hash]; + read_lock(&head->lock); + sctp_for_each_hentry(epb, &head->chain) { + asoc = sctp_assoc(epb); + transport = sctp_assoc_is_match(asoc, net, local, peer); + if (transport) + goto hit; + } + + read_unlock(&head->lock); + + return NULL; + +hit: + *pt = transport; + sctp_association_hold(asoc); + read_unlock(&head->lock); + return asoc; +} + +/* Look up an association. BH-safe. */ +static +struct sctp_association *sctp_lookup_association(struct net *net, + const union sctp_addr *laddr, + const union sctp_addr *paddr, + struct sctp_transport **transportp) +{ + struct sctp_association *asoc; + + local_bh_disable(); + asoc = __sctp_lookup_association(net, laddr, paddr, transportp); + local_bh_enable(); + + return asoc; +} + +/* Is there an association matching the given local and peer addresses? 
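+ * Takes and drops a temporary reference on any association found.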
*/ +int sctp_has_association(struct net *net, + const union sctp_addr *laddr, + const union sctp_addr *paddr) +{ + struct sctp_association *asoc; + struct sctp_transport *transport; + + if ((asoc = sctp_lookup_association(net, laddr, paddr, &transport))) { + sctp_association_put(asoc); + return 1; + } + + return 0; +} + +/* + * SCTP Implementors Guide, 2.18 Handling of address + * parameters within the INIT or INIT-ACK. + * + * D) When searching for a matching TCB upon reception of an INIT + * or INIT-ACK chunk the receiver SHOULD use not only the + * source address of the packet (containing the INIT or + * INIT-ACK) but the receiver SHOULD also use all valid + * address parameters contained within the chunk. + * + * 2.18.3 Solution description + * + * This new text clearly specifies to an implementor the need + * to look within the INIT or INIT-ACK. Any implementation that + * does not do this, may not be able to establish associations + * in certain circumstances. + * + */ +static struct sctp_association *__sctp_rcv_init_lookup(struct net *net, + struct sk_buff *skb, + const union sctp_addr *laddr, struct sctp_transport **transportp) +{ + struct sctp_association *asoc; + union sctp_addr addr; + union sctp_addr *paddr = &addr; + struct sctphdr *sh = sctp_hdr(skb); + union sctp_params params; + sctp_init_chunk_t *init; + struct sctp_transport *transport; + struct sctp_af *af; + + /* + * This code will NOT touch anything inside the chunk--it is + * strictly READ-ONLY. + * + * RFC 2960 3 SCTP packet Format + * + * Multiple chunks can be bundled into one SCTP packet up to + * the MTU size, except for the INIT, INIT ACK, and SHUTDOWN + * COMPLETE chunks. These chunks MUST NOT be bundled with any + * other chunk in a packet. See Section 6.10 for more details + * on chunk bundling. + */ + + /* Find the start of the TLVs and the end of the chunk. This is + * the region we search for address parameters. + */ + init = (sctp_init_chunk_t *)skb->data; + + /* Walk the parameters looking for embedded addresses. */ + sctp_walk_params(params, init, init_hdr.params) { + + /* Note: Ignoring hostname addresses. */ + af = sctp_get_af_specific(param_type2af(params.p->type)); + if (!af) + continue; + + af->from_addr_param(paddr, params.addr, sh->source, 0); + + asoc = __sctp_lookup_association(net, laddr, paddr, &transport); + if (asoc) + return asoc; + } + + return NULL; +} + +/* ADD-IP, Section 5.2 + * When an endpoint receives an ASCONF Chunk from the remote peer + * special procedures may be needed to identify the association the + * ASCONF Chunk is associated with. To properly find the association + * the following procedures SHOULD be followed: + * + * D2) If the association is not found, use the address found in the + * Address Parameter TLV combined with the port number found in the + * SCTP common header. If found proceed to rule D4. + * + * D2-ext) If more than one ASCONF Chunks are packed together, use the + * address found in the ASCONF Address Parameter TLV of each of the + * subsequent ASCONF Chunks. If found, proceed to rule D4. 
+ */
+static struct sctp_association *__sctp_rcv_asconf_lookup(
+					struct net *net,
+					sctp_chunkhdr_t *ch,
+					const union sctp_addr *laddr,
+					__be16 peer_port,
+					struct sctp_transport **transportp)
+{
+	sctp_addip_chunk_t *asconf = (struct sctp_addip_chunk *)ch;
+	struct sctp_af *af;
+	union sctp_addr_param *param;
+	union sctp_addr paddr;
+
+	/* Skip over the ADDIP header and find the Address parameter */
+	param = (union sctp_addr_param *)(asconf + 1);
+
+	af = sctp_get_af_specific(param_type2af(param->p.type));
+	if (unlikely(!af))
+		return NULL;
+
+	af->from_addr_param(&paddr, param, peer_port, 0);
+
+	return __sctp_lookup_association(net, laddr, &paddr, transportp);
+}
+
+/* SCTP-AUTH, Section 6.3:
+ * If the receiver does not find a STCB for a packet containing an AUTH
+ * chunk as the first chunk and not a COOKIE-ECHO chunk as the second
+ * chunk, it MUST use the chunks after the AUTH chunk to look up an existing
+ * association.
+ *
+ * This means that any chunks that can help us identify the association need
+ * to be looked at to find this association.
+ */
+static struct sctp_association *__sctp_rcv_walk_lookup(struct net *net,
+				      struct sk_buff *skb,
+				      const union sctp_addr *laddr,
+				      struct sctp_transport **transportp)
+{
+	struct sctp_association *asoc = NULL;
+	sctp_chunkhdr_t *ch;
+	int have_auth = 0;
+	unsigned int chunk_num = 1;
+	__u8 *ch_end;
+
+	/* Walk through the chunks looking for AUTH or ASCONF chunks
+	 * to help us find the association.
+	 */
+	ch = (sctp_chunkhdr_t *) skb->data;
+	do {
+		/* Break out if chunk length is less than minimal. */
+		if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t))
+			break;
+
+		ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
+		if (ch_end > skb_tail_pointer(skb))
+			break;
+
+		switch (ch->type) {
+		case SCTP_CID_AUTH:
+			have_auth = chunk_num;
+			break;
+
+		case SCTP_CID_COOKIE_ECHO:
+			/* If a packet arrives containing an AUTH chunk as
+			 * a first chunk, a COOKIE-ECHO chunk as the second
+			 * chunk, and possibly more chunks after them, and
+			 * the receiver does not have an STCB for that
+			 * packet, then authentication is based on
+			 * the contents of the COOKIE-ECHO chunk.
+			 */
+			if (have_auth == 1 && chunk_num == 2)
+				return NULL;
+			break;
+
+		case SCTP_CID_ASCONF:
+			if (have_auth || net->sctp.addip_noauth)
+				asoc = __sctp_rcv_asconf_lookup(
+						net, ch, laddr,
+						sctp_hdr(skb)->source,
+						transportp);
+		default:
+			break;
+		}
+
+		if (asoc)
+			break;
+
+		ch = (sctp_chunkhdr_t *) ch_end;
+		chunk_num++;
+	} while (ch_end < skb_tail_pointer(skb));
+
+	return asoc;
+}
+
+/*
+ * There are circumstances when we need to look inside the SCTP packet
+ * for information to help us find the association.  Examples
+ * include looking inside of INIT/INIT-ACK chunks or after the AUTH
+ * chunks.
+ */
+static struct sctp_association *__sctp_rcv_lookup_harder(struct net *net,
+				      struct sk_buff *skb,
+				      const union sctp_addr *laddr,
+				      struct sctp_transport **transportp)
+{
+	sctp_chunkhdr_t *ch;
+
+	ch = (sctp_chunkhdr_t *) skb->data;
+
+	/* The code below will attempt to walk the chunk and extract
+	 * parameter information.  Before we do that, we need to verify
+	 * that the chunk length doesn't cause overflow.  Otherwise, we'll
+	 * walk off the end.
+	 */
+	if (WORD_ROUND(ntohs(ch->length)) > skb->len)
+		return NULL;
+
+	/* If this is INIT/INIT-ACK look inside the chunk too.
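+	 * (INIT and INIT-ACK carry the peer's address list as parameters.)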
*/ + if (ch->type == SCTP_CID_INIT || ch->type == SCTP_CID_INIT_ACK) + return __sctp_rcv_init_lookup(net, skb, laddr, transportp); + + return __sctp_rcv_walk_lookup(net, skb, laddr, transportp); +} + +/* Lookup an association for an inbound skb. */ +static struct sctp_association *__sctp_rcv_lookup(struct net *net, + struct sk_buff *skb, + const union sctp_addr *paddr, + const union sctp_addr *laddr, + struct sctp_transport **transportp) +{ + struct sctp_association *asoc; + + asoc = __sctp_lookup_association(net, laddr, paddr, transportp); + + /* Further lookup for INIT/INIT-ACK packets. + * SCTP Implementors Guide, 2.18 Handling of address + * parameters within the INIT or INIT-ACK. + */ + if (!asoc) + asoc = __sctp_rcv_lookup_harder(net, skb, laddr, transportp); + + return asoc; +} diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c new file mode 100644 index 000000000..7e8a16c77 --- /dev/null +++ b/net/sctp/inqueue.c @@ -0,0 +1,214 @@ +/* SCTP kernel implementation + * Copyright (c) 1999-2000 Cisco, Inc. + * Copyright (c) 1999-2001 Motorola, Inc. + * Copyright (c) 2002 International Business Machines, Corp. + * + * This file is part of the SCTP kernel implementation + * + * These functions are the methods for accessing the SCTP inqueue. + * + * An SCTP inqueue is a queue into which you push SCTP packets + * (which might be bundles or fragments of chunks) and out of which you + * pop SCTP whole chunks. + * + * This SCTP implementation is free software; + * you can redistribute it and/or modify it under the terms of + * the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This SCTP implementation is distributed in the hope that it + * will be useful, but WITHOUT ANY WARRANTY; without even the implied + * ************************ + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with GNU CC; see the file COPYING. If not, see + * <http://www.gnu.org/licenses/>. + * + * Please send any bug reports or fixes you make to the + * email address(es): + * lksctp developers <linux-sctp@vger.kernel.org> + * + * Written or modified by: + * La Monte H.P. Yarroll <piggy@acm.org> + * Karl Knutson <karl@athena.chicago.il.us> + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <net/sctp/sctp.h> +#include <net/sctp/sm.h> +#include <linux/interrupt.h> +#include <linux/slab.h> + +/* Initialize an SCTP inqueue. */ +void sctp_inq_init(struct sctp_inq *queue) +{ + INIT_LIST_HEAD(&queue->in_chunk_list); + queue->in_progress = NULL; + + /* Create a task for delivering data. */ + INIT_WORK(&queue->immediate, NULL); +} + +/* Release the memory associated with an SCTP inqueue. */ +void sctp_inq_free(struct sctp_inq *queue) +{ + struct sctp_chunk *chunk, *tmp; + + /* Empty the queue. */ + list_for_each_entry_safe(chunk, tmp, &queue->in_chunk_list, list) { + list_del_init(&chunk->list); + sctp_chunk_free(chunk); + } + + /* If there is a packet which is currently being worked on, + * free it as well. + */ + if (queue->in_progress) { + sctp_chunk_free(queue->in_progress); + queue->in_progress = NULL; + } +} + +/* Put a new packet in an SCTP inqueue. + * We assume that packet->sctp_hdr is set and in host byte order. + */ +void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk) +{ + /* Directly call the packet handling routine. 
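+	 * A dead rcvr means the endpoint or association is going away, so
+	 * the chunk is simply dropped.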
 */
+	if (chunk->rcvr->dead) {
+		sctp_chunk_free(chunk);
+		return;
+	}
+
+	/* We are now calling this either from the soft interrupt
+	 * or from the backlog processing.
+	 * Eventually, we should clean up inqueue to not rely
+	 * on the BH related data structures.
+	 */
+	list_add_tail(&chunk->list, &q->in_chunk_list);
+	if (chunk->asoc)
+		chunk->asoc->stats.ipackets++;
+	q->immediate.func(&q->immediate);
+}
+
+/* Peek at the next chunk on the inqueue. */
+struct sctp_chunkhdr *sctp_inq_peek(struct sctp_inq *queue)
+{
+	struct sctp_chunk *chunk;
+	sctp_chunkhdr_t *ch = NULL;
+
+	chunk = queue->in_progress;
+	/* Nothing in progress?  Then there is nothing to peek at. */
+	if (chunk == NULL)
+		return NULL;
+	/* If there are no more chunks in this packet, say so. */
+	if (chunk->singleton ||
+	    chunk->end_of_packet ||
+	    chunk->pdiscard)
+		return NULL;
+
+	ch = (sctp_chunkhdr_t *)chunk->chunk_end;
+
+	return ch;
+}
+
+/* Extract a chunk from an SCTP inqueue.
+ *
+ * WARNING:  If you need to put the chunk on another queue, you need to
+ * make a shallow copy (clone) of it.
+ */
+struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
+{
+	struct sctp_chunk *chunk;
+	sctp_chunkhdr_t *ch = NULL;
+
+	/* The assumption is that we are safe to process the chunks
+	 * at this time.
+	 */
+
+	if ((chunk = queue->in_progress)) {
+		/* There is a packet that we have been working on.
+		 * Any post processing work to do before we move on?
+		 */
+		if (chunk->singleton ||
+		    chunk->end_of_packet ||
+		    chunk->pdiscard) {
+			sctp_chunk_free(chunk);
+			chunk = queue->in_progress = NULL;
+		} else {
+			/* Nothing to do.  Next chunk in the packet, please. */
+			ch = (sctp_chunkhdr_t *) chunk->chunk_end;
+			/* Force chunk->skb->data to chunk->chunk_end. */
+			skb_pull(chunk->skb, chunk->chunk_end - chunk->skb->data);
+			/* We are guaranteed to pull an SCTP header. */
+		}
+	}
+
+	/* Do we need to take the next packet out of the queue to process? */
+	if (!chunk) {
+		struct list_head *entry;
+
+		/* Is the queue empty? */
+		if (list_empty(&queue->in_chunk_list))
+			return NULL;
+
+		entry = queue->in_chunk_list.next;
+		chunk = queue->in_progress =
+			list_entry(entry, struct sctp_chunk, list);
+		list_del_init(entry);
+
+		/* This is the first chunk in the packet. */
+		chunk->singleton = 1;
+		ch = (sctp_chunkhdr_t *) chunk->skb->data;
+		chunk->data_accepted = 0;
+	}
+
+	chunk->chunk_hdr = ch;
+	chunk->chunk_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
+	/* In the unlikely case of an IP reassembly, the skb could be
+	 * non-linear.  If so, update chunk_end so that it doesn't go past
+	 * the skb->tail.
+	 */
+	if (unlikely(skb_is_nonlinear(chunk->skb))) {
+		if (chunk->chunk_end > skb_tail_pointer(chunk->skb))
+			chunk->chunk_end = skb_tail_pointer(chunk->skb);
+	}
+	skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t));
+	chunk->subh.v = NULL; /* Subheader is no longer valid. */
+
+	if (chunk->chunk_end + sizeof(sctp_chunkhdr_t) <
+	    skb_tail_pointer(chunk->skb)) {
+		/* This is not a singleton */
+		chunk->singleton = 0;
+	} else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) {
+		/* Discard inside state machine. */
+		chunk->pdiscard = 1;
+		chunk->chunk_end = skb_tail_pointer(chunk->skb);
+	} else {
+		/* We are at the end of the packet, so mark the chunk
+		 * in case we need to send a SACK.
+		 */
+		chunk->end_of_packet = 1;
+	}
+
+	pr_debug("+++sctp_inq_pop+++ chunk:%p[%s], length:%d, skb->len:%d\n",
+		 chunk, sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)),
+		 ntohs(chunk->chunk_hdr->length), chunk->skb->len);
+
+	return chunk;
+}
+
+/* Set a top-half handler.
+ *
+ * Originally, the top-half handler was scheduled as a BH.  We now
+ * call the handler directly in sctp_inq_push() at a time that
+ * we know we are lock safe.
+ * The intent is that this routine will pull stuff out of the
+ * inqueue and process it.
+ */
+void sctp_inq_set_th_handler(struct sctp_inq *q, work_func_t callback)
+{
+	INIT_WORK(&q->immediate, callback);
+}
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
new file mode 100644
index 000000000..0e4198ee2
--- /dev/null
+++ b/net/sctp/ipv6.c
@@ -0,0 +1,1076 @@
+/* SCTP kernel implementation
+ * (C) Copyright IBM Corp. 2002, 2004
+ * Copyright (c) 2001 Nokia, Inc.
+ * Copyright (c) 2001 La Monte H.P. Yarroll
+ * Copyright (c) 2002-2003 Intel Corp.
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * SCTP over IPv6.
+ *
+ * This SCTP implementation is free software;
+ * you can redistribute it and/or modify it under the terms of
+ * the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This SCTP implementation is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ *                 ************************
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNU CC; see the file COPYING.  If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ *    lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ *    Le Yanqun <yanqun.le@nokia.com>
+ *    Hui Huang <hui.huang@nokia.com>
+ *    La Monte H.P. Yarroll <piggy@acm.org>
+ *    Sridhar Samudrala <sri@us.ibm.com>
+ *    Jon Grimm <jgrimm@us.ibm.com>
+ *    Ardelle Fan <ardelle.fan@intel.com>
+ *
+ * Based on:
+ *	linux/net/ipv6/tcp_ipv6.c
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/sockios.h>
+#include <linux/net.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/netdevice.h>
+#include <linux/init.h>
+#include <linux/ipsec.h>
+#include <linux/slab.h>
+
+#include <linux/ipv6.h>
+#include <linux/icmpv6.h>
+#include <linux/random.h>
+#include <linux/seq_file.h>
+
+#include <net/protocol.h>
+#include <net/ndisc.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/transp_v6.h>
+#include <net/addrconf.h>
+#include <net/ip6_route.h>
+#include <net/inet_common.h>
+#include <net/inet_ecn.h>
+#include <net/sctp/sctp.h>
+
+#include <asm/uaccess.h>
+
+static inline int sctp_v6_addr_match_len(union sctp_addr *s1,
+					 union sctp_addr *s2);
+static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr,
+			    __be16 port);
+static int sctp_v6_cmp_addr(const union sctp_addr *addr1,
+			    const union sctp_addr *addr2);
+
+/* Event handler for inet6 address addition/deletion events.
+ * The sctp_local_addr_list needs to be protected by a spin lock since
+ * multiple notifiers (say IPv4 and IPv6) may be running at the same
+ * time and thus corrupt the list.
+ * The reader side is protected with RCU.
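+ * Writers serialize on net->sctp.local_addr_lock.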
+ */ +static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev, + void *ptr) +{ + struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr; + struct sctp_sockaddr_entry *addr = NULL; + struct sctp_sockaddr_entry *temp; + struct net *net = dev_net(ifa->idev->dev); + int found = 0; + + switch (ev) { + case NETDEV_UP: + addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC); + if (addr) { + addr->a.v6.sin6_family = AF_INET6; + addr->a.v6.sin6_port = 0; + addr->a.v6.sin6_addr = ifa->addr; + addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex; + addr->valid = 1; + spin_lock_bh(&net->sctp.local_addr_lock); + list_add_tail_rcu(&addr->list, &net->sctp.local_addr_list); + sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_NEW); + spin_unlock_bh(&net->sctp.local_addr_lock); + } + break; + case NETDEV_DOWN: + spin_lock_bh(&net->sctp.local_addr_lock); + list_for_each_entry_safe(addr, temp, + &net->sctp.local_addr_list, list) { + if (addr->a.sa.sa_family == AF_INET6 && + ipv6_addr_equal(&addr->a.v6.sin6_addr, + &ifa->addr)) { + sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_DEL); + found = 1; + addr->valid = 0; + list_del_rcu(&addr->list); + break; + } + } + spin_unlock_bh(&net->sctp.local_addr_lock); + if (found) + kfree_rcu(addr, rcu); + break; + } + + return NOTIFY_DONE; +} + +static struct notifier_block sctp_inet6addr_notifier = { + .notifier_call = sctp_inet6addr_event, +}; + +/* ICMP error handler. */ +static void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, + u8 type, u8 code, int offset, __be32 info) +{ + struct inet6_dev *idev; + struct sock *sk; + struct sctp_association *asoc; + struct sctp_transport *transport; + struct ipv6_pinfo *np; + __u16 saveip, savesctp; + int err; + struct net *net = dev_net(skb->dev); + + idev = in6_dev_get(skb->dev); + + /* Fix up skb to look at the embedded net header. */ + saveip = skb->network_header; + savesctp = skb->transport_header; + skb_reset_network_header(skb); + skb_set_transport_header(skb, offset); + sk = sctp_err_lookup(net, AF_INET6, skb, sctp_hdr(skb), &asoc, &transport); + /* Put back, the original pointers. */ + skb->network_header = saveip; + skb->transport_header = savesctp; + if (!sk) { + ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_INERRORS); + goto out; + } + + /* Warning: The sock lock is held. Remember to call + * sctp_err_finish! 
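+	 * sctp_err_finish() drops the sock lock and the association ref.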
+ */ + + switch (type) { + case ICMPV6_PKT_TOOBIG: + if (ip6_sk_accept_pmtu(sk)) + sctp_icmp_frag_needed(sk, asoc, transport, ntohl(info)); + goto out_unlock; + case ICMPV6_PARAMPROB: + if (ICMPV6_UNK_NEXTHDR == code) { + sctp_icmp_proto_unreachable(sk, asoc, transport); + goto out_unlock; + } + break; + case NDISC_REDIRECT: + sctp_icmp_redirect(sk, transport, skb); + goto out_unlock; + default: + break; + } + + np = inet6_sk(sk); + icmpv6_err_convert(type, code, &err); + if (!sock_owned_by_user(sk) && np->recverr) { + sk->sk_err = err; + sk->sk_error_report(sk); + } else { /* Only an error on timeout */ + sk->sk_err_soft = err; + } + +out_unlock: + sctp_err_finish(sk, asoc); +out: + if (likely(idev != NULL)) + in6_dev_put(idev); +} + +static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport) +{ + struct sock *sk = skb->sk; + struct ipv6_pinfo *np = inet6_sk(sk); + struct flowi6 *fl6 = &transport->fl.u.ip6; + + pr_debug("%s: skb:%p, len:%d, src:%pI6 dst:%pI6\n", __func__, skb, + skb->len, &fl6->saddr, &fl6->daddr); + + IP6_ECN_flow_xmit(sk, fl6->flowlabel); + + if (!(transport->param_flags & SPP_PMTUD_ENABLE)) + skb->ignore_df = 1; + + SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS); + + return ip6_xmit(sk, skb, fl6, np->opt, np->tclass); +} + +/* Returns the dst cache entry for the given source and destination ip + * addresses. + */ +static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, + struct flowi *fl, struct sock *sk) +{ + struct sctp_association *asoc = t->asoc; + struct dst_entry *dst = NULL; + struct flowi6 *fl6 = &fl->u.ip6; + struct sctp_bind_addr *bp; + struct ipv6_pinfo *np = inet6_sk(sk); + struct sctp_sockaddr_entry *laddr; + union sctp_addr *baddr = NULL; + union sctp_addr *daddr = &t->ipaddr; + union sctp_addr dst_saddr; + struct in6_addr *final_p, final; + __u8 matchlen = 0; + __u8 bmatchlen; + sctp_scope_t scope; + + memset(fl6, 0, sizeof(struct flowi6)); + fl6->daddr = daddr->v6.sin6_addr; + fl6->fl6_dport = daddr->v6.sin6_port; + fl6->flowi6_proto = IPPROTO_SCTP; + if (ipv6_addr_type(&daddr->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) + fl6->flowi6_oif = daddr->v6.sin6_scope_id; + + pr_debug("%s: dst=%pI6 ", __func__, &fl6->daddr); + + if (asoc) + fl6->fl6_sport = htons(asoc->base.bind_addr.port); + + if (saddr) { + fl6->saddr = saddr->v6.sin6_addr; + fl6->fl6_sport = saddr->v6.sin6_port; + + pr_debug("src=%pI6 - ", &fl6->saddr); + } + + final_p = fl6_update_dst(fl6, np->opt, &final); + dst = ip6_dst_lookup_flow(sk, fl6, final_p); + if (!asoc || saddr) + goto out; + + bp = &asoc->base.bind_addr; + scope = sctp_scope(daddr); + /* ip6_dst_lookup has filled in the fl6->saddr for us. Check + * to see if we can use it. + */ + if (!IS_ERR(dst)) { + /* Walk through the bind address list and look for a bind + * address that matches the source address of the returned dst. + */ + sctp_v6_to_addr(&dst_saddr, &fl6->saddr, htons(bp->port)); + rcu_read_lock(); + list_for_each_entry_rcu(laddr, &bp->address_list, list) { + if (!laddr->valid || laddr->state == SCTP_ADDR_DEL || + (laddr->state != SCTP_ADDR_SRC && + !asoc->src_out_of_asoc_ok)) + continue; + + /* Do not compare against v4 addrs */ + if ((laddr->a.sa.sa_family == AF_INET6) && + (sctp_v6_cmp_addr(&dst_saddr, &laddr->a))) { + rcu_read_unlock(); + goto out; + } + } + rcu_read_unlock(); + /* None of the bound addresses match the source address of the + * dst. So release it. 
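+	 * A longest-prefix-match walk below picks a source address instead.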
+	 */
+		dst_release(dst);
+		dst = NULL;
+	}
+
+	/* Walk through the bind address list and try to get the
+	 * best source address for a given destination.
+	 */
+	rcu_read_lock();
+	list_for_each_entry_rcu(laddr, &bp->address_list, list) {
+		if (!laddr->valid)
+			continue;
+		if ((laddr->state == SCTP_ADDR_SRC) &&
+		    (laddr->a.sa.sa_family == AF_INET6) &&
+		    (scope <= sctp_scope(&laddr->a))) {
+			bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
+			if (!baddr || (matchlen < bmatchlen)) {
+				baddr = &laddr->a;
+				matchlen = bmatchlen;
+			}
+		}
+	}
+	rcu_read_unlock();
+
+	if (baddr) {
+		fl6->saddr = baddr->v6.sin6_addr;
+		fl6->fl6_sport = baddr->v6.sin6_port;
+		final_p = fl6_update_dst(fl6, np->opt, &final);
+		dst = ip6_dst_lookup_flow(sk, fl6, final_p);
+	}
+
+out:
+	if (!IS_ERR_OR_NULL(dst)) {
+		struct rt6_info *rt;
+
+		rt = (struct rt6_info *)dst;
+		t->dst = dst;
+		t->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
+		pr_debug("rt6_dst:%pI6 rt6_src:%pI6\n", &rt->rt6i_dst.addr,
+			 &fl6->saddr);
+	} else {
+		t->dst = NULL;
+
+		pr_debug("no route\n");
+	}
+}
+
+/* Returns the number of consecutive initial bits that match in the 2 ipv6
+ * addresses.
+ */
+static inline int sctp_v6_addr_match_len(union sctp_addr *s1,
+					 union sctp_addr *s2)
+{
+	return ipv6_addr_diff(&s1->v6.sin6_addr, &s2->v6.sin6_addr);
+}
+
+/* Fills in the source address (saddr) based on the destination address
+ * (daddr) and the asoc's bind address list.
+ */
+static void sctp_v6_get_saddr(struct sctp_sock *sk,
+			      struct sctp_transport *t,
+			      struct flowi *fl)
+{
+	struct flowi6 *fl6 = &fl->u.ip6;
+	union sctp_addr *saddr = &t->saddr;
+
+	pr_debug("%s: asoc:%p dst:%p\n", __func__, t->asoc, t->dst);
+
+	if (t->dst) {
+		saddr->v6.sin6_family = AF_INET6;
+		saddr->v6.sin6_addr = fl6->saddr;
+	}
+}
+
+/* Make a copy of all potential local addresses. */
+static void sctp_v6_copy_addrlist(struct list_head *addrlist,
+				  struct net_device *dev)
+{
+	struct inet6_dev *in6_dev;
+	struct inet6_ifaddr *ifp;
+	struct sctp_sockaddr_entry *addr;
+
+	rcu_read_lock();
+	if ((in6_dev = __in6_dev_get(dev)) == NULL) {
+		rcu_read_unlock();
+		return;
+	}
+
+	read_lock_bh(&in6_dev->lock);
+	list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
+		/* Add the address to the local list. */
+		addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
+		if (addr) {
+			addr->a.v6.sin6_family = AF_INET6;
+			addr->a.v6.sin6_port = 0;
+			addr->a.v6.sin6_addr = ifp->addr;
+			addr->a.v6.sin6_scope_id = dev->ifindex;
+			addr->valid = 1;
+			INIT_LIST_HEAD(&addr->list);
+			list_add_tail(&addr->list, addrlist);
+		}
+	}
+
+	read_unlock_bh(&in6_dev->lock);
+	rcu_read_unlock();
+}
+
+/* Initialize a sockaddr_storage from an incoming skb. */
+static void sctp_v6_from_skb(union sctp_addr *addr, struct sk_buff *skb,
+			     int is_saddr)
+{
+	__be16 *port;
+	struct sctphdr *sh;
+
+	port = &addr->v6.sin6_port;
+	addr->v6.sin6_family = AF_INET6;
+	addr->v6.sin6_flowinfo = 0; /* FIXME */
+	addr->v6.sin6_scope_id = ((struct inet6_skb_parm *)skb->cb)->iif;
+
+	sh = sctp_hdr(skb);
+	if (is_saddr) {
+		*port = sh->source;
+		addr->v6.sin6_addr = ipv6_hdr(skb)->saddr;
+	} else {
+		*port = sh->dest;
+		addr->v6.sin6_addr = ipv6_hdr(skb)->daddr;
+	}
+}
+
+/* Initialize an sctp_addr from a socket. */
+static void sctp_v6_from_sk(union sctp_addr *addr, struct sock *sk)
+{
+	addr->v6.sin6_family = AF_INET6;
+	addr->v6.sin6_port = 0;
+	addr->v6.sin6_addr = sk->sk_v6_rcv_saddr;
+}
+
+/* Initialize sk->sk_rcv_saddr from sctp_addr.
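+ * A v4 address is stored in v4-mapped-v6 form.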
*/ +static void sctp_v6_to_sk_saddr(union sctp_addr *addr, struct sock *sk) +{ + if (addr->sa.sa_family == AF_INET) { + sk->sk_v6_rcv_saddr.s6_addr32[0] = 0; + sk->sk_v6_rcv_saddr.s6_addr32[1] = 0; + sk->sk_v6_rcv_saddr.s6_addr32[2] = htonl(0x0000ffff); + sk->sk_v6_rcv_saddr.s6_addr32[3] = + addr->v4.sin_addr.s_addr; + } else { + sk->sk_v6_rcv_saddr = addr->v6.sin6_addr; + } +} + +/* Initialize sk->sk_daddr from sctp_addr. */ +static void sctp_v6_to_sk_daddr(union sctp_addr *addr, struct sock *sk) +{ + if (addr->sa.sa_family == AF_INET) { + sk->sk_v6_daddr.s6_addr32[0] = 0; + sk->sk_v6_daddr.s6_addr32[1] = 0; + sk->sk_v6_daddr.s6_addr32[2] = htonl(0x0000ffff); + sk->sk_v6_daddr.s6_addr32[3] = addr->v4.sin_addr.s_addr; + } else { + sk->sk_v6_daddr = addr->v6.sin6_addr; + } +} + +/* Initialize a sctp_addr from an address parameter. */ +static void sctp_v6_from_addr_param(union sctp_addr *addr, + union sctp_addr_param *param, + __be16 port, int iif) +{ + addr->v6.sin6_family = AF_INET6; + addr->v6.sin6_port = port; + addr->v6.sin6_flowinfo = 0; /* BUG */ + addr->v6.sin6_addr = param->v6.addr; + addr->v6.sin6_scope_id = iif; +} + +/* Initialize an address parameter from a sctp_addr and return the length + * of the address parameter. + */ +static int sctp_v6_to_addr_param(const union sctp_addr *addr, + union sctp_addr_param *param) +{ + int length = sizeof(sctp_ipv6addr_param_t); + + param->v6.param_hdr.type = SCTP_PARAM_IPV6_ADDRESS; + param->v6.param_hdr.length = htons(length); + param->v6.addr = addr->v6.sin6_addr; + + return length; +} + +/* Initialize a sctp_addr from struct in6_addr. */ +static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr, + __be16 port) +{ + addr->sa.sa_family = AF_INET6; + addr->v6.sin6_port = port; + addr->v6.sin6_addr = *saddr; +} + +/* Compare addresses exactly. + * v4-mapped-v6 is also in consideration. + */ +static int sctp_v6_cmp_addr(const union sctp_addr *addr1, + const union sctp_addr *addr2) +{ + if (addr1->sa.sa_family != addr2->sa.sa_family) { + if (addr1->sa.sa_family == AF_INET && + addr2->sa.sa_family == AF_INET6 && + ipv6_addr_v4mapped(&addr2->v6.sin6_addr)) { + if (addr2->v6.sin6_port == addr1->v4.sin_port && + addr2->v6.sin6_addr.s6_addr32[3] == + addr1->v4.sin_addr.s_addr) + return 1; + } + if (addr2->sa.sa_family == AF_INET && + addr1->sa.sa_family == AF_INET6 && + ipv6_addr_v4mapped(&addr1->v6.sin6_addr)) { + if (addr1->v6.sin6_port == addr2->v4.sin_port && + addr1->v6.sin6_addr.s6_addr32[3] == + addr2->v4.sin_addr.s_addr) + return 1; + } + return 0; + } + if (!ipv6_addr_equal(&addr1->v6.sin6_addr, &addr2->v6.sin6_addr)) + return 0; + /* If this is a linklocal address, compare the scope_id. */ + if (ipv6_addr_type(&addr1->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) { + if (addr1->v6.sin6_scope_id && addr2->v6.sin6_scope_id && + (addr1->v6.sin6_scope_id != addr2->v6.sin6_scope_id)) { + return 0; + } + } + + return 1; +} + +/* Initialize addr struct to INADDR_ANY. */ +static void sctp_v6_inaddr_any(union sctp_addr *addr, __be16 port) +{ + memset(addr, 0x00, sizeof(union sctp_addr)); + addr->v6.sin6_family = AF_INET6; + addr->v6.sin6_port = port; +} + +/* Is this a wildcard address? */ +static int sctp_v6_is_any(const union sctp_addr *addr) +{ + return ipv6_addr_any(&addr->v6.sin6_addr); +} + +/* Should this be available for binding? 
*/ +static int sctp_v6_available(union sctp_addr *addr, struct sctp_sock *sp) +{ + int type; + const struct in6_addr *in6 = (const struct in6_addr *)&addr->v6.sin6_addr; + + type = ipv6_addr_type(in6); + if (IPV6_ADDR_ANY == type) + return 1; + if (type == IPV6_ADDR_MAPPED) { + if (sp && ipv6_only_sock(sctp_opt2sk(sp))) + return 0; + sctp_v6_map_v4(addr); + return sctp_get_af_specific(AF_INET)->available(addr, sp); + } + if (!(type & IPV6_ADDR_UNICAST)) + return 0; + + return ipv6_chk_addr(sock_net(&sp->inet.sk), in6, NULL, 0); +} + +/* This function checks if the address is a valid address to be used for + * SCTP. + * + * Output: + * Return 0 - If the address is a non-unicast or an illegal address. + * Return 1 - If the address is a unicast. + */ +static int sctp_v6_addr_valid(union sctp_addr *addr, + struct sctp_sock *sp, + const struct sk_buff *skb) +{ + int ret = ipv6_addr_type(&addr->v6.sin6_addr); + + /* Support v4-mapped-v6 address. */ + if (ret == IPV6_ADDR_MAPPED) { + /* Note: This routine is used in input, so v4-mapped-v6 + * are disallowed here when there is no sctp_sock. + */ + if (sp && ipv6_only_sock(sctp_opt2sk(sp))) + return 0; + sctp_v6_map_v4(addr); + return sctp_get_af_specific(AF_INET)->addr_valid(addr, sp, skb); + } + + /* Is this a non-unicast address */ + if (!(ret & IPV6_ADDR_UNICAST)) + return 0; + + return 1; +} + +/* What is the scope of 'addr'? */ +static sctp_scope_t sctp_v6_scope(union sctp_addr *addr) +{ + int v6scope; + sctp_scope_t retval; + + /* The IPv6 scope is really a set of bit fields. + * See IFA_* in <net/if_inet6.h>. Map to a generic SCTP scope. + */ + + v6scope = ipv6_addr_scope(&addr->v6.sin6_addr); + switch (v6scope) { + case IFA_HOST: + retval = SCTP_SCOPE_LOOPBACK; + break; + case IFA_LINK: + retval = SCTP_SCOPE_LINK; + break; + case IFA_SITE: + retval = SCTP_SCOPE_PRIVATE; + break; + default: + retval = SCTP_SCOPE_GLOBAL; + break; + } + + return retval; +} + +/* Create and initialize a new sk for the socket to be returned by accept(). */ +static struct sock *sctp_v6_create_accept_sk(struct sock *sk, + struct sctp_association *asoc) +{ + struct sock *newsk; + struct ipv6_pinfo *newnp, *np = inet6_sk(sk); + struct sctp6_sock *newsctp6sk; + + newsk = sk_alloc(sock_net(sk), PF_INET6, GFP_KERNEL, sk->sk_prot); + if (!newsk) + goto out; + + sock_init_data(NULL, newsk); + + sctp_copy_sock(newsk, sk, asoc); + sock_reset_flag(sk, SOCK_ZAPPED); + + newsctp6sk = (struct sctp6_sock *)newsk; + inet_sk(newsk)->pinet6 = &newsctp6sk->inet6; + + sctp_sk(newsk)->v4mapped = sctp_sk(sk)->v4mapped; + + newnp = inet6_sk(newsk); + + memcpy(newnp, np, sizeof(struct ipv6_pinfo)); + + /* Initialize sk's sport, dport, rcv_saddr and daddr for getsockname() + * and getpeername(). + */ + sctp_v6_to_sk_daddr(&asoc->peer.primary_addr, newsk); + + newsk->sk_v6_rcv_saddr = sk->sk_v6_rcv_saddr; + + sk_refcnt_debug_inc(newsk); + + if (newsk->sk_prot->init(newsk)) { + sk_common_release(newsk); + newsk = NULL; + } + +out: + return newsk; +} + +/* Format a sockaddr for return to user space. This makes sure the return is + * AF_INET or AF_INET6 depending on the SCTP_I_WANT_MAPPED_V4_ADDR option. 
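+ *
+ * For example (addresses illustrative only): with the option enabled,
+ * a v4 peer 192.0.2.1 is reported as the v4-mapped ::ffff:192.0.2.1 in
+ * a 28-byte sockaddr_in6; with it disabled, the same peer comes back
+ * as a plain 16-byte sockaddr_in.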
+ */ +static int sctp_v6_addr_to_user(struct sctp_sock *sp, union sctp_addr *addr) +{ + if (sp->v4mapped) { + if (addr->sa.sa_family == AF_INET) + sctp_v4_map_v6(addr); + } else { + if (addr->sa.sa_family == AF_INET6 && + ipv6_addr_v4mapped(&addr->v6.sin6_addr)) + sctp_v6_map_v4(addr); + } + + if (addr->sa.sa_family == AF_INET) + return sizeof(struct sockaddr_in); + return sizeof(struct sockaddr_in6); +} + +/* Where did this skb come from? */ +static int sctp_v6_skb_iif(const struct sk_buff *skb) +{ + struct inet6_skb_parm *opt = (struct inet6_skb_parm *) skb->cb; + return opt->iif; +} + +/* Was this packet marked by Explicit Congestion Notification? */ +static int sctp_v6_is_ce(const struct sk_buff *skb) +{ + return *((__u32 *)(ipv6_hdr(skb))) & htonl(1 << 20); +} + +/* Dump the v6 addr to the seq file. */ +static void sctp_v6_seq_dump_addr(struct seq_file *seq, union sctp_addr *addr) +{ + seq_printf(seq, "%pI6 ", &addr->v6.sin6_addr); +} + +static void sctp_v6_ecn_capable(struct sock *sk) +{ + inet6_sk(sk)->tclass |= INET_ECN_ECT_0; +} + +/* Initialize a PF_INET msgname from a ulpevent. */ +static void sctp_inet6_event_msgname(struct sctp_ulpevent *event, + char *msgname, int *addrlen) +{ + union sctp_addr *addr; + struct sctp_association *asoc; + union sctp_addr *paddr; + + if (!msgname) + return; + + addr = (union sctp_addr *)msgname; + asoc = event->asoc; + paddr = &asoc->peer.primary_addr; + + if (paddr->sa.sa_family == AF_INET) { + addr->v4.sin_family = AF_INET; + addr->v4.sin_port = htons(asoc->peer.port); + addr->v4.sin_addr = paddr->v4.sin_addr; + } else { + addr->v6.sin6_family = AF_INET6; + addr->v6.sin6_flowinfo = 0; + if (ipv6_addr_type(&paddr->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) + addr->v6.sin6_scope_id = paddr->v6.sin6_scope_id; + else + addr->v6.sin6_scope_id = 0; + addr->v6.sin6_port = htons(asoc->peer.port); + addr->v6.sin6_addr = paddr->v6.sin6_addr; + } + + *addrlen = sctp_v6_addr_to_user(sctp_sk(asoc->base.sk), addr); +} + +/* Initialize a msg_name from an inbound skb. */ +static void sctp_inet6_skb_msgname(struct sk_buff *skb, char *msgname, + int *addr_len) +{ + union sctp_addr *addr; + struct sctphdr *sh; + + if (!msgname) + return; + + addr = (union sctp_addr *)msgname; + sh = sctp_hdr(skb); + + if (ip_hdr(skb)->version == 4) { + addr->v4.sin_family = AF_INET; + addr->v4.sin_port = sh->source; + addr->v4.sin_addr.s_addr = ip_hdr(skb)->saddr; + } else { + addr->v6.sin6_family = AF_INET6; + addr->v6.sin6_flowinfo = 0; + addr->v6.sin6_port = sh->source; + addr->v6.sin6_addr = ipv6_hdr(skb)->saddr; + if (ipv6_addr_type(&addr->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) { + struct sctp_ulpevent *ev = sctp_skb2event(skb); + addr->v6.sin6_scope_id = ev->iif; + } + } + + *addr_len = sctp_v6_addr_to_user(sctp_sk(skb->sk), addr); +} + +/* Do we support this AF? */ +static int sctp_inet6_af_supported(sa_family_t family, struct sctp_sock *sp) +{ + switch (family) { + case AF_INET6: + return 1; + /* v4-mapped-v6 addresses */ + case AF_INET: + if (!__ipv6_only_sock(sctp_opt2sk(sp))) + return 1; + default: + return 0; + } +} + +/* Address matching with wildcards allowed. This extra level + * of indirection lets us choose whether a PF_INET6 should + * disallow any v4 addresses if we so choose. 
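+ *
+ * (For example, a socket bound to the :: wildcard matches a v4 peer
+ * address here unless IPV6_V6ONLY is set, in which case the family
+ * mismatch is rejected before the wildcard test is reached.)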
+ */ +static int sctp_inet6_cmp_addr(const union sctp_addr *addr1, + const union sctp_addr *addr2, + struct sctp_sock *opt) +{ + struct sctp_af *af1, *af2; + struct sock *sk = sctp_opt2sk(opt); + + af1 = sctp_get_af_specific(addr1->sa.sa_family); + af2 = sctp_get_af_specific(addr2->sa.sa_family); + + if (!af1 || !af2) + return 0; + + /* If the socket is IPv6 only, v4 addrs will not match */ + if (__ipv6_only_sock(sk) && af1 != af2) + return 0; + + /* Today, wildcard AF_INET/AF_INET6. */ + if (sctp_is_any(sk, addr1) || sctp_is_any(sk, addr2)) + return 1; + + if (addr1->sa.sa_family != addr2->sa.sa_family) + return 0; + + return af1->cmp_addr(addr1, addr2); +} + +/* Verify that the provided sockaddr looks bindable. Common verification, + * has already been taken care of. + */ +static int sctp_inet6_bind_verify(struct sctp_sock *opt, union sctp_addr *addr) +{ + struct sctp_af *af; + + /* ASSERT: address family has already been verified. */ + if (addr->sa.sa_family != AF_INET6) + af = sctp_get_af_specific(addr->sa.sa_family); + else { + int type = ipv6_addr_type(&addr->v6.sin6_addr); + struct net_device *dev; + + if (type & IPV6_ADDR_LINKLOCAL) { + struct net *net; + if (!addr->v6.sin6_scope_id) + return 0; + net = sock_net(&opt->inet.sk); + rcu_read_lock(); + dev = dev_get_by_index_rcu(net, addr->v6.sin6_scope_id); + if (!dev || + !ipv6_chk_addr(net, &addr->v6.sin6_addr, dev, 0)) { + rcu_read_unlock(); + return 0; + } + rcu_read_unlock(); + } + + af = opt->pf->af; + } + return af->available(addr, opt); +} + +/* Verify that the provided sockaddr looks sendable. Common verification, + * has already been taken care of. + */ +static int sctp_inet6_send_verify(struct sctp_sock *opt, union sctp_addr *addr) +{ + struct sctp_af *af = NULL; + + /* ASSERT: address family has already been verified. */ + if (addr->sa.sa_family != AF_INET6) + af = sctp_get_af_specific(addr->sa.sa_family); + else { + int type = ipv6_addr_type(&addr->v6.sin6_addr); + struct net_device *dev; + + if (type & IPV6_ADDR_LINKLOCAL) { + if (!addr->v6.sin6_scope_id) + return 0; + rcu_read_lock(); + dev = dev_get_by_index_rcu(sock_net(&opt->inet.sk), + addr->v6.sin6_scope_id); + rcu_read_unlock(); + if (!dev) + return 0; + } + af = opt->pf->af; + } + + return af != NULL; +} + +/* Fill in Supported Address Type information for INIT and INIT-ACK + * chunks. Note: In the future, we may want to look at sock options + * to determine whether a PF_INET6 socket really wants to have IPV4 + * addresses. + * Returns number of addresses supported. 
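+ *
+ * (The values written into the Supported Address Types parameter are
+ * the RFC 4960 address parameter types: 6 for IPv6 and, unless the
+ * socket is v6-only, 5 for IPv4.)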
+ */ +static int sctp_inet6_supported_addrs(const struct sctp_sock *opt, + __be16 *types) +{ + types[0] = SCTP_PARAM_IPV6_ADDRESS; + if (!opt || !ipv6_only_sock(sctp_opt2sk(opt))) { + types[1] = SCTP_PARAM_IPV4_ADDRESS; + return 2; + } + return 1; +} + +/* Handle SCTP_I_WANT_MAPPED_V4_ADDR for getpeername() and getsockname() */ +static int sctp_getname(struct socket *sock, struct sockaddr *uaddr, + int *uaddr_len, int peer) +{ + int rc; + + rc = inet6_getname(sock, uaddr, uaddr_len, peer); + + if (rc != 0) + return rc; + + *uaddr_len = sctp_v6_addr_to_user(sctp_sk(sock->sk), + (union sctp_addr *)uaddr); + + return rc; +} + +static const struct proto_ops inet6_seqpacket_ops = { + .family = PF_INET6, + .owner = THIS_MODULE, + .release = inet6_release, + .bind = inet6_bind, + .connect = inet_dgram_connect, + .socketpair = sock_no_socketpair, + .accept = inet_accept, + .getname = sctp_getname, + .poll = sctp_poll, + .ioctl = inet6_ioctl, + .listen = sctp_inet_listen, + .shutdown = inet_shutdown, + .setsockopt = sock_common_setsockopt, + .getsockopt = sock_common_getsockopt, + .sendmsg = inet_sendmsg, + .recvmsg = sock_common_recvmsg, + .mmap = sock_no_mmap, +#ifdef CONFIG_COMPAT + .compat_setsockopt = compat_sock_common_setsockopt, + .compat_getsockopt = compat_sock_common_getsockopt, +#endif +}; + +static struct inet_protosw sctpv6_seqpacket_protosw = { + .type = SOCK_SEQPACKET, + .protocol = IPPROTO_SCTP, + .prot = &sctpv6_prot, + .ops = &inet6_seqpacket_ops, + .flags = SCTP_PROTOSW_FLAG +}; +static struct inet_protosw sctpv6_stream_protosw = { + .type = SOCK_STREAM, + .protocol = IPPROTO_SCTP, + .prot = &sctpv6_prot, + .ops = &inet6_seqpacket_ops, + .flags = SCTP_PROTOSW_FLAG, +}; + +static int sctp6_rcv(struct sk_buff *skb) +{ + return sctp_rcv(skb) ? -1 : 0; +} + +static const struct inet6_protocol sctpv6_protocol = { + .handler = sctp6_rcv, + .err_handler = sctp_v6_err, + .flags = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL, +}; + +static struct sctp_af sctp_af_inet6 = { + .sa_family = AF_INET6, + .sctp_xmit = sctp_v6_xmit, + .setsockopt = ipv6_setsockopt, + .getsockopt = ipv6_getsockopt, + .get_dst = sctp_v6_get_dst, + .get_saddr = sctp_v6_get_saddr, + .copy_addrlist = sctp_v6_copy_addrlist, + .from_skb = sctp_v6_from_skb, + .from_sk = sctp_v6_from_sk, + .from_addr_param = sctp_v6_from_addr_param, + .to_addr_param = sctp_v6_to_addr_param, + .cmp_addr = sctp_v6_cmp_addr, + .scope = sctp_v6_scope, + .addr_valid = sctp_v6_addr_valid, + .inaddr_any = sctp_v6_inaddr_any, + .is_any = sctp_v6_is_any, + .available = sctp_v6_available, + .skb_iif = sctp_v6_skb_iif, + .is_ce = sctp_v6_is_ce, + .seq_dump_addr = sctp_v6_seq_dump_addr, + .ecn_capable = sctp_v6_ecn_capable, + .net_header_len = sizeof(struct ipv6hdr), + .sockaddr_len = sizeof(struct sockaddr_in6), +#ifdef CONFIG_COMPAT + .compat_setsockopt = compat_ipv6_setsockopt, + .compat_getsockopt = compat_ipv6_getsockopt, +#endif +}; + +static struct sctp_pf sctp_pf_inet6 = { + .event_msgname = sctp_inet6_event_msgname, + .skb_msgname = sctp_inet6_skb_msgname, + .af_supported = sctp_inet6_af_supported, + .cmp_addr = sctp_inet6_cmp_addr, + .bind_verify = sctp_inet6_bind_verify, + .send_verify = sctp_inet6_send_verify, + .supported_addrs = sctp_inet6_supported_addrs, + .create_accept_sk = sctp_v6_create_accept_sk, + .addr_to_user = sctp_v6_addr_to_user, + .to_sk_saddr = sctp_v6_to_sk_saddr, + .to_sk_daddr = sctp_v6_to_sk_daddr, + .af = &sctp_af_inet6, +}; + +/* Initialize IPv6 support and register with socket layer. 
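+ * Registration is layered: the protosw entries above expose the
+ * SOCK_SEQPACKET and SOCK_STREAM socket types to the inet6 socket
+ * layer, while the sctp_af/sctp_pf tables registered below give the
+ * SCTP core its per-family operations.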
*/ +void sctp_v6_pf_init(void) +{ + /* Register the SCTP specific PF_INET6 functions. */ + sctp_register_pf(&sctp_pf_inet6, PF_INET6); + + /* Register the SCTP specific AF_INET6 functions. */ + sctp_register_af(&sctp_af_inet6); +} + +void sctp_v6_pf_exit(void) +{ + list_del(&sctp_af_inet6.list); +} + +/* Initialize IPv6 support and register with socket layer. */ +int sctp_v6_protosw_init(void) +{ + int rc; + + rc = proto_register(&sctpv6_prot, 1); + if (rc) + return rc; + + /* Add SCTPv6(UDP and TCP style) to inetsw6 linked list. */ + inet6_register_protosw(&sctpv6_seqpacket_protosw); + inet6_register_protosw(&sctpv6_stream_protosw); + + return 0; +} + +void sctp_v6_protosw_exit(void) +{ + inet6_unregister_protosw(&sctpv6_seqpacket_protosw); + inet6_unregister_protosw(&sctpv6_stream_protosw); + proto_unregister(&sctpv6_prot); +} + + +/* Register with inet6 layer. */ +int sctp_v6_add_protocol(void) +{ + /* Register notifier for inet6 address additions/deletions. */ + register_inet6addr_notifier(&sctp_inet6addr_notifier); + + if (inet6_add_protocol(&sctpv6_protocol, IPPROTO_SCTP) < 0) + return -EAGAIN; + + return 0; +} + +/* Unregister with inet6 layer. */ +void sctp_v6_del_protocol(void) +{ + inet6_del_protocol(&sctpv6_protocol, IPPROTO_SCTP); + unregister_inet6addr_notifier(&sctp_inet6addr_notifier); +} diff --git a/net/sctp/objcnt.c b/net/sctp/objcnt.c new file mode 100644 index 000000000..40e7fac96 --- /dev/null +++ b/net/sctp/objcnt.c @@ -0,0 +1,142 @@ +/* SCTP kernel implementation + * (C) Copyright IBM Corp. 2001, 2004 + * + * This file is part of the SCTP kernel implementation + * + * Support for memory object debugging. This allows one to monitor the + * object allocations/deallocations for types instrumented for this + * via the proc fs. + * + * This SCTP implementation is free software; + * you can redistribute it and/or modify it under the terms of + * the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This SCTP implementation is distributed in the hope that it + * will be useful, but WITHOUT ANY WARRANTY; without even the implied + * ************************ + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with GNU CC; see the file COPYING. If not, see + * <http://www.gnu.org/licenses/>. + * + * Please send any bug reports or fixes you make to the + * email address(es): + * lksctp developers <linux-sctp@vger.kernel.org> + * + * Written or modified by: + * Jon Grimm <jgrimm@us.ibm.com> + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/kernel.h> +#include <net/sctp/sctp.h> + +/* + * Global counters to count raw object allocation counts. + * To add new counters, choose a unique suffix for the variable + * name as the helper macros key off this suffix to make + * life easier for the programmer. + */ + +SCTP_DBG_OBJCNT(sock); +SCTP_DBG_OBJCNT(ep); +SCTP_DBG_OBJCNT(transport); +SCTP_DBG_OBJCNT(assoc); +SCTP_DBG_OBJCNT(bind_addr); +SCTP_DBG_OBJCNT(bind_bucket); +SCTP_DBG_OBJCNT(chunk); +SCTP_DBG_OBJCNT(addr); +SCTP_DBG_OBJCNT(ssnmap); +SCTP_DBG_OBJCNT(datamsg); +SCTP_DBG_OBJCNT(keys); + +/* An array to make it easy to pretty print the debug information + * to the proc fs. 
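+ *
+ * (Sketch of the macro machinery, for orientation; the authoritative
+ * definitions live in <net/sctp/sctp.h>. Each SCTP_DBG_OBJCNT() above
+ * presumably declares an atomic counter, and each SCTP_DBG_OBJCNT_ENTRY()
+ * below pairs a label with a pointer to that counter, roughly:
+ *
+ *	#define SCTP_DBG_OBJCNT(name) \
+ *		atomic_t sctp_dbg_objcnt_##name = ATOMIC_INIT(0)
+ *	#define SCTP_DBG_OBJCNT_ENTRY(name) \
+ *		{ .label = #name, .counter = &sctp_dbg_objcnt_##name }
+ * )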
+ */ +static sctp_dbg_objcnt_entry_t sctp_dbg_objcnt[] = { + SCTP_DBG_OBJCNT_ENTRY(sock), + SCTP_DBG_OBJCNT_ENTRY(ep), + SCTP_DBG_OBJCNT_ENTRY(assoc), + SCTP_DBG_OBJCNT_ENTRY(transport), + SCTP_DBG_OBJCNT_ENTRY(chunk), + SCTP_DBG_OBJCNT_ENTRY(bind_addr), + SCTP_DBG_OBJCNT_ENTRY(bind_bucket), + SCTP_DBG_OBJCNT_ENTRY(addr), + SCTP_DBG_OBJCNT_ENTRY(ssnmap), + SCTP_DBG_OBJCNT_ENTRY(datamsg), + SCTP_DBG_OBJCNT_ENTRY(keys), +}; + +/* Callback from procfs to read out objcount information. + * Walk through the entries in the sctp_dbg_objcnt array, dumping + * the raw object counts for each monitored type. + */ +static int sctp_objcnt_seq_show(struct seq_file *seq, void *v) +{ + int i; + + i = (int)*(loff_t *)v; + seq_setwidth(seq, 127); + seq_printf(seq, "%s: %d", sctp_dbg_objcnt[i].label, + atomic_read(sctp_dbg_objcnt[i].counter)); + seq_pad(seq, '\n'); + return 0; +} + +static void *sctp_objcnt_seq_start(struct seq_file *seq, loff_t *pos) +{ + return (*pos >= ARRAY_SIZE(sctp_dbg_objcnt)) ? NULL : (void *)pos; +} + +static void sctp_objcnt_seq_stop(struct seq_file *seq, void *v) +{ +} + +static void *sctp_objcnt_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + ++*pos; + return (*pos >= ARRAY_SIZE(sctp_dbg_objcnt)) ? NULL : (void *)pos; +} + +static const struct seq_operations sctp_objcnt_seq_ops = { + .start = sctp_objcnt_seq_start, + .next = sctp_objcnt_seq_next, + .stop = sctp_objcnt_seq_stop, + .show = sctp_objcnt_seq_show, +}; + +static int sctp_objcnt_seq_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &sctp_objcnt_seq_ops); +} + +static const struct file_operations sctp_objcnt_ops = { + .open = sctp_objcnt_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +/* Initialize the objcount in the proc filesystem. */ +void sctp_dbg_objcnt_init(struct net *net) +{ + struct proc_dir_entry *ent; + + ent = proc_create("sctp_dbg_objcnt", 0, + net->sctp.proc_net_sctp, &sctp_objcnt_ops); + if (!ent) + pr_warn("sctp_dbg_objcnt: Unable to create /proc entry.\n"); +} + +/* Cleanup the objcount entry in the proc filesystem. */ +void sctp_dbg_objcnt_exit(struct net *net) +{ + remove_proc_entry("sctp_dbg_objcnt", net->sctp.proc_net_sctp); +} + + diff --git a/net/sctp/output.c b/net/sctp/output.c new file mode 100644 index 000000000..abe7c2db2 --- /dev/null +++ b/net/sctp/output.c @@ -0,0 +1,788 @@ +/* SCTP kernel implementation + * (C) Copyright IBM Corp. 2001, 2004 + * Copyright (c) 1999-2000 Cisco, Inc. + * Copyright (c) 1999-2001 Motorola, Inc. + * + * This file is part of the SCTP kernel implementation + * + * These functions handle output processing. + * + * This SCTP implementation is free software; + * you can redistribute it and/or modify it under the terms of + * the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This SCTP implementation is distributed in the hope that it + * will be useful, but WITHOUT ANY WARRANTY; without even the implied + * ************************ + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with GNU CC; see the file COPYING. If not, see + * <http://www.gnu.org/licenses/>. + * + * Please send any bug reports or fixes you make to the + * email address(es): + * lksctp developers <linux-sctp@vger.kernel.org> + * + * Written or modified by: + * La Monte H.P. 
Yarroll <piggy@acm.org>
+ * Karl Knutson <karl@athena.chicago.il.us>
+ * Jon Grimm <jgrimm@austin.ibm.com>
+ * Sridhar Samudrala <sri@us.ibm.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/wait.h>
+#include <linux/time.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <net/inet_ecn.h>
+#include <net/ip.h>
+#include <net/icmp.h>
+#include <net/net_namespace.h>
+
+#include <linux/socket.h> /* for sa_family_t */
+#include <net/sock.h>
+
+#include <net/sctp/sctp.h>
+#include <net/sctp/sm.h>
+#include <net/sctp/checksum.h>
+
+/* Forward declarations for private helpers. */
+static sctp_xmit_t __sctp_packet_append_chunk(struct sctp_packet *packet,
+ struct sctp_chunk *chunk);
+static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
+ struct sctp_chunk *chunk);
+static void sctp_packet_append_data(struct sctp_packet *packet,
+ struct sctp_chunk *chunk);
+static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet,
+ struct sctp_chunk *chunk,
+ u16 chunk_len);
+
+static void sctp_packet_reset(struct sctp_packet *packet)
+{
+ packet->size = packet->overhead;
+ packet->has_cookie_echo = 0;
+ packet->has_sack = 0;
+ packet->has_data = 0;
+ packet->has_auth = 0;
+ packet->ipfragok = 0;
+ packet->auth = NULL;
+}
+
+/* Configure a packet.
+ * This appears to be a follow-up set of initializations.
+ */
+struct sctp_packet *sctp_packet_config(struct sctp_packet *packet,
+ __u32 vtag, int ecn_capable)
+{
+ struct sctp_chunk *chunk = NULL;
+
+ pr_debug("%s: packet:%p vtag:0x%x\n", __func__, packet, vtag);
+
+ packet->vtag = vtag;
+
+ if (ecn_capable && sctp_packet_empty(packet)) {
+ chunk = sctp_get_ecne_prepend(packet->transport->asoc);
+
+ /* If there is a prepend chunk, stick it on the list before
+ * any other chunks get appended.
+ */
+ if (chunk)
+ sctp_packet_append_chunk(packet, chunk);
+ }
+
+ return packet;
+}
+
+/* Initialize the packet structure. */
+struct sctp_packet *sctp_packet_init(struct sctp_packet *packet,
+ struct sctp_transport *transport,
+ __u16 sport, __u16 dport)
+{
+ struct sctp_association *asoc = transport->asoc;
+ size_t overhead;
+
+ pr_debug("%s: packet:%p transport:%p\n", __func__, packet, transport);
+
+ packet->transport = transport;
+ packet->source_port = sport;
+ packet->destination_port = dport;
+ INIT_LIST_HEAD(&packet->chunk_list);
+ if (asoc) {
+ struct sctp_sock *sp = sctp_sk(asoc->base.sk);
+ overhead = sp->pf->af->net_header_len;
+ } else {
+ overhead = sizeof(struct ipv6hdr);
+ }
+ overhead += sizeof(struct sctphdr);
+ packet->overhead = overhead;
+ sctp_packet_reset(packet);
+ packet->vtag = 0;
+
+ return packet;
+}
+
+/* Free a packet. */
+void sctp_packet_free(struct sctp_packet *packet)
+{
+ struct sctp_chunk *chunk, *tmp;
+
+ pr_debug("%s: packet:%p\n", __func__, packet);
+
+ list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
+ list_del_init(&chunk->list);
+ sctp_chunk_free(chunk);
+ }
+}
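+
+/* Illustrative arithmetic (hypothetical constants, not built): with the
+ * IPv6 header taken as the worst case above, a freshly initialized packet
+ * starts at overhead = 40 (struct ipv6hdr) + 12 (struct sctphdr) = 52
+ * bytes, so a 1500-byte path MTU leaves 1448 bytes of room for chunks.
+ */
+#if 0
+enum { EX_OVERHEAD = 40 + 12, EX_CHUNK_ROOM = 1500 - (40 + 12) };
+#endif
+
+/* This routine tries to append the chunk to the offered packet. If adding
+ * the chunk causes the packet to exceed the path MTU and a COOKIE_ECHO
+ * chunk is not present in the packet, it transmits the input packet.
+ * Data can be bundled with a packet containing a COOKIE_ECHO chunk as long
+ * as it can fit in the packet, but any more data that does not fit in this
+ * packet can be sent only after receiving the COOKIE_ACK. 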
+ */
+sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet,
+ struct sctp_chunk *chunk,
+ int one_packet)
+{
+ sctp_xmit_t retval;
+ int error = 0;
+
+ pr_debug("%s: packet:%p chunk:%p\n", __func__, packet, chunk);
+
+ switch ((retval = (sctp_packet_append_chunk(packet, chunk)))) {
+ case SCTP_XMIT_PMTU_FULL:
+ if (!packet->has_cookie_echo) {
+ error = sctp_packet_transmit(packet);
+ if (error < 0)
+ chunk->skb->sk->sk_err = -error;
+
+ /* If we have an empty packet, then we can NOT ever
+ * return PMTU_FULL.
+ */
+ if (!one_packet)
+ retval = sctp_packet_append_chunk(packet,
+ chunk);
+ }
+ break;
+
+ case SCTP_XMIT_RWND_FULL:
+ case SCTP_XMIT_OK:
+ case SCTP_XMIT_DELAY:
+ break;
+ }
+
+ return retval;
+}
+
+/* Try to bundle an auth chunk into the packet. */
+static sctp_xmit_t sctp_packet_bundle_auth(struct sctp_packet *pkt,
+ struct sctp_chunk *chunk)
+{
+ struct sctp_association *asoc = pkt->transport->asoc;
+ struct sctp_chunk *auth;
+ sctp_xmit_t retval = SCTP_XMIT_OK;
+
+ /* if we don't have an association, we can't do authentication */
+ if (!asoc)
+ return retval;
+
+ /* See if this is an auth chunk we are bundling or if
+ * auth is already bundled.
+ */
+ if (chunk->chunk_hdr->type == SCTP_CID_AUTH || pkt->has_auth)
+ return retval;
+
+ /* if the peer did not request this chunk to be authenticated,
+ * don't do it
+ */
+ if (!chunk->auth)
+ return retval;
+
+ auth = sctp_make_auth(asoc);
+ if (!auth)
+ return retval;
+
+ retval = __sctp_packet_append_chunk(pkt, auth);
+
+ if (retval != SCTP_XMIT_OK)
+ sctp_chunk_free(auth);
+
+ return retval;
+}
+
+/* Try to bundle a SACK with the packet. */
+static sctp_xmit_t sctp_packet_bundle_sack(struct sctp_packet *pkt,
+ struct sctp_chunk *chunk)
+{
+ sctp_xmit_t retval = SCTP_XMIT_OK;
+
+ /* If sending DATA and haven't already bundled a SACK, try to
+ * bundle one into the packet.
+ */
+ if (sctp_chunk_is_data(chunk) && !pkt->has_sack &&
+ !pkt->has_cookie_echo) {
+ struct sctp_association *asoc;
+ struct timer_list *timer;
+ asoc = pkt->transport->asoc;
+ timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
+
+ /* If the SACK timer is running, we have a pending SACK */
+ if (timer_pending(timer)) {
+ struct sctp_chunk *sack;
+
+ if (pkt->transport->sack_generation !=
+ pkt->transport->asoc->peer.sack_generation)
+ return retval;
+
+ asoc->a_rwnd = asoc->rwnd;
+ sack = sctp_make_sack(asoc);
+ if (sack) {
+ retval = __sctp_packet_append_chunk(pkt, sack);
+ if (retval != SCTP_XMIT_OK) {
+ sctp_chunk_free(sack);
+ goto out;
+ }
+ asoc->peer.sack_needed = 0;
+ if (del_timer(timer))
+ sctp_association_put(asoc);
+ }
+ }
+ }
+out:
+ return retval;
+}
+
+
+/* Append a chunk to the offered packet, reporting back any inability to do
+ * so.
+ */
+static sctp_xmit_t __sctp_packet_append_chunk(struct sctp_packet *packet,
+ struct sctp_chunk *chunk)
+{
+ sctp_xmit_t retval = SCTP_XMIT_OK;
+ __u16 chunk_len = WORD_ROUND(ntohs(chunk->chunk_hdr->length));
+
+ /* Check to see if this chunk will fit into the packet */
+ retval = sctp_packet_will_fit(packet, chunk, chunk_len);
+ if (retval != SCTP_XMIT_OK)
+ goto finish;
+
+ /* We believe that this chunk is OK to add to the packet */
+ switch (chunk->chunk_hdr->type) {
+ case SCTP_CID_DATA:
+ /* Account for the data being in the packet */
+ sctp_packet_append_data(packet, chunk);
+ /* Disallow SACK bundling after DATA. 
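+ * (Control chunks must be placed before DATA within a packet, per the
+ * bundling rule quoted in sctp_outq_flush(), so once DATA is in, no
+ * further SACK can be bundled; the same reasoning covers AUTH below.)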
*/
+ packet->has_sack = 1;
+ /* Disallow AUTH bundling after DATA */
+ packet->has_auth = 1;
+ /* Let it be known that the packet has DATA in it */
+ packet->has_data = 1;
+ /* timestamp the chunk for rtx purposes */
+ chunk->sent_at = jiffies;
+ break;
+ case SCTP_CID_COOKIE_ECHO:
+ packet->has_cookie_echo = 1;
+ break;
+
+ case SCTP_CID_SACK:
+ packet->has_sack = 1;
+ if (chunk->asoc)
+ chunk->asoc->stats.osacks++;
+ break;
+
+ case SCTP_CID_AUTH:
+ packet->has_auth = 1;
+ packet->auth = chunk;
+ break;
+ }
+
+ /* It is OK to send this chunk. */
+ list_add_tail(&chunk->list, &packet->chunk_list);
+ packet->size += chunk_len;
+ chunk->transport = packet->transport;
+finish:
+ return retval;
+}
+
+/* Append a chunk to the offered packet, reporting back any inability to do
+ * so.
+ */
+sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet,
+ struct sctp_chunk *chunk)
+{
+ sctp_xmit_t retval = SCTP_XMIT_OK;
+
+ pr_debug("%s: packet:%p chunk:%p\n", __func__, packet, chunk);
+
+ /* Data chunks are special. Before seeing what else we can
+ * bundle into this packet, check to see if we are allowed to
+ * send this DATA.
+ */
+ if (sctp_chunk_is_data(chunk)) {
+ retval = sctp_packet_can_append_data(packet, chunk);
+ if (retval != SCTP_XMIT_OK)
+ goto finish;
+ }
+
+ /* Try to bundle AUTH chunk */
+ retval = sctp_packet_bundle_auth(packet, chunk);
+ if (retval != SCTP_XMIT_OK)
+ goto finish;
+
+ /* Try to bundle SACK chunk */
+ retval = sctp_packet_bundle_sack(packet, chunk);
+ if (retval != SCTP_XMIT_OK)
+ goto finish;
+
+ retval = __sctp_packet_append_chunk(packet, chunk);
+
+finish:
+ return retval;
+}
+
+static void sctp_packet_release_owner(struct sk_buff *skb)
+{
+ sk_free(skb->sk);
+}
+
+static void sctp_packet_set_owner_w(struct sk_buff *skb, struct sock *sk)
+{
+ skb_orphan(skb);
+ skb->sk = sk;
+ skb->destructor = sctp_packet_release_owner;
+
+ /*
+ * The data chunks have already been accounted for in sctp_sendmsg(),
+ * therefore only reserve a single byte to keep the socket around until
+ * the packet has been transmitted.
+ */
+ atomic_inc(&sk->sk_wmem_alloc);
+}
+
+/* All packets are sent to the network through this function from
+ * sctp_outq_tail().
+ *
+ * The return value is a normal kernel error return value.
+ */
+int sctp_packet_transmit(struct sctp_packet *packet)
+{
+ struct sctp_transport *tp = packet->transport;
+ struct sctp_association *asoc = tp->asoc;
+ struct sctphdr *sh;
+ struct sk_buff *nskb;
+ struct sctp_chunk *chunk, *tmp;
+ struct sock *sk;
+ int err = 0;
+ int padding; /* How much padding do we need? */
+ __u8 has_data = 0;
+ struct dst_entry *dst;
+ unsigned char *auth = NULL; /* pointer to auth in skb data */
+
+ pr_debug("%s: packet:%p\n", __func__, packet);
+
+ /* Do NOT generate a chunkless packet. */
+ if (list_empty(&packet->chunk_list))
+ return err;
+
+ /* Set up convenience variables... */
+ chunk = list_entry(packet->chunk_list.next, struct sctp_chunk, list);
+ sk = chunk->skb->sk;
+
+ /* Allocate the new skb. */
+ nskb = alloc_skb(packet->size + MAX_HEADER, GFP_ATOMIC);
+ if (!nskb)
+ goto nomem;
+
+ /* Make sure the outbound skb has enough header room reserved. */
+ skb_reserve(nskb, packet->overhead + MAX_HEADER);
+
+ /* Set the owning socket so that we know where to get the
+ * destination IP address. 
+ */ + sctp_packet_set_owner_w(nskb, sk); + + if (!sctp_transport_dst_check(tp)) { + sctp_transport_route(tp, NULL, sctp_sk(sk)); + if (asoc && (asoc->param_flags & SPP_PMTUD_ENABLE)) { + sctp_assoc_sync_pmtu(sk, asoc); + } + } + dst = dst_clone(tp->dst); + if (!dst) + goto no_route; + skb_dst_set(nskb, dst); + + /* Build the SCTP header. */ + sh = (struct sctphdr *)skb_push(nskb, sizeof(struct sctphdr)); + skb_reset_transport_header(nskb); + sh->source = htons(packet->source_port); + sh->dest = htons(packet->destination_port); + + /* From 6.8 Adler-32 Checksum Calculation: + * After the packet is constructed (containing the SCTP common + * header and one or more control or DATA chunks), the + * transmitter shall: + * + * 1) Fill in the proper Verification Tag in the SCTP common + * header and initialize the checksum field to 0's. + */ + sh->vtag = htonl(packet->vtag); + sh->checksum = 0; + + /** + * 6.10 Bundling + * + * An endpoint bundles chunks by simply including multiple + * chunks in one outbound SCTP packet. ... + */ + + /** + * 3.2 Chunk Field Descriptions + * + * The total length of a chunk (including Type, Length and + * Value fields) MUST be a multiple of 4 bytes. If the length + * of the chunk is not a multiple of 4 bytes, the sender MUST + * pad the chunk with all zero bytes and this padding is not + * included in the chunk length field. The sender should + * never pad with more than 3 bytes. + * + * [This whole comment explains WORD_ROUND() below.] + */ + + pr_debug("***sctp_transmit_packet***\n"); + + list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) { + list_del_init(&chunk->list); + if (sctp_chunk_is_data(chunk)) { + /* 6.3.1 C4) When data is in flight and when allowed + * by rule C5, a new RTT measurement MUST be made each + * round trip. Furthermore, new RTT measurements + * SHOULD be made no more than once per round-trip + * for a given destination transport address. + */ + + if (!chunk->resent && !tp->rto_pending) { + chunk->rtt_in_progress = 1; + tp->rto_pending = 1; + } + + has_data = 1; + } + + padding = WORD_ROUND(chunk->skb->len) - chunk->skb->len; + if (padding) + memset(skb_put(chunk->skb, padding), 0, padding); + + /* if this is the auth chunk that we are adding, + * store pointer where it will be added and put + * the auth into the packet. + */ + if (chunk == packet->auth) + auth = skb_tail_pointer(nskb); + + memcpy(skb_put(nskb, chunk->skb->len), + chunk->skb->data, chunk->skb->len); + + pr_debug("*** Chunk:%p[%s] %s 0x%x, length:%d, chunk->skb->len:%d, " + "rtt_in_progress:%d\n", chunk, + sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)), + chunk->has_tsn ? "TSN" : "No TSN", + chunk->has_tsn ? ntohl(chunk->subh.data_hdr->tsn) : 0, + ntohs(chunk->chunk_hdr->length), chunk->skb->len, + chunk->rtt_in_progress); + + /* + * If this is a control chunk, this is our last + * reference. Free data chunks after they've been + * acknowledged or have failed. + */ + if (!sctp_chunk_is_data(chunk)) + sctp_chunk_free(chunk); + } + + /* SCTP-AUTH, Section 6.2 + * The sender MUST calculate the MAC as described in RFC2104 [2] + * using the hash function H as described by the MAC Identifier and + * the shared association key K based on the endpoint pair shared key + * described by the shared key identifier. The 'data' used for the + * computation of the AUTH-chunk is given by the AUTH chunk with its + * HMAC field set to zero (as shown in Figure 6) followed by all + * chunks that are placed after the AUTH chunk in the SCTP packet. 
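+ *
+ * Worked layout (illustrative): for a packet assembled as
+ *
+ *	[common header][AUTH (HMAC = 0)][DATA][DATA]
+ *
+ * the MAC input runs from the start of the AUTH chunk through the end
+ * of the final DATA chunk, and the result is then written back into
+ * the AUTH chunk's HMAC field.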
+ */
+ if (auth)
+ sctp_auth_calculate_hmac(asoc, nskb,
+ (struct sctp_auth_chunk *)auth,
+ GFP_ATOMIC);
+
+ /* 2) Calculate the Adler-32 checksum of the whole packet,
+ * including the SCTP common header and all the
+ * chunks.
+ *
+ * Note: Adler-32 is no longer applicable, as it has been replaced
+ * by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>.
+ */
+ if (!sctp_checksum_disable) {
+ if (!(dst->dev->features & NETIF_F_SCTP_CSUM) ||
+ (dst_xfrm(dst) != NULL) || packet->ipfragok) {
+ sh->checksum = sctp_compute_cksum(nskb, 0);
+ } else {
+ /* no need to seed pseudo checksum for SCTP */
+ nskb->ip_summed = CHECKSUM_PARTIAL;
+ nskb->csum_start = skb_transport_header(nskb) - nskb->head;
+ nskb->csum_offset = offsetof(struct sctphdr, checksum);
+ }
+ }
+
+ /* IP layer ECN support
+ * From RFC 2481
+ * "The ECN-Capable Transport (ECT) bit would be set by the
+ * data sender to indicate that the end-points of the
+ * transport protocol are ECN-capable."
+ *
+ * Now setting the ECT bit all the time, as it should not cause
+ * any problems protocol-wise even if our peer ignores it.
+ *
+ * Note: this works for the IPv6 layer as well, which checks the
+ * bit later, at transmission time. See IP6_ECN_flow_xmit().
+ */
+ tp->af_specific->ecn_capable(nskb->sk);
+
+ /* Set up the IP options. */
+ /* BUG: not implemented
+ * For v4 this all lives somewhere in sk->sk_opt...
+ */
+
+ /* Dump that on IP! */
+ if (asoc) {
+ asoc->stats.opackets++;
+ if (asoc->peer.last_sent_to != tp)
+ /* Considering the multiple CPU scenario, this is a
+ * "correcter" place for last_sent_to. --xguo
+ */
+ asoc->peer.last_sent_to = tp;
+ }
+
+ if (has_data) {
+ struct timer_list *timer;
+ unsigned long timeout;
+
+ /* Restart the AUTOCLOSE timer when sending data. */
+ if (sctp_state(asoc, ESTABLISHED) &&
+ asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) {
+ timer = &asoc->timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE];
+ timeout = asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE];
+
+ if (!mod_timer(timer, jiffies + timeout))
+ sctp_association_hold(asoc);
+ }
+ }
+
+ pr_debug("***sctp_transmit_packet*** skb->len:%d\n", nskb->len);
+
+ nskb->ignore_df = packet->ipfragok;
+ tp->af_specific->sctp_xmit(nskb, tp);
+
+out:
+ sctp_packet_reset(packet);
+ return err;
+no_route:
+ kfree_skb(nskb);
+
+ if (asoc)
+ IP_INC_STATS(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
+
+ /* FIXME: Returning the 'err' will affect all the associations
+ * associated with a socket, although only one of the paths of the
+ * association is unreachable.
+ * The real failure of a transport or association can be passed on
+ * to the user via notifications. So setting this error may not be
+ * required.
+ */
+ /* err = -EHOSTUNREACH; */
+err:
+ /* Control chunks are unreliable so just drop them. DATA chunks
+ * will get resent or dropped later. 
+ */
+
+ list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
+ list_del_init(&chunk->list);
+ if (!sctp_chunk_is_data(chunk))
+ sctp_chunk_free(chunk);
+ }
+ goto out;
+nomem:
+ err = -ENOMEM;
+ goto err;
+}
+
+/********************************************************************
+ * 2nd Level Abstractions
+ ********************************************************************/
+
+/* This private function checks to see if a chunk can be added */
+static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
+ struct sctp_chunk *chunk)
+{
+ size_t datasize, rwnd, inflight, flight_size;
+ struct sctp_transport *transport = packet->transport;
+ struct sctp_association *asoc = transport->asoc;
+ struct sctp_outq *q = &asoc->outqueue;
+
+ /* RFC 2960 6.1 Transmission of DATA Chunks
+ *
+ * A) At any given time, the data sender MUST NOT transmit new data to
+ * any destination transport address if its peer's rwnd indicates
+ * that the peer has no buffer space (i.e. rwnd is 0, see Section
+ * 6.2.1). However, regardless of the value of rwnd (including if it
+ * is 0), the data sender can always have one DATA chunk in flight to
+ * the receiver if allowed by cwnd (see rule B below). This rule
+ * allows the sender to probe for a change in rwnd that the sender
+ * missed due to the SACK having been lost in transit from the data
+ * receiver to the data sender.
+ */
+
+ rwnd = asoc->peer.rwnd;
+ inflight = q->outstanding_bytes;
+ flight_size = transport->flight_size;
+
+ datasize = sctp_data_size(chunk);
+
+ if (datasize > rwnd && inflight > 0)
+ /* We have (at least) one data chunk in flight,
+ * so we can't fall back to rule 6.1 B).
+ */
+ return SCTP_XMIT_RWND_FULL;
+
+ /* RFC 2960 6.1 Transmission of DATA Chunks
+ *
+ * B) At any given time, the sender MUST NOT transmit new data
+ * to a given transport address if it has cwnd or more bytes
+ * of data outstanding to that transport address.
+ */
+ /* RFC 2960, Section 7.2.4, and the Implementer's Guide, Section 2.8:
+ *
+ * 3) ...
+ * When a Fast Retransmit is being performed the sender SHOULD
+ * ignore the value of cwnd and SHOULD NOT delay retransmission.
+ */
+ if (chunk->fast_retransmit != SCTP_NEED_FRTX &&
+ flight_size >= transport->cwnd)
+ return SCTP_XMIT_RWND_FULL;
+
+ /* Nagle's algorithm to solve the small-packet problem:
+ * Inhibit the sending of new chunks when new outgoing data arrives
+ * if any previously transmitted data on the connection remains
+ * unacknowledged.
+ */
+
+ if (sctp_sk(asoc->base.sk)->nodelay)
+ /* Nagle disabled */
+ return SCTP_XMIT_OK;
+
+ if (!sctp_packet_empty(packet))
+ /* Append to packet */
+ return SCTP_XMIT_OK;
+
+ if (inflight == 0)
+ /* Nothing unacked */
+ return SCTP_XMIT_OK;
+
+ if (!sctp_state(asoc, ESTABLISHED))
+ return SCTP_XMIT_OK;
+
+ /* Check whether this chunk and all the rest of pending data will fit
+ * or delay in hopes of bundling a full sized packet. 
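+ *
+ * Worked example (illustrative numbers): with a 1500-byte path MTU and
+ * 52 bytes of header overhead there is room for 1448 bytes of chunks,
+ * so a 200-byte chunk arriving with 1300 bytes already queued is sent
+ * now (200 + 1300 >= 1448), while the same chunk with only 100 bytes
+ * queued falls through to the can_delay test below.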
+ */
+ if (chunk->skb->len + q->out_qlen >= transport->pathmtu - packet->overhead)
+ /* Enough data queued to fill a packet */
+ return SCTP_XMIT_OK;
+
+ /* Don't delay large message writes that may have been fragmented */
+ if (!chunk->msg->can_delay)
+ return SCTP_XMIT_OK;
+
+ /* Defer until all data acked or packet full */
+ return SCTP_XMIT_DELAY;
+}
+
+/* This private function does management things when adding a DATA chunk */
+static void sctp_packet_append_data(struct sctp_packet *packet,
+ struct sctp_chunk *chunk)
+{
+ struct sctp_transport *transport = packet->transport;
+ size_t datasize = sctp_data_size(chunk);
+ struct sctp_association *asoc = transport->asoc;
+ u32 rwnd = asoc->peer.rwnd;
+
+ /* Keep track of how many bytes are in flight over this transport. */
+ transport->flight_size += datasize;
+
+ /* Keep track of how many bytes are in flight to the receiver. */
+ asoc->outqueue.outstanding_bytes += datasize;
+
+ /* Update our view of the receiver's rwnd. */
+ if (datasize < rwnd)
+ rwnd -= datasize;
+ else
+ rwnd = 0;
+
+ asoc->peer.rwnd = rwnd;
+ /* Has been accepted for transmission. */
+ if (!asoc->peer.prsctp_capable)
+ chunk->msg->can_abandon = 0;
+ sctp_chunk_assign_tsn(chunk);
+ sctp_chunk_assign_ssn(chunk);
+}
+
+static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet,
+ struct sctp_chunk *chunk,
+ u16 chunk_len)
+{
+ size_t psize;
+ size_t pmtu;
+ int too_big;
+ sctp_xmit_t retval = SCTP_XMIT_OK;
+
+ psize = packet->size;
+ pmtu = ((packet->transport->asoc) ?
+ (packet->transport->asoc->pathmtu) :
+ (packet->transport->pathmtu));
+
+ too_big = (psize + chunk_len > pmtu);
+
+ /* Decide if we need to fragment or resubmit later. */
+ if (too_big) {
+ /* It's OK to fragment at the IP level if any one of the
+ * following is true:
+ * 1. The packet is empty (meaning this chunk is greater
+ * than the MTU)
+ * 2. The chunk we are adding is a control chunk
+ * 3. The packet doesn't have any data in it yet and data
+ * requires authentication.
+ */
+ if (sctp_packet_empty(packet) || !sctp_chunk_is_data(chunk) ||
+ (!packet->has_data && chunk->auth)) {
+ /* We no longer do re-fragmentation.
+ * Just fragment at the IP layer, if we
+ * actually hit this condition
+ */
+ packet->ipfragok = 1;
+ } else {
+ retval = SCTP_XMIT_PMTU_FULL;
+ }
+ }
+
+ return retval;
+}
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
new file mode 100644
index 000000000..7e8f0a117
--- /dev/null
+++ b/net/sctp/outqueue.c
@@ -0,0 +1,1787 @@
+/* SCTP kernel implementation
+ * (C) Copyright IBM Corp. 2001, 2004
+ * Copyright (c) 1999-2000 Cisco, Inc.
+ * Copyright (c) 1999-2001 Motorola, Inc.
+ * Copyright (c) 2001-2003 Intel Corp.
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * These functions implement the sctp_outq class. The outqueue handles
+ * bundling and queueing of outgoing SCTP chunks.
+ *
+ * This SCTP implementation is free software;
+ * you can redistribute it and/or modify it under the terms of
+ * the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This SCTP implementation is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ * ************************
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNU CC; see the file COPYING. 
If not, see + * <http://www.gnu.org/licenses/>. + * + * Please send any bug reports or fixes you make to the + * email address(es): + * lksctp developers <linux-sctp@vger.kernel.org> + * + * Written or modified by: + * La Monte H.P. Yarroll <piggy@acm.org> + * Karl Knutson <karl@athena.chicago.il.us> + * Perry Melange <pmelange@null.cc.uic.edu> + * Xingang Guo <xingang.guo@intel.com> + * Hui Huang <hui.huang@nokia.com> + * Sridhar Samudrala <sri@us.ibm.com> + * Jon Grimm <jgrimm@us.ibm.com> + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/types.h> +#include <linux/list.h> /* For struct list_head */ +#include <linux/socket.h> +#include <linux/ip.h> +#include <linux/slab.h> +#include <net/sock.h> /* For skb_set_owner_w */ + +#include <net/sctp/sctp.h> +#include <net/sctp/sm.h> + +/* Declare internal functions here. */ +static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn); +static void sctp_check_transmitted(struct sctp_outq *q, + struct list_head *transmitted_queue, + struct sctp_transport *transport, + union sctp_addr *saddr, + struct sctp_sackhdr *sack, + __u32 *highest_new_tsn); + +static void sctp_mark_missing(struct sctp_outq *q, + struct list_head *transmitted_queue, + struct sctp_transport *transport, + __u32 highest_new_tsn, + int count_of_newacks); + +static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn); + +static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout); + +/* Add data to the front of the queue. */ +static inline void sctp_outq_head_data(struct sctp_outq *q, + struct sctp_chunk *ch) +{ + list_add(&ch->list, &q->out_chunk_list); + q->out_qlen += ch->skb->len; +} + +/* Take data from the front of the queue. */ +static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q) +{ + struct sctp_chunk *ch = NULL; + + if (!list_empty(&q->out_chunk_list)) { + struct list_head *entry = q->out_chunk_list.next; + + ch = list_entry(entry, struct sctp_chunk, list); + list_del_init(entry); + q->out_qlen -= ch->skb->len; + } + return ch; +} +/* Add data chunk to the end of the queue. */ +static inline void sctp_outq_tail_data(struct sctp_outq *q, + struct sctp_chunk *ch) +{ + list_add_tail(&ch->list, &q->out_chunk_list); + q->out_qlen += ch->skb->len; +} + +/* + * SFR-CACC algorithm: + * D) If count_of_newacks is greater than or equal to 2 + * and t was not sent to the current primary then the + * sender MUST NOT increment missing report count for t. + */ +static inline int sctp_cacc_skip_3_1_d(struct sctp_transport *primary, + struct sctp_transport *transport, + int count_of_newacks) +{ + if (count_of_newacks >= 2 && transport != primary) + return 1; + return 0; +} + +/* + * SFR-CACC algorithm: + * F) If count_of_newacks is less than 2, let d be the + * destination to which t was sent. If cacc_saw_newack + * is 0 for destination d, then the sender MUST NOT + * increment missing report count for t. + */ +static inline int sctp_cacc_skip_3_1_f(struct sctp_transport *transport, + int count_of_newacks) +{ + if (count_of_newacks < 2 && + (transport && !transport->cacc.cacc_saw_newack)) + return 1; + return 0; +} + +/* + * SFR-CACC algorithm: + * 3.1) If CYCLING_CHANGEOVER is 0, the sender SHOULD + * execute steps C, D, F. 
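+ *
+ * (The TSN comparisons used by rule 3.2 below are serial-number
+ * comparisons modulo 2^32. TSN_lt() amounts to a signed-difference
+ * test; a hypothetical userspace rendering:
+ *
+ *	static int tsn_lt(uint32_t a, uint32_t b)
+ *	{
+ *		return (int32_t)(a - b) < 0;
+ *	}
+ *
+ * so that tsn_lt(0xfffffffe, 1) holds across the 32-bit wrap.)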
+ * + * C has been implemented in sctp_outq_sack + */ +static inline int sctp_cacc_skip_3_1(struct sctp_transport *primary, + struct sctp_transport *transport, + int count_of_newacks) +{ + if (!primary->cacc.cycling_changeover) { + if (sctp_cacc_skip_3_1_d(primary, transport, count_of_newacks)) + return 1; + if (sctp_cacc_skip_3_1_f(transport, count_of_newacks)) + return 1; + return 0; + } + return 0; +} + +/* + * SFR-CACC algorithm: + * 3.2) Else if CYCLING_CHANGEOVER is 1, and t is less + * than next_tsn_at_change of the current primary, then + * the sender MUST NOT increment missing report count + * for t. + */ +static inline int sctp_cacc_skip_3_2(struct sctp_transport *primary, __u32 tsn) +{ + if (primary->cacc.cycling_changeover && + TSN_lt(tsn, primary->cacc.next_tsn_at_change)) + return 1; + return 0; +} + +/* + * SFR-CACC algorithm: + * 3) If the missing report count for TSN t is to be + * incremented according to [RFC2960] and + * [SCTP_STEWART-2002], and CHANGEOVER_ACTIVE is set, + * then the sender MUST further execute steps 3.1 and + * 3.2 to determine if the missing report count for + * TSN t SHOULD NOT be incremented. + * + * 3.3) If 3.1 and 3.2 do not dictate that the missing + * report count for t should not be incremented, then + * the sender SHOULD increment missing report count for + * t (according to [RFC2960] and [SCTP_STEWART_2002]). + */ +static inline int sctp_cacc_skip(struct sctp_transport *primary, + struct sctp_transport *transport, + int count_of_newacks, + __u32 tsn) +{ + if (primary->cacc.changeover_active && + (sctp_cacc_skip_3_1(primary, transport, count_of_newacks) || + sctp_cacc_skip_3_2(primary, tsn))) + return 1; + return 0; +} + +/* Initialize an existing sctp_outq. This does the boring stuff. + * You still need to define handlers if you really want to DO + * something with this structure... + */ +void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q) +{ + memset(q, 0, sizeof(struct sctp_outq)); + + q->asoc = asoc; + INIT_LIST_HEAD(&q->out_chunk_list); + INIT_LIST_HEAD(&q->control_chunk_list); + INIT_LIST_HEAD(&q->retransmit); + INIT_LIST_HEAD(&q->sacked); + INIT_LIST_HEAD(&q->abandoned); +} + +/* Free the outqueue structure and any related pending chunks. + */ +static void __sctp_outq_teardown(struct sctp_outq *q) +{ + struct sctp_transport *transport; + struct list_head *lchunk, *temp; + struct sctp_chunk *chunk, *tmp; + + /* Throw away unacknowledged chunks. */ + list_for_each_entry(transport, &q->asoc->peer.transport_addr_list, + transports) { + while ((lchunk = sctp_list_dequeue(&transport->transmitted)) != NULL) { + chunk = list_entry(lchunk, struct sctp_chunk, + transmitted_list); + /* Mark as part of a failed message. */ + sctp_chunk_fail(chunk, q->error); + sctp_chunk_free(chunk); + } + } + + /* Throw away chunks that have been gap ACKed. */ + list_for_each_safe(lchunk, temp, &q->sacked) { + list_del_init(lchunk); + chunk = list_entry(lchunk, struct sctp_chunk, + transmitted_list); + sctp_chunk_fail(chunk, q->error); + sctp_chunk_free(chunk); + } + + /* Throw away any chunks in the retransmit queue. */ + list_for_each_safe(lchunk, temp, &q->retransmit) { + list_del_init(lchunk); + chunk = list_entry(lchunk, struct sctp_chunk, + transmitted_list); + sctp_chunk_fail(chunk, q->error); + sctp_chunk_free(chunk); + } + + /* Throw away any chunks that are in the abandoned queue. 
*/ + list_for_each_safe(lchunk, temp, &q->abandoned) { + list_del_init(lchunk); + chunk = list_entry(lchunk, struct sctp_chunk, + transmitted_list); + sctp_chunk_fail(chunk, q->error); + sctp_chunk_free(chunk); + } + + /* Throw away any leftover data chunks. */ + while ((chunk = sctp_outq_dequeue_data(q)) != NULL) { + + /* Mark as send failure. */ + sctp_chunk_fail(chunk, q->error); + sctp_chunk_free(chunk); + } + + /* Throw away any leftover control chunks. */ + list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) { + list_del_init(&chunk->list); + sctp_chunk_free(chunk); + } +} + +void sctp_outq_teardown(struct sctp_outq *q) +{ + __sctp_outq_teardown(q); + sctp_outq_init(q->asoc, q); +} + +/* Free the outqueue structure and any related pending chunks. */ +void sctp_outq_free(struct sctp_outq *q) +{ + /* Throw away leftover chunks. */ + __sctp_outq_teardown(q); +} + +/* Put a new chunk in an sctp_outq. */ +int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk) +{ + struct net *net = sock_net(q->asoc->base.sk); + int error = 0; + + pr_debug("%s: outq:%p, chunk:%p[%s]\n", __func__, q, chunk, + chunk && chunk->chunk_hdr ? + sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) : + "illegal chunk"); + + /* If it is data, queue it up, otherwise, send it + * immediately. + */ + if (sctp_chunk_is_data(chunk)) { + /* Is it OK to queue data chunks? */ + /* From 9. Termination of Association + * + * When either endpoint performs a shutdown, the + * association on each peer will stop accepting new + * data from its user and only deliver data in queue + * at the time of sending or receiving the SHUTDOWN + * chunk. + */ + switch (q->asoc->state) { + case SCTP_STATE_CLOSED: + case SCTP_STATE_SHUTDOWN_PENDING: + case SCTP_STATE_SHUTDOWN_SENT: + case SCTP_STATE_SHUTDOWN_RECEIVED: + case SCTP_STATE_SHUTDOWN_ACK_SENT: + /* Cannot send after transport endpoint shutdown */ + error = -ESHUTDOWN; + break; + + default: + pr_debug("%s: outqueueing: outq:%p, chunk:%p[%s])\n", + __func__, q, chunk, chunk && chunk->chunk_hdr ? + sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) : + "illegal chunk"); + + sctp_outq_tail_data(q, chunk); + if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) + SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS); + else + SCTP_INC_STATS(net, SCTP_MIB_OUTORDERCHUNKS); + break; + } + } else { + list_add_tail(&chunk->list, &q->control_chunk_list); + SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS); + } + + if (error < 0) + return error; + + if (!q->cork) + error = sctp_outq_flush(q, 0); + + return error; +} + +/* Insert a chunk into the sorted list based on the TSNs. The retransmit list + * and the abandoned list are in ascending order. + */ +static void sctp_insert_list(struct list_head *head, struct list_head *new) +{ + struct list_head *pos; + struct sctp_chunk *nchunk, *lchunk; + __u32 ntsn, ltsn; + int done = 0; + + nchunk = list_entry(new, struct sctp_chunk, transmitted_list); + ntsn = ntohl(nchunk->subh.data_hdr->tsn); + + list_for_each(pos, head) { + lchunk = list_entry(pos, struct sctp_chunk, transmitted_list); + ltsn = ntohl(lchunk->subh.data_hdr->tsn); + if (TSN_lt(ntsn, ltsn)) { + list_add(new, pos->prev); + done = 1; + break; + } + } + if (!done) + list_add_tail(new, head); +} + +/* Mark all the eligible packets on a transport for retransmission. */ +void sctp_retransmit_mark(struct sctp_outq *q, + struct sctp_transport *transport, + __u8 reason) +{ + struct list_head *lchunk, *ltemp; + struct sctp_chunk *chunk; + + /* Walk through the specified transmitted queue. 
*/
+ list_for_each_safe(lchunk, ltemp, &transport->transmitted) {
+ chunk = list_entry(lchunk, struct sctp_chunk,
+ transmitted_list);
+
+ /* If the chunk is abandoned, move it to abandoned list. */
+ if (sctp_chunk_abandoned(chunk)) {
+ list_del_init(lchunk);
+ sctp_insert_list(&q->abandoned, lchunk);
+
+ /* If this chunk has not been previously acked,
+ * stop considering it 'outstanding'. Our peer
+ * will most likely never see it since it will
+ * not be retransmitted.
+ */
+ if (!chunk->tsn_gap_acked) {
+ if (chunk->transport)
+ chunk->transport->flight_size -=
+ sctp_data_size(chunk);
+ q->outstanding_bytes -= sctp_data_size(chunk);
+ q->asoc->peer.rwnd += sctp_data_size(chunk);
+ }
+ continue;
+ }
+
+ /* If we are doing retransmission due to a timeout or pmtu
+ * discovery, only the chunks that are not yet acked should
+ * be added to the retransmit queue.
+ */
+ if ((reason == SCTP_RTXR_FAST_RTX &&
+ (chunk->fast_retransmit == SCTP_NEED_FRTX)) ||
+ (reason != SCTP_RTXR_FAST_RTX && !chunk->tsn_gap_acked)) {
+ /* RFC 2960 6.2.1 Processing a Received SACK
+ *
+ * C) Any time a DATA chunk is marked for
+ * retransmission (via either T3-rtx timer expiration
+ * (Section 6.3.3) or via fast retransmit
+ * (Section 7.2.4)), add the data size of those
+ * chunks to the rwnd.
+ */
+ q->asoc->peer.rwnd += sctp_data_size(chunk);
+ q->outstanding_bytes -= sctp_data_size(chunk);
+ if (chunk->transport)
+ transport->flight_size -= sctp_data_size(chunk);
+
+ /* sctpimpguide-05 Section 2.8.2
+ * M5) If a T3-rtx timer expires, the
+ * 'TSN.Missing.Report' of all affected TSNs is set
+ * to 0.
+ */
+ chunk->tsn_missing_report = 0;
+
+ /* If a chunk that is being used for RTT measurement
+ * has to be retransmitted, we cannot use this chunk
+ * anymore for RTT measurements. Reset rto_pending so
+ * that a new RTT measurement is started when a new
+ * data chunk is sent.
+ */
+ if (chunk->rtt_in_progress) {
+ chunk->rtt_in_progress = 0;
+ transport->rto_pending = 0;
+ }
+
+ chunk->resent = 1;
+
+ /* Move the chunk to the retransmit queue. The chunks
+ * on the retransmit queue are always kept in order.
+ */
+ list_del_init(lchunk);
+ sctp_insert_list(&q->retransmit, lchunk);
+ }
+ }
+
+ pr_debug("%s: transport:%p, reason:%d, cwnd:%d, ssthresh:%d, "
+ "flight_size:%d, pba:%d\n", __func__, transport, reason,
+ transport->cwnd, transport->ssthresh, transport->flight_size,
+ transport->partial_bytes_acked);
+}
+
+/* Mark all the eligible packets on a transport for retransmission and force
+ * one packet out.
+ */
+void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
+ sctp_retransmit_reason_t reason)
+{
+ struct net *net = sock_net(q->asoc->base.sk);
+ int error = 0;
+
+ switch (reason) {
+ case SCTP_RTXR_T3_RTX:
+ SCTP_INC_STATS(net, SCTP_MIB_T3_RETRANSMITS);
+ sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX);
+ /* Update the retran path if the T3-rtx timer has expired for
+ * the current retran path. 
+ */
+ if (transport == transport->asoc->peer.retran_path)
+ sctp_assoc_update_retran_path(transport->asoc);
+ transport->asoc->rtx_data_chunks +=
+ transport->asoc->unack_data;
+ break;
+ case SCTP_RTXR_FAST_RTX:
+ SCTP_INC_STATS(net, SCTP_MIB_FAST_RETRANSMITS);
+ sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
+ q->fast_rtx = 1;
+ break;
+ case SCTP_RTXR_PMTUD:
+ SCTP_INC_STATS(net, SCTP_MIB_PMTUD_RETRANSMITS);
+ break;
+ case SCTP_RTXR_T1_RTX:
+ SCTP_INC_STATS(net, SCTP_MIB_T1_RETRANSMITS);
+ transport->asoc->init_retries++;
+ break;
+ default:
+ BUG();
+ }
+
+ sctp_retransmit_mark(q, transport, reason);
+
+ /* PR-SCTP A5) Any time the T3-rtx timer expires, on any destination,
+ * the sender SHOULD try to advance the "Advanced.Peer.Ack.Point" by
+ * following the procedures outlined in C1 - C5.
+ */
+ if (reason == SCTP_RTXR_T3_RTX)
+ sctp_generate_fwdtsn(q, q->asoc->ctsn_ack_point);
+
+ /* Flush the queues only on timeout, since fast_rtx is only
+ * triggered during sack processing and the queue
+ * will be flushed at the end.
+ */
+ if (reason != SCTP_RTXR_FAST_RTX)
+ error = sctp_outq_flush(q, /* rtx_timeout */ 1);
+
+ if (error)
+ q->asoc->base.sk->sk_err = -error;
+}
+
+/*
+ * Transmit DATA chunks on the retransmit queue. Upon return from
+ * sctp_outq_flush_rtx() the packet 'pkt' may contain chunks which
+ * need to be transmitted by the caller.
+ * We assume that pkt->transport has already been set.
+ *
+ * The return value is a normal kernel error return value.
+ */
+static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
+ int rtx_timeout, int *start_timer)
+{
+ struct list_head *lqueue;
+ struct sctp_transport *transport = pkt->transport;
+ sctp_xmit_t status;
+ struct sctp_chunk *chunk, *chunk1;
+ int fast_rtx;
+ int error = 0;
+ int timer = 0;
+ int done = 0;
+
+ lqueue = &q->retransmit;
+ fast_rtx = q->fast_rtx;
+
+ /* This loop handles time-out retransmissions, fast retransmissions,
+ * and retransmissions due to the opening of the window.
+ *
+ * RFC 2960 6.3.3 Handle T3-rtx Expiration
+ *
+ * E3) Determine how many of the earliest (i.e., lowest TSN)
+ * outstanding DATA chunks for the address for which the
+ * T3-rtx has expired will fit into a single packet, subject
+ * to the MTU constraint for the path corresponding to the
+ * destination transport address to which the retransmission
+ * is being sent (this may be different from the address for
+ * which the timer expires [see Section 6.4]). Call this value
+ * K. Bundle and retransmit those K DATA chunks in a single
+ * packet to the destination endpoint.
+ *
+ * [Just to be painfully clear, if we are retransmitting
+ * because a timeout just happened, we should send only ONE
+ * packet of retransmitted data.]
+ *
+ * For fast retransmissions we also send only ONE packet. However,
+ * if we are just flushing the queue due to open window, we'll
+ * try to send as much as possible.
+ */
+ list_for_each_entry_safe(chunk, chunk1, lqueue, transmitted_list) {
+ /* If the chunk is abandoned, move it to abandoned list. */
+ if (sctp_chunk_abandoned(chunk)) {
+ list_del_init(&chunk->transmitted_list);
+ sctp_insert_list(&q->abandoned,
+ &chunk->transmitted_list);
+ continue;
+ }
+
+ /* Make sure that Gap Acked TSNs are not retransmitted. A
+ * simple approach is just to move such TSNs out of the
+ * way and into a 'transmitted' queue and skip to the
+ * next chunk. 
+		 */
+		if (chunk->tsn_gap_acked) {
+			list_move_tail(&chunk->transmitted_list,
+				       &transport->transmitted);
+			continue;
+		}
+
+		/* If we are doing fast retransmit, ignore non-fast_retransmit
+		 * chunks
+		 */
+		if (fast_rtx && !chunk->fast_retransmit)
+			continue;
+
+redo:
+		/* Attempt to append this chunk to the packet. */
+		status = sctp_packet_append_chunk(pkt, chunk);
+
+		switch (status) {
+		case SCTP_XMIT_PMTU_FULL:
+			if (!pkt->has_data && !pkt->has_cookie_echo) {
+				/* If this packet did not contain DATA then
+				 * retransmission did not happen, so do it
+				 * again.  We'll ignore the error here since
+				 * control chunks are already freed so there
+				 * is nothing we can do.
+				 */
+				sctp_packet_transmit(pkt);
+				goto redo;
+			}
+
+			/* Send this packet. */
+			error = sctp_packet_transmit(pkt);
+
+			/* If we are retransmitting, we should only
+			 * send a single packet.
+			 * Otherwise, try appending this chunk again.
+			 */
+			if (rtx_timeout || fast_rtx)
+				done = 1;
+			else
+				goto redo;
+
+			/* Bundle next chunk in the next round. */
+			break;
+
+		case SCTP_XMIT_RWND_FULL:
+			/* Send this packet. */
+			error = sctp_packet_transmit(pkt);
+
+			/* Stop sending DATA as there is no more room
+			 * at the receiver.
+			 */
+			done = 1;
+			break;
+
+		case SCTP_XMIT_DELAY:
+			/* Send this packet. */
+			error = sctp_packet_transmit(pkt);
+
+			/* Stop sending DATA because of Nagle delay. */
+			done = 1;
+			break;
+
+		default:
+			/* The append was successful, so add this chunk to
+			 * the transmitted list.
+			 */
+			list_move_tail(&chunk->transmitted_list,
+				       &transport->transmitted);
+
+			/* Mark the chunk as ineligible for fast retransmit
+			 * after it is retransmitted.
+			 */
+			if (chunk->fast_retransmit == SCTP_NEED_FRTX)
+				chunk->fast_retransmit = SCTP_DONT_FRTX;
+
+			q->asoc->stats.rtxchunks++;
+			break;
+		}
+
+		/* Set the timer if there were no errors */
+		if (!error && !timer)
+			timer = 1;
+
+		if (done)
+			break;
+	}
+
+	/* If we are here due to a retransmit timeout or a fast
+	 * retransmit and if there are any chunks left in the retransmit
+	 * queue that could not fit in the PMTU sized packet, they need
+	 * to be marked as ineligible for a subsequent fast retransmit.
+	 */
+	if (rtx_timeout || fast_rtx) {
+		list_for_each_entry(chunk1, lqueue, transmitted_list) {
+			if (chunk1->fast_retransmit == SCTP_NEED_FRTX)
+				chunk1->fast_retransmit = SCTP_DONT_FRTX;
+		}
+	}
+
+	*start_timer = timer;
+
+	/* Clear fast retransmit hint */
+	if (fast_rtx)
+		q->fast_rtx = 0;
+
+	return error;
+}
+
+/* Uncork the outqueue so that queued chunks are actually sent. */
+int sctp_outq_uncork(struct sctp_outq *q)
+{
+	if (q->cork)
+		q->cork = 0;
+
+	return sctp_outq_flush(q, 0);
+}
+
+
+/*
+ * Try to flush an outqueue.
+ *
+ * Description: Send everything in q which we legally can, subject to
+ * congestion limitations.
+ *
+ * Note: This function can be called from multiple contexts so appropriate
+ * locking concerns must be made.  Today we use the sock lock to protect
+ * this function.
+ */
+static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
+{
+	struct sctp_packet *packet;
+	struct sctp_packet singleton;
+	struct sctp_association *asoc = q->asoc;
+	__u16 sport = asoc->base.bind_addr.port;
+	__u16 dport = asoc->peer.port;
+	__u32 vtag = asoc->peer.i.init_tag;
+	struct sctp_transport *transport = NULL;
+	struct sctp_transport *new_transport;
+	struct sctp_chunk *chunk, *tmp;
+	sctp_xmit_t status;
+	int error = 0;
+	int start_timer = 0;
+	int one_packet = 0;
+
+	/* These transports have chunks to send.
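+	 * Each transport that gets a chunk queued below is also added
+	 * to this list via its send_ready entry, so that its packet
+	 * can be flushed exactly once at the sctp_flush_out label.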
*/ + struct list_head transport_list; + struct list_head *ltransport; + + INIT_LIST_HEAD(&transport_list); + packet = NULL; + + /* + * 6.10 Bundling + * ... + * When bundling control chunks with DATA chunks, an + * endpoint MUST place control chunks first in the outbound + * SCTP packet. The transmitter MUST transmit DATA chunks + * within a SCTP packet in increasing order of TSN. + * ... + */ + + list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) { + /* RFC 5061, 5.3 + * F1) This means that until such time as the ASCONF + * containing the add is acknowledged, the sender MUST + * NOT use the new IP address as a source for ANY SCTP + * packet except on carrying an ASCONF Chunk. + */ + if (asoc->src_out_of_asoc_ok && + chunk->chunk_hdr->type != SCTP_CID_ASCONF) + continue; + + list_del_init(&chunk->list); + + /* Pick the right transport to use. */ + new_transport = chunk->transport; + + if (!new_transport) { + /* + * If we have a prior transport pointer, see if + * the destination address of the chunk + * matches the destination address of the + * current transport. If not a match, then + * try to look up the transport with a given + * destination address. We do this because + * after processing ASCONFs, we may have new + * transports created. + */ + if (transport && + sctp_cmp_addr_exact(&chunk->dest, + &transport->ipaddr)) + new_transport = transport; + else + new_transport = sctp_assoc_lookup_paddr(asoc, + &chunk->dest); + + /* if we still don't have a new transport, then + * use the current active path. + */ + if (!new_transport) + new_transport = asoc->peer.active_path; + } else if ((new_transport->state == SCTP_INACTIVE) || + (new_transport->state == SCTP_UNCONFIRMED) || + (new_transport->state == SCTP_PF)) { + /* If the chunk is Heartbeat or Heartbeat Ack, + * send it to chunk->transport, even if it's + * inactive. + * + * 3.3.6 Heartbeat Acknowledgement: + * ... + * A HEARTBEAT ACK is always sent to the source IP + * address of the IP datagram containing the + * HEARTBEAT chunk to which this ack is responding. + * ... + * + * ASCONF_ACKs also must be sent to the source. + */ + if (chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT && + chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT_ACK && + chunk->chunk_hdr->type != SCTP_CID_ASCONF_ACK) + new_transport = asoc->peer.active_path; + } + + /* Are we switching transports? + * Take care of transport locks. + */ + if (new_transport != transport) { + transport = new_transport; + if (list_empty(&transport->send_ready)) { + list_add_tail(&transport->send_ready, + &transport_list); + } + packet = &transport->packet; + sctp_packet_config(packet, vtag, + asoc->peer.ecn_capable); + } + + switch (chunk->chunk_hdr->type) { + /* + * 6.10 Bundling + * ... + * An endpoint MUST NOT bundle INIT, INIT ACK or SHUTDOWN + * COMPLETE with any other chunks. [Send them immediately.] + */ + case SCTP_CID_INIT: + case SCTP_CID_INIT_ACK: + case SCTP_CID_SHUTDOWN_COMPLETE: + sctp_packet_init(&singleton, transport, sport, dport); + sctp_packet_config(&singleton, vtag, 0); + sctp_packet_append_chunk(&singleton, chunk); + error = sctp_packet_transmit(&singleton); + if (error < 0) + return error; + break; + + case SCTP_CID_ABORT: + if (sctp_test_T_bit(chunk)) { + packet->vtag = asoc->c.my_vtag; + } + /* The following chunks are "response" chunks, i.e. + * they are generated in response to something we + * received. If we are sending these, then we can + * send only 1 packet containing these chunks. 
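+		 * (Note: the SCTP_CID_ABORT case above deliberately
+		 * falls through into the response-chunk cases that
+		 * follow, so an ABORT is likewise limited to a single
+		 * packet.)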
+		 */
+		case SCTP_CID_HEARTBEAT_ACK:
+		case SCTP_CID_SHUTDOWN_ACK:
+		case SCTP_CID_COOKIE_ACK:
+		case SCTP_CID_COOKIE_ECHO:
+		case SCTP_CID_ERROR:
+		case SCTP_CID_ECN_CWR:
+		case SCTP_CID_ASCONF_ACK:
+			one_packet = 1;
+			/* Fall through */
+
+		case SCTP_CID_SACK:
+		case SCTP_CID_HEARTBEAT:
+		case SCTP_CID_SHUTDOWN:
+		case SCTP_CID_ECN_ECNE:
+		case SCTP_CID_ASCONF:
+		case SCTP_CID_FWD_TSN:
+			status = sctp_packet_transmit_chunk(packet, chunk,
+							    one_packet);
+			if (status != SCTP_XMIT_OK) {
+				/* put the chunk back */
+				list_add(&chunk->list, &q->control_chunk_list);
+			} else {
+				asoc->stats.octrlchunks++;
+				/* PR-SCTP C5) If a FORWARD TSN is sent, the
+				 * sender MUST assure that at least one T3-rtx
+				 * timer is running.
+				 */
+				if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN)
+					sctp_transport_reset_timers(transport);
+			}
+			break;
+
+		default:
+			/* We built a chunk with an illegal type! */
+			BUG();
+		}
+	}
+
+	if (q->asoc->src_out_of_asoc_ok)
+		goto sctp_flush_out;
+
+	/* Is it OK to send data chunks? */
+	switch (asoc->state) {
+	case SCTP_STATE_COOKIE_ECHOED:
+		/* Only allow bundling when this packet has a COOKIE-ECHO
+		 * chunk.
+		 */
+		if (!packet || !packet->has_cookie_echo)
+			break;
+
+		/* fallthru */
+	case SCTP_STATE_ESTABLISHED:
+	case SCTP_STATE_SHUTDOWN_PENDING:
+	case SCTP_STATE_SHUTDOWN_RECEIVED:
+		/*
+		 * RFC 2960 6.1 Transmission of DATA Chunks
+		 *
+		 * C) When the time comes for the sender to transmit,
+		 * before sending new DATA chunks, the sender MUST
+		 * first transmit any outstanding DATA chunks which
+		 * are marked for retransmission (limited by the
+		 * current cwnd).
+		 */
+		if (!list_empty(&q->retransmit)) {
+			if (asoc->peer.retran_path->state == SCTP_UNCONFIRMED)
+				goto sctp_flush_out;
+			if (transport == asoc->peer.retran_path)
+				goto retran;
+
+			/* Switch transports & prepare the packet. */
+
+			transport = asoc->peer.retran_path;
+
+			if (list_empty(&transport->send_ready)) {
+				list_add_tail(&transport->send_ready,
+					      &transport_list);
+			}
+
+			packet = &transport->packet;
+			sctp_packet_config(packet, vtag,
+					   asoc->peer.ecn_capable);
+		retran:
+			error = sctp_outq_flush_rtx(q, packet,
+						    rtx_timeout, &start_timer);
+
+			if (start_timer)
+				sctp_transport_reset_timers(transport);
+
+			/* This can happen on COOKIE-ECHO resend.  Only
+			 * one chunk can get bundled with a COOKIE-ECHO.
+			 */
+			if (packet->has_cookie_echo)
+				goto sctp_flush_out;
+
+			/* Don't send new data if there is still data
+			 * waiting to retransmit.
+			 */
+			if (!list_empty(&q->retransmit))
+				goto sctp_flush_out;
+		}
+
+		/* Apply Max.Burst limitation to the current transport in
+		 * case it will be used for new data.  We are going to
+		 * reset it before we return, but we want to apply the limit
+		 * to the currently queued data.
+		 */
+		if (transport)
+			sctp_transport_burst_limited(transport);
+
+		/* Finally, transmit new packets. */
+		while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
+			/* RFC 2960 6.5 Every DATA chunk MUST carry a valid
+			 * stream identifier.
+			 */
+			if (chunk->sinfo.sinfo_stream >=
+			    asoc->c.sinit_num_ostreams) {
+
+				/* Mark as failed send. */
+				sctp_chunk_fail(chunk, SCTP_ERROR_INV_STRM);
+				sctp_chunk_free(chunk);
+				continue;
+			}
+
+			/* Has this chunk expired? */
+			if (sctp_chunk_abandoned(chunk)) {
+				sctp_chunk_fail(chunk, 0);
+				sctp_chunk_free(chunk);
+				continue;
+			}
+
+			/* If there is a specified transport, use it.
+			 * Otherwise, we want to use the active path.
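+			 * (Inactive, unconfirmed and PF transports fall
+			 * back to the active path below; if even the
+			 * active path is still unconfirmed, the DATA
+			 * chunk is not sent on this pass.)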
+			 */
+			new_transport = chunk->transport;
+			if (!new_transport ||
+			    ((new_transport->state == SCTP_INACTIVE) ||
+			     (new_transport->state == SCTP_UNCONFIRMED) ||
+			     (new_transport->state == SCTP_PF)))
+				new_transport = asoc->peer.active_path;
+			if (new_transport->state == SCTP_UNCONFIRMED)
+				continue;
+
+			/* Change packets if necessary. */
+			if (new_transport != transport) {
+				transport = new_transport;
+
+				/* Schedule to have this transport's
+				 * packet flushed.
+				 */
+				if (list_empty(&transport->send_ready)) {
+					list_add_tail(&transport->send_ready,
+						      &transport_list);
+				}
+
+				packet = &transport->packet;
+				sctp_packet_config(packet, vtag,
+						   asoc->peer.ecn_capable);
+				/* We've switched transports, so apply the
+				 * Burst limit to the new transport.
+				 */
+				sctp_transport_burst_limited(transport);
+			}
+
+			pr_debug("%s: outq:%p, chunk:%p[%s], tx-tsn:0x%x skb->head:%p "
+				 "skb->users:%d\n",
+				 __func__, q, chunk, chunk && chunk->chunk_hdr ?
+				 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) :
+				 "illegal chunk", ntohl(chunk->subh.data_hdr->tsn),
+				 chunk->skb ? chunk->skb->head : NULL, chunk->skb ?
+				 atomic_read(&chunk->skb->users) : -1);
+
+			/* Add the chunk to the packet. */
+			status = sctp_packet_transmit_chunk(packet, chunk, 0);
+
+			switch (status) {
+			case SCTP_XMIT_PMTU_FULL:
+			case SCTP_XMIT_RWND_FULL:
+			case SCTP_XMIT_DELAY:
+				/* We could not append this chunk, so put
+				 * the chunk back on the output queue.
+				 */
+				pr_debug("%s: could not transmit tsn:0x%x, status:%d\n",
+					 __func__, ntohl(chunk->subh.data_hdr->tsn),
+					 status);
+
+				sctp_outq_head_data(q, chunk);
+				goto sctp_flush_out;
+
+			case SCTP_XMIT_OK:
+				/* If the sender is in the SHUTDOWN-PENDING
+				 * state, it MAY set the I-bit in the DATA
+				 * chunk header.
+				 */
+				if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING)
+					chunk->chunk_hdr->flags |= SCTP_DATA_SACK_IMM;
+				if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
+					asoc->stats.ouodchunks++;
+				else
+					asoc->stats.oodchunks++;
+
+				break;
+
+			default:
+				BUG();
+			}
+
+			/* BUG: We assume that the sctp_packet_transmit()
+			 * call below will succeed all the time and add the
+			 * chunk to the transmitted list and restart the
+			 * timers.
+			 * It is possible that the call can fail under OOM
+			 * conditions.
+			 *
+			 * Is this really a problem?  Won't this behave
+			 * like a lost TSN?
+			 */
+			list_add_tail(&chunk->transmitted_list,
+				      &transport->transmitted);
+
+			sctp_transport_reset_timers(transport);
+
+			/* Only let one DATA chunk get bundled with a
+			 * COOKIE-ECHO chunk.
+			 */
+			if (packet->has_cookie_echo)
+				goto sctp_flush_out;
+		}
+		break;
+
+	default:
+		/* Do nothing. */
+		break;
+	}
+
+sctp_flush_out:
+
+	/* Before returning, examine all the transports touched in
+	 * this call.  Right now, we bluntly force clear all the
+	 * transports.  Things might change after we implement Nagle.
+	 * But such an examination is still required.
+ * + * --xguo + */ + while ((ltransport = sctp_list_dequeue(&transport_list)) != NULL) { + struct sctp_transport *t = list_entry(ltransport, + struct sctp_transport, + send_ready); + packet = &t->packet; + if (!sctp_packet_empty(packet)) + error = sctp_packet_transmit(packet); + + /* Clear the burst limited state, if any */ + sctp_transport_burst_reset(t); + } + + return error; +} + +/* Update unack_data based on the incoming SACK chunk */ +static void sctp_sack_update_unack_data(struct sctp_association *assoc, + struct sctp_sackhdr *sack) +{ + sctp_sack_variable_t *frags; + __u16 unack_data; + int i; + + unack_data = assoc->next_tsn - assoc->ctsn_ack_point - 1; + + frags = sack->variable; + for (i = 0; i < ntohs(sack->num_gap_ack_blocks); i++) { + unack_data -= ((ntohs(frags[i].gab.end) - + ntohs(frags[i].gab.start) + 1)); + } + + assoc->unack_data = unack_data; +} + +/* This is where we REALLY process a SACK. + * + * Process the SACK against the outqueue. Mostly, this just frees + * things off the transmitted queue. + */ +int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk) +{ + struct sctp_association *asoc = q->asoc; + struct sctp_sackhdr *sack = chunk->subh.sack_hdr; + struct sctp_transport *transport; + struct sctp_chunk *tchunk = NULL; + struct list_head *lchunk, *transport_list, *temp; + sctp_sack_variable_t *frags = sack->variable; + __u32 sack_ctsn, ctsn, tsn; + __u32 highest_tsn, highest_new_tsn; + __u32 sack_a_rwnd; + unsigned int outstanding; + struct sctp_transport *primary = asoc->peer.primary_path; + int count_of_newacks = 0; + int gap_ack_blocks; + u8 accum_moved = 0; + + /* Grab the association's destination address list. */ + transport_list = &asoc->peer.transport_addr_list; + + sack_ctsn = ntohl(sack->cum_tsn_ack); + gap_ack_blocks = ntohs(sack->num_gap_ack_blocks); + asoc->stats.gapcnt += gap_ack_blocks; + /* + * SFR-CACC algorithm: + * On receipt of a SACK the sender SHOULD execute the + * following statements. + * + * 1) If the cumulative ack in the SACK passes next tsn_at_change + * on the current primary, the CHANGEOVER_ACTIVE flag SHOULD be + * cleared. The CYCLING_CHANGEOVER flag SHOULD also be cleared for + * all destinations. + * 2) If the SACK contains gap acks and the flag CHANGEOVER_ACTIVE + * is set the receiver of the SACK MUST take the following actions: + * + * A) Initialize the cacc_saw_newack to 0 for all destination + * addresses. + * + * Only bother if changeover_active is set. Otherwise, this is + * totally suboptimal to do on every SACK. + */ + if (primary->cacc.changeover_active) { + u8 clear_cycling = 0; + + if (TSN_lte(primary->cacc.next_tsn_at_change, sack_ctsn)) { + primary->cacc.changeover_active = 0; + clear_cycling = 1; + } + + if (clear_cycling || gap_ack_blocks) { + list_for_each_entry(transport, transport_list, + transports) { + if (clear_cycling) + transport->cacc.cycling_changeover = 0; + if (gap_ack_blocks) + transport->cacc.cacc_saw_newack = 0; + } + } + } + + /* Get the highest TSN in the sack. */ + highest_tsn = sack_ctsn; + if (gap_ack_blocks) + highest_tsn += ntohs(frags[gap_ack_blocks - 1].gab.end); + + if (TSN_lt(asoc->highest_sacked, highest_tsn)) + asoc->highest_sacked = highest_tsn; + + highest_new_tsn = sack_ctsn; + + /* Run through the retransmit queue. Credit bytes received + * and free those chunks that we can. + */ + sctp_check_transmitted(q, &q->retransmit, NULL, NULL, sack, &highest_new_tsn); + + /* Run through the transmitted queue. + * Credit bytes received and free those chunks which we can. 
+	 *
+	 * This is a MASSIVE candidate for optimization.
+	 */
+	list_for_each_entry(transport, transport_list, transports) {
+		sctp_check_transmitted(q, &transport->transmitted,
+				       transport, &chunk->source, sack,
+				       &highest_new_tsn);
+		/*
+		 * SFR-CACC algorithm:
+		 * C) Let count_of_newacks be the number of
+		 * destinations for which cacc_saw_newack is set.
+		 */
+		if (transport->cacc.cacc_saw_newack)
+			count_of_newacks++;
+	}
+
+	/* Move the Cumulative TSN Ack Point if appropriate. */
+	if (TSN_lt(asoc->ctsn_ack_point, sack_ctsn)) {
+		asoc->ctsn_ack_point = sack_ctsn;
+		accum_moved = 1;
+	}
+
+	if (gap_ack_blocks) {
+
+		if (asoc->fast_recovery && accum_moved)
+			highest_new_tsn = highest_tsn;
+
+		list_for_each_entry(transport, transport_list, transports)
+			sctp_mark_missing(q, &transport->transmitted, transport,
+					  highest_new_tsn, count_of_newacks);
+	}
+
+	/* Update unack_data field in the assoc. */
+	sctp_sack_update_unack_data(asoc, sack);
+
+	ctsn = asoc->ctsn_ack_point;
+
+	/* Throw away stuff rotting on the sack queue. */
+	list_for_each_safe(lchunk, temp, &q->sacked) {
+		tchunk = list_entry(lchunk, struct sctp_chunk,
+				    transmitted_list);
+		tsn = ntohl(tchunk->subh.data_hdr->tsn);
+		if (TSN_lte(tsn, ctsn)) {
+			list_del_init(&tchunk->transmitted_list);
+			sctp_chunk_free(tchunk);
+		}
+	}
+
+	/* ii) Set rwnd equal to the newly received a_rwnd minus the
+	 *     number of bytes still outstanding after processing the
+	 *     Cumulative TSN Ack and the Gap Ack Blocks.
+	 */
+
+	sack_a_rwnd = ntohl(sack->a_rwnd);
+	outstanding = q->outstanding_bytes;
+
+	if (outstanding < sack_a_rwnd)
+		sack_a_rwnd -= outstanding;
+	else
+		sack_a_rwnd = 0;
+
+	asoc->peer.rwnd = sack_a_rwnd;
+
+	sctp_generate_fwdtsn(q, sack_ctsn);
+
+	pr_debug("%s: sack cumulative tsn ack:0x%x\n", __func__, sack_ctsn);
+	pr_debug("%s: cumulative tsn ack of assoc:%p is 0x%x, "
+		 "advertised peer ack point:0x%x\n", __func__, asoc, ctsn,
+		 asoc->adv_peer_ack_point);
+
+	return sctp_outq_is_empty(q);
+}
+
+/* Is the outqueue empty?
+ * The queue is empty when there is no pending data, no in-flight data,
+ * and no pending retransmissions.
+ */
+int sctp_outq_is_empty(const struct sctp_outq *q)
+{
+	return q->out_qlen == 0 && q->outstanding_bytes == 0 &&
+	       list_empty(&q->retransmit);
+}
+
+/********************************************************************
+ * 2nd Level Abstractions
+ ********************************************************************/
+
+/* Go through a transport's transmitted list or the association's retransmit
+ * list and move chunks that are acked by the Cumulative TSN Ack to q->sacked.
+ * The retransmit list will not have an associated transport.
+ *
+ * I added coherent debug information output.	--xguo
+ *
+ * Instead of printing 'sacked' or 'kept' for each TSN on the
+ * transmitted_queue, we print a range: SACKED: TSN1-TSN2, TSN3, TSN4-TSN5.
+ * KEPT TSN6-TSN7, etc.
+ */
+static void sctp_check_transmitted(struct sctp_outq *q,
+				   struct list_head *transmitted_queue,
+				   struct sctp_transport *transport,
+				   union sctp_addr *saddr,
+				   struct sctp_sackhdr *sack,
+				   __u32 *highest_new_tsn_in_sack)
+{
+	struct list_head *lchunk;
+	struct sctp_chunk *tchunk;
+	struct list_head tlist;
+	__u32 tsn;
+	__u32 sack_ctsn;
+	__u32 rtt;
+	__u8 restart_timer = 0;
+	int bytes_acked = 0;
+	int migrate_bytes = 0;
+	bool forward_progress = false;
+
+	sack_ctsn = ntohl(sack->cum_tsn_ack);
+
+	INIT_LIST_HEAD(&tlist);
+
+	/* The while loop will skip empty transmitted queues.
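+	 * Chunks that remain unacknowledged are collected on the local
+	 * tlist and spliced back onto the transmitted queue before
+	 * this function returns.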
*/ + while (NULL != (lchunk = sctp_list_dequeue(transmitted_queue))) { + tchunk = list_entry(lchunk, struct sctp_chunk, + transmitted_list); + + if (sctp_chunk_abandoned(tchunk)) { + /* Move the chunk to abandoned list. */ + sctp_insert_list(&q->abandoned, lchunk); + + /* If this chunk has not been acked, stop + * considering it as 'outstanding'. + */ + if (!tchunk->tsn_gap_acked) { + if (tchunk->transport) + tchunk->transport->flight_size -= + sctp_data_size(tchunk); + q->outstanding_bytes -= sctp_data_size(tchunk); + } + continue; + } + + tsn = ntohl(tchunk->subh.data_hdr->tsn); + if (sctp_acked(sack, tsn)) { + /* If this queue is the retransmit queue, the + * retransmit timer has already reclaimed + * the outstanding bytes for this chunk, so only + * count bytes associated with a transport. + */ + if (transport) { + /* If this chunk is being used for RTT + * measurement, calculate the RTT and update + * the RTO using this value. + * + * 6.3.1 C5) Karn's algorithm: RTT measurements + * MUST NOT be made using packets that were + * retransmitted (and thus for which it is + * ambiguous whether the reply was for the + * first instance of the packet or a later + * instance). + */ + if (!tchunk->tsn_gap_acked && + !tchunk->resent && + tchunk->rtt_in_progress) { + tchunk->rtt_in_progress = 0; + rtt = jiffies - tchunk->sent_at; + sctp_transport_update_rto(transport, + rtt); + } + } + + /* If the chunk hasn't been marked as ACKED, + * mark it and account bytes_acked if the + * chunk had a valid transport (it will not + * have a transport if ASCONF had deleted it + * while DATA was outstanding). + */ + if (!tchunk->tsn_gap_acked) { + tchunk->tsn_gap_acked = 1; + if (TSN_lt(*highest_new_tsn_in_sack, tsn)) + *highest_new_tsn_in_sack = tsn; + bytes_acked += sctp_data_size(tchunk); + if (!tchunk->transport) + migrate_bytes += sctp_data_size(tchunk); + forward_progress = true; + } + + if (TSN_lte(tsn, sack_ctsn)) { + /* RFC 2960 6.3.2 Retransmission Timer Rules + * + * R3) Whenever a SACK is received + * that acknowledges the DATA chunk + * with the earliest outstanding TSN + * for that address, restart T3-rtx + * timer for that address with its + * current RTO. + */ + restart_timer = 1; + forward_progress = true; + + if (!tchunk->tsn_gap_acked) { + /* + * SFR-CACC algorithm: + * 2) If the SACK contains gap acks + * and the flag CHANGEOVER_ACTIVE is + * set the receiver of the SACK MUST + * take the following action: + * + * B) For each TSN t being acked that + * has not been acked in any SACK so + * far, set cacc_saw_newack to 1 for + * the destination that the TSN was + * sent to. + */ + if (transport && + sack->num_gap_ack_blocks && + q->asoc->peer.primary_path->cacc. + changeover_active) + transport->cacc.cacc_saw_newack + = 1; + } + + list_add_tail(&tchunk->transmitted_list, + &q->sacked); + } else { + /* RFC2960 7.2.4, sctpimpguide-05 2.8.2 + * M2) Each time a SACK arrives reporting + * 'Stray DATA chunk(s)' record the highest TSN + * reported as newly acknowledged, call this + * value 'HighestTSNinSack'. A newly + * acknowledged DATA chunk is one not + * previously acknowledged in a SACK. + * + * When the SCTP sender of data receives a SACK + * chunk that acknowledges, for the first time, + * the receipt of a DATA chunk, all the still + * unacknowledged DATA chunks whose TSN is + * older than that newly acknowledged DATA + * chunk, are qualified as 'Stray DATA chunks'. 
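+				 *
+				 * (Editorial illustration with made-up
+				 * TSNs: if the cumulative ack is 100 and
+				 * a gap block newly acknowledges TSN 105,
+				 * HighestTSNinSack is 105 and the still
+				 * unacknowledged TSNs 101-104 qualify as
+				 * stray.)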
+				 */
+				list_add_tail(lchunk, &tlist);
+			}
+		} else {
+			if (tchunk->tsn_gap_acked) {
+				pr_debug("%s: receiver reneged on data TSN:0x%x\n",
+					 __func__, tsn);
+
+				tchunk->tsn_gap_acked = 0;
+
+				if (tchunk->transport)
+					bytes_acked -= sctp_data_size(tchunk);
+
+				/* RFC 2960 6.3.2 Retransmission Timer Rules
+				 *
+				 * R4) Whenever a SACK is received missing a
+				 * TSN that was previously acknowledged via a
+				 * Gap Ack Block, start T3-rtx for the
+				 * destination address to which the DATA
+				 * chunk was originally
+				 * transmitted if it is not already running.
+				 */
+				restart_timer = 1;
+			}
+
+			list_add_tail(lchunk, &tlist);
+		}
+	}
+
+	if (transport) {
+		if (bytes_acked) {
+			struct sctp_association *asoc = transport->asoc;
+
+			/* We may have counted DATA that was migrated
+			 * to this transport due to DEL-IP operation.
+			 * Subtract those bytes, since they were never
+			 * sent on this transport and shouldn't be
+			 * credited to this transport.
+			 */
+			bytes_acked -= migrate_bytes;
+
+			/* 8.2. When an outstanding TSN is acknowledged,
+			 * the endpoint shall clear the error counter of
+			 * the destination transport address to which the
+			 * DATA chunk was last sent.
+			 * The association's overall error counter is
+			 * also cleared.
+			 */
+			transport->error_count = 0;
+			transport->asoc->overall_error_count = 0;
+			forward_progress = true;
+
+			/*
+			 * While in SHUTDOWN PENDING, we may have started
+			 * the T5 shutdown guard timer after reaching the
+			 * retransmission limit. Stop that timer as soon
+			 * as the receiver acknowledged any data.
+			 */
+			if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING &&
+			    del_timer(&asoc->timers
+				[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]))
+					sctp_association_put(asoc);
+
+			/* Mark the destination transport address as
+			 * active if it is not so marked.
+			 */
+			if ((transport->state == SCTP_INACTIVE ||
+			     transport->state == SCTP_UNCONFIRMED) &&
+			    sctp_cmp_addr_exact(&transport->ipaddr, saddr)) {
+				sctp_assoc_control_transport(
+					transport->asoc,
+					transport,
+					SCTP_TRANSPORT_UP,
+					SCTP_RECEIVED_SACK);
+			}
+
+			sctp_transport_raise_cwnd(transport, sack_ctsn,
+						  bytes_acked);
+
+			transport->flight_size -= bytes_acked;
+			if (transport->flight_size == 0)
+				transport->partial_bytes_acked = 0;
+			q->outstanding_bytes -= bytes_acked + migrate_bytes;
+		} else {
+			/* RFC 2960 6.1, sctpimpguide-06 2.15.2
+			 * When a sender is doing zero window probing, it
+			 * should not timeout the association if it continues
+			 * to receive new packets from the receiver. The
+			 * reason is that the receiver MAY keep its window
+			 * closed for an indefinite time.
+			 * A sender is doing zero window probing when the
+			 * receiver's advertised window is zero, and there is
+			 * only one data chunk in flight to the receiver.
+			 *
+			 * Allow the association to timeout while in SHUTDOWN
+			 * PENDING or SHUTDOWN RECEIVED in case the receiver
+			 * stays in zero window mode forever.
+			 */
+			if (!q->asoc->peer.rwnd &&
+			    !list_empty(&tlist) &&
+			    (sack_ctsn+2 == q->asoc->next_tsn) &&
+			    q->asoc->state < SCTP_STATE_SHUTDOWN_PENDING) {
+				pr_debug("%s: sack received for zero window "
+					 "probe:%u\n", __func__, sack_ctsn);
+
+				q->asoc->overall_error_count = 0;
+				transport->error_count = 0;
+			}
+		}
+
+		/* RFC 2960 6.3.2 Retransmission Timer Rules
+		 *
+		 * R2) Whenever all outstanding data sent to an address have
+		 * been acknowledged, turn off the T3-rtx timer of that
+		 * address.
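+		 *
+		 * (Note: the T3-rtx timer holds a reference on the
+		 * transport, so a successful del_timer() below is paired
+		 * with sctp_transport_put(), and arming the timer takes
+		 * sctp_transport_hold().)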
+		 */
+		if (!transport->flight_size) {
+			if (del_timer(&transport->T3_rtx_timer))
+				sctp_transport_put(transport);
+		} else if (restart_timer) {
+			if (!mod_timer(&transport->T3_rtx_timer,
+				       jiffies + transport->rto))
+				sctp_transport_hold(transport);
+		}
+
+		if (forward_progress) {
+			if (transport->dst)
+				dst_confirm(transport->dst);
+		}
+	}
+
+	list_splice(&tlist, transmitted_queue);
+}
+
+/* Mark chunks as missing; as a consequence they may get retransmitted. */
+static void sctp_mark_missing(struct sctp_outq *q,
+			      struct list_head *transmitted_queue,
+			      struct sctp_transport *transport,
+			      __u32 highest_new_tsn_in_sack,
+			      int count_of_newacks)
+{
+	struct sctp_chunk *chunk;
+	__u32 tsn;
+	char do_fast_retransmit = 0;
+	struct sctp_association *asoc = q->asoc;
+	struct sctp_transport *primary = asoc->peer.primary_path;
+
+	list_for_each_entry(chunk, transmitted_queue, transmitted_list) {
+
+		tsn = ntohl(chunk->subh.data_hdr->tsn);
+
+		/* RFC 2960 7.2.4, sctpimpguide-05 2.8.2 M3) Examine all
+		 * 'Unacknowledged TSN's', if the TSN number of an
+		 * 'Unacknowledged TSN' is smaller than the 'HighestTSNinSack'
+		 * value, increment the 'TSN.Missing.Report' count on that
+		 * chunk if it has NOT been fast retransmitted or marked for
+		 * fast retransmit already.
+		 */
+		if (chunk->fast_retransmit == SCTP_CAN_FRTX &&
+		    !chunk->tsn_gap_acked &&
+		    TSN_lt(tsn, highest_new_tsn_in_sack)) {
+
+			/* SFR-CACC may require us to skip marking
+			 * this chunk as missing.
+			 */
+			if (!transport || !sctp_cacc_skip(primary,
+						chunk->transport,
+						count_of_newacks, tsn)) {
+				chunk->tsn_missing_report++;
+
+				pr_debug("%s: tsn:0x%x missing counter:%d\n",
+					 __func__, tsn, chunk->tsn_missing_report);
+			}
+		}
+		/*
+		 * M4) If any DATA chunk is found to have a
+		 * 'TSN.Missing.Report'
+		 * value larger than or equal to 3, mark that chunk for
+		 * retransmission and start the fast retransmit procedure.
+		 */
+
+		if (chunk->tsn_missing_report >= 3) {
+			chunk->fast_retransmit = SCTP_NEED_FRTX;
+			do_fast_retransmit = 1;
+		}
+	}
+
+	if (transport) {
+		if (do_fast_retransmit)
+			sctp_retransmit(q, transport, SCTP_RTXR_FAST_RTX);
+
+		pr_debug("%s: transport:%p, cwnd:%d, ssthresh:%d, "
+			 "flight_size:%d, pba:%d\n", __func__, transport,
+			 transport->cwnd, transport->ssthresh,
+			 transport->flight_size, transport->partial_bytes_acked);
+	}
+}
+
+/* Is the given TSN acked by this packet? */
+static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn)
+{
+	int i;
+	sctp_sack_variable_t *frags;
+	__u16 gap;
+	__u32 ctsn = ntohl(sack->cum_tsn_ack);
+
+	if (TSN_lte(tsn, ctsn))
+		goto pass;
+
+	/* 3.3.4 Selective Acknowledgement (SACK) (3):
+	 *
+	 * Gap Ack Blocks:
+	 *  These fields contain the Gap Ack Blocks. They are repeated
+	 *  for each Gap Ack Block up to the number of Gap Ack Blocks
+	 *  defined in the Number of Gap Ack Blocks field. All DATA
+	 *  chunks with TSNs greater than or equal to (Cumulative TSN
+	 *  Ack + Gap Ack Block Start) and less than or equal to
+	 *  (Cumulative TSN Ack + Gap Ack Block End) of each Gap Ack
+	 *  Block are assumed to have been received correctly.
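+	 *
+	 * (Editorial illustration with made-up values: for a SACK with
+	 * Cumulative TSN Ack 1000 and one Gap Ack Block of Start 2 and
+	 * End 3, TSNs 1002 and 1003 are acked while 1001 is not.  The
+	 * code below computes gap = tsn - ctsn and tests
+	 * start <= gap <= end using serial-number arithmetic.)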
+ */ + + frags = sack->variable; + gap = tsn - ctsn; + for (i = 0; i < ntohs(sack->num_gap_ack_blocks); ++i) { + if (TSN_lte(ntohs(frags[i].gab.start), gap) && + TSN_lte(gap, ntohs(frags[i].gab.end))) + goto pass; + } + + return 0; +pass: + return 1; +} + +static inline int sctp_get_skip_pos(struct sctp_fwdtsn_skip *skiplist, + int nskips, __be16 stream) +{ + int i; + + for (i = 0; i < nskips; i++) { + if (skiplist[i].stream == stream) + return i; + } + return i; +} + +/* Create and add a fwdtsn chunk to the outq's control queue if needed. */ +static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn) +{ + struct sctp_association *asoc = q->asoc; + struct sctp_chunk *ftsn_chunk = NULL; + struct sctp_fwdtsn_skip ftsn_skip_arr[10]; + int nskips = 0; + int skip_pos = 0; + __u32 tsn; + struct sctp_chunk *chunk; + struct list_head *lchunk, *temp; + + if (!asoc->peer.prsctp_capable) + return; + + /* PR-SCTP C1) Let SackCumAck be the Cumulative TSN ACK carried in the + * received SACK. + * + * If (Advanced.Peer.Ack.Point < SackCumAck), then update + * Advanced.Peer.Ack.Point to be equal to SackCumAck. + */ + if (TSN_lt(asoc->adv_peer_ack_point, ctsn)) + asoc->adv_peer_ack_point = ctsn; + + /* PR-SCTP C2) Try to further advance the "Advanced.Peer.Ack.Point" + * locally, that is, to move "Advanced.Peer.Ack.Point" up as long as + * the chunk next in the out-queue space is marked as "abandoned" as + * shown in the following example: + * + * Assuming that a SACK arrived with the Cumulative TSN ACK 102 + * and the Advanced.Peer.Ack.Point is updated to this value: + * + * out-queue at the end of ==> out-queue after Adv.Ack.Point + * normal SACK processing local advancement + * ... ... + * Adv.Ack.Pt-> 102 acked 102 acked + * 103 abandoned 103 abandoned + * 104 abandoned Adv.Ack.P-> 104 abandoned + * 105 105 + * 106 acked 106 acked + * ... ... + * + * In this example, the data sender successfully advanced the + * "Advanced.Peer.Ack.Point" from 102 to 104 locally. + */ + list_for_each_safe(lchunk, temp, &q->abandoned) { + chunk = list_entry(lchunk, struct sctp_chunk, + transmitted_list); + tsn = ntohl(chunk->subh.data_hdr->tsn); + + /* Remove any chunks in the abandoned queue that are acked by + * the ctsn. + */ + if (TSN_lte(tsn, ctsn)) { + list_del_init(lchunk); + sctp_chunk_free(chunk); + } else { + if (TSN_lte(tsn, asoc->adv_peer_ack_point+1)) { + asoc->adv_peer_ack_point = tsn; + if (chunk->chunk_hdr->flags & + SCTP_DATA_UNORDERED) + continue; + skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0], + nskips, + chunk->subh.data_hdr->stream); + ftsn_skip_arr[skip_pos].stream = + chunk->subh.data_hdr->stream; + ftsn_skip_arr[skip_pos].ssn = + chunk->subh.data_hdr->ssn; + if (skip_pos == nskips) + nskips++; + if (nskips == 10) + break; + } else + break; + } + } + + /* PR-SCTP C3) If, after step C1 and C2, the "Advanced.Peer.Ack.Point" + * is greater than the Cumulative TSN ACK carried in the received + * SACK, the data sender MUST send the data receiver a FORWARD TSN + * chunk containing the latest value of the + * "Advanced.Peer.Ack.Point". + * + * C4) For each "abandoned" TSN the sender of the FORWARD TSN SHOULD + * list each stream and sequence number in the forwarded TSN. This + * information will enable the receiver to easily find any + * stranded TSN's waiting on stream reorder queues. Each stream + * SHOULD only be reported once; this means that if multiple + * abandoned messages occur in the same stream then only the + * highest abandoned stream sequence number is reported. 
If the
+	 *    total size of the FORWARD TSN does NOT fit in a single MTU then
+	 *    the sender of the FORWARD TSN SHOULD lower the
+	 *    Advanced.Peer.Ack.Point to the last TSN that will fit in a
+	 *    single MTU.
+	 */
+	if (asoc->adv_peer_ack_point > ctsn)
+		ftsn_chunk = sctp_make_fwdtsn(asoc, asoc->adv_peer_ack_point,
+					      nskips, &ftsn_skip_arr[0]);
+
+	if (ftsn_chunk) {
+		list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
+		SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_OUTCTRLCHUNKS);
+	}
+}
diff --git a/net/sctp/primitive.c b/net/sctp/primitive.c
new file mode 100644
index 000000000..ab8d9f96a
--- /dev/null
+++ b/net/sctp/primitive.c
@@ -0,0 +1,213 @@
+/* SCTP kernel implementation
+ * Copyright (c) 1999-2000 Cisco, Inc.
+ * Copyright (c) 1999-2001 Motorola, Inc.
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * These functions implement the SCTP primitive functions from Section 10.
+ *
+ * Note that the descriptions in the specification are of USER level
+ * functions; this file implements the functions that populate the struct
+ * proto for SCTP, which is the BOTTOM of the sockets interface.
+ *
+ * This SCTP implementation is free software;
+ * you can redistribute it and/or modify it under the terms of
+ * the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This SCTP implementation is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ *                 ************************
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNU CC; see the file COPYING.  If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ *    lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ *    La Monte H.P. Yarroll <piggy@acm.org>
+ *    Narasimha Budihal <narasimha@refcode.org>
+ *    Karl Knutson <karl@athena.chicago.il.us>
+ *    Ardelle Fan <ardelle.fan@intel.com>
+ *    Kevin Gao <kevin.gao@intel.com>
+ */
+
+#include <linux/types.h>
+#include <linux/list.h> /* For struct list_head */
+#include <linux/socket.h>
+#include <linux/ip.h>
+#include <linux/time.h> /* For struct timeval */
+#include <linux/gfp.h>
+#include <net/sock.h>
+#include <net/sctp/sctp.h>
+#include <net/sctp/sm.h>
+
+#define DECLARE_PRIMITIVE(name) \
+/* This is called in the code as sctp_primitive_ ## name. */ \
+int sctp_primitive_ ## name(struct net *net, struct sctp_association *asoc, \
+			    void *arg) { \
+	int error = 0; \
+	sctp_event_t event_type; sctp_subtype_t subtype; \
+	sctp_state_t state; \
+	struct sctp_endpoint *ep; \
+	\
+	event_type = SCTP_EVENT_T_PRIMITIVE; \
+	subtype = SCTP_ST_PRIMITIVE(SCTP_PRIMITIVE_ ## name); \
+	state = asoc ? asoc->state : SCTP_STATE_CLOSED; \
+	ep = asoc ? asoc->ep : NULL; \
+	\
+	error = sctp_do_sm(net, event_type, subtype, state, ep, asoc, \
+			   arg, GFP_KERNEL); \
+	return error; \
+}
+
+/* 10.1 ULP-to-SCTP
+ * B) Associate
+ *
+ * Format: ASSOCIATE(local SCTP instance name, destination transport addr,
+ *         outbound stream count)
+ * -> association id [,destination transport addr list] [,outbound stream
+ *    count]
+ *
+ * This primitive allows the upper layer to initiate an association to a
+ * specific peer endpoint.
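+ *
+ * (Editorial note, describing call sites in socket.c that are not
+ * shown here: the primitive is reached through the sockets API, for
+ * example a connect() or a sendmsg() that implies a new association
+ * ends up invoking sctp_primitive_ASSOCIATE.)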
+ *
+ * This version assumes that asoc is fully populated with the initial
+ * parameters.  We then return a traditional kernel indicator of
+ * success or failure.
+ */
+
+/* This is called in the code as sctp_primitive_ASSOCIATE. */
+
+DECLARE_PRIMITIVE(ASSOCIATE)
+
+/* 10.1 ULP-to-SCTP
+ * C) Shutdown
+ *
+ * Format: SHUTDOWN(association id)
+ * -> result
+ *
+ * Gracefully closes an association.  Any locally queued user data
+ * will be delivered to the peer.  The association will be terminated only
+ * after the peer acknowledges all the SCTP packets sent.  A success code
+ * will be returned on successful termination of the association.  If
+ * attempting to terminate the association results in a failure, an error
+ * code shall be returned.
+ */
+
+DECLARE_PRIMITIVE(SHUTDOWN);
+
+/* 10.1 ULP-to-SCTP
+ * D) Abort
+ *
+ * Format: Abort(association id [, cause code])
+ * -> result
+ *
+ * Ungracefully closes an association.  Any locally queued user data
+ * will be discarded and an ABORT chunk is sent to the peer.  A success
+ * code will be returned on successful abortion of the association.  If
+ * attempting to abort the association results in a failure, an error
+ * code shall be returned.
+ */
+
+DECLARE_PRIMITIVE(ABORT);
+
+/* 10.1 ULP-to-SCTP
+ * E) Send
+ *
+ * Format: SEND(association id, buffer address, byte count [,context]
+ *         [,stream id] [,life time] [,destination transport address]
+ *         [,unorder flag] [,no-bundle flag] [,payload protocol-id] )
+ * -> result
+ *
+ * This is the main method to send user data via SCTP.
+ *
+ * Mandatory attributes:
+ *
+ *  o association id - local handle to the SCTP association
+ *
+ *  o buffer address - the location where the user message to be
+ *    transmitted is stored;
+ *
+ *  o byte count - The size of the user data in number of bytes;
+ *
+ * Optional attributes:
+ *
+ *  o context - an optional 32 bit integer that will be carried in the
+ *    sending failure notification to the ULP if the transportation of
+ *    this User Message fails.
+ *
+ *  o stream id - to indicate which stream to send the data on. If not
+ *    specified, stream 0 will be used.
+ *
+ *  o life time - specifies the life time of the user data. The user data
+ *    will not be sent by SCTP after the life time expires. This
+ *    parameter can be used to avoid efforts to transmit stale
+ *    user messages. SCTP notifies the ULP if the data cannot be
+ *    initiated to transport (i.e. sent to the destination via SCTP's
+ *    send primitive) within the life time variable. However, the
+ *    user data will be transmitted if SCTP has attempted to transmit a
+ *    chunk before the life time expired.
+ *
+ *  o destination transport address - specified as one of the destination
+ *    transport addresses of the peer endpoint to which this packet
+ *    should be sent. Whenever possible, SCTP should use this destination
+ *    transport address for sending the packets, instead of the current
+ *    primary path.
+ *
+ *  o unorder flag - this flag, if present, indicates that the user
+ *    would like the data delivered in an unordered fashion to the peer
+ *    (i.e., the U flag is set to 1 on all DATA chunks carrying this
+ *    message).
+ *
+ *  o no-bundle flag - instructs SCTP not to bundle this user data with
+ *    other outbound DATA chunks. SCTP MAY still bundle even when
+ *    this flag is present, when faced with network congestion.
+ *
+ *  o payload protocol-id - A 32 bit unsigned integer that is to be
+ *    passed to the peer indicating the type of payload protocol data
+ *    being transmitted.
This value is passed as opaque data by SCTP.
+ */
+
+DECLARE_PRIMITIVE(SEND);
+
+/* 10.1 ULP-to-SCTP
+ * J) Request Heartbeat
+ *
+ * Format: REQUESTHEARTBEAT(association id, destination transport address)
+ *
+ * -> result
+ *
+ * Instructs the local endpoint to perform a HeartBeat on the specified
+ * destination transport address of the given association. The returned
+ * result should indicate whether the transmission of the HEARTBEAT
+ * chunk to the destination address is successful.
+ *
+ * Mandatory attributes:
+ *
+ * o association id - local handle to the SCTP association
+ *
+ * o destination transport address - the transport address of the
+ *   association on which a heartbeat should be issued.
+ */
+
+DECLARE_PRIMITIVE(REQUESTHEARTBEAT);
+
+/* ADDIP
+* 3.1.1 Address Configuration Change Chunk (ASCONF)
+*
+* This chunk is used to communicate to the remote endpoint one of the
+* configuration change requests that MUST be acknowledged. The
+* information carried in the ASCONF Chunk uses the form of a
+* Type-Length-Value (TLV), as described in "3.2.1 Optional/
+* Variable-length Parameter Format" in RFC2960 [5], for all variable
+* parameters.
+*/
+
+DECLARE_PRIMITIVE(ASCONF);
diff --git a/net/sctp/probe.c b/net/sctp/probe.c
new file mode 100644
index 000000000..5e68b94ee
--- /dev/null
+++ b/net/sctp/probe.c
@@ -0,0 +1,243 @@
+/*
+ * sctp_probe - Observe the SCTP flow with kprobes.
+ *
+ * The idea for this came from Werner Almesberger's umlsim
+ * Copyright (C) 2004, Stephen Hemminger <shemminger@osdl.org>
+ *
+ * Modified for SCTP from Stephen Hemminger's code
+ * Copyright (C) 2010, Wei Yongjun <yjwei@cn.fujitsu.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/kernel.h> +#include <linux/kprobes.h> +#include <linux/socket.h> +#include <linux/sctp.h> +#include <linux/proc_fs.h> +#include <linux/vmalloc.h> +#include <linux/module.h> +#include <linux/kfifo.h> +#include <linux/time.h> +#include <net/net_namespace.h> + +#include <net/sctp/sctp.h> +#include <net/sctp/sm.h> + +MODULE_SOFTDEP("pre: sctp"); +MODULE_AUTHOR("Wei Yongjun <yjwei@cn.fujitsu.com>"); +MODULE_DESCRIPTION("SCTP snooper"); +MODULE_LICENSE("GPL"); + +static int port __read_mostly = 0; +MODULE_PARM_DESC(port, "Port to match (0=all)"); +module_param(port, int, 0); + +static unsigned int fwmark __read_mostly = 0; +MODULE_PARM_DESC(fwmark, "skb mark to match (0=no mark)"); +module_param(fwmark, uint, 0); + +static int bufsize __read_mostly = 64 * 1024; +MODULE_PARM_DESC(bufsize, "Log buffer size (default 64k)"); +module_param(bufsize, int, 0); + +static int full __read_mostly = 1; +MODULE_PARM_DESC(full, "Full log (1=every ack packet received, 0=only cwnd changes)"); +module_param(full, int, 0); + +static const char procname[] = "sctpprobe"; + +static struct { + struct kfifo fifo; + spinlock_t lock; + wait_queue_head_t wait; + struct timespec tstart; +} sctpw; + +static __printf(1, 2) void printl(const char *fmt, ...) +{ + va_list args; + int len; + char tbuf[256]; + + va_start(args, fmt); + len = vscnprintf(tbuf, sizeof(tbuf), fmt, args); + va_end(args); + + kfifo_in_locked(&sctpw.fifo, tbuf, len, &sctpw.lock); + wake_up(&sctpw.wait); +} + +static int sctpprobe_open(struct inode *inode, struct file *file) +{ + kfifo_reset(&sctpw.fifo); + getnstimeofday(&sctpw.tstart); + + return 0; +} + +static ssize_t sctpprobe_read(struct file *file, char __user *buf, + size_t len, loff_t *ppos) +{ + int error = 0, cnt = 0; + unsigned char *tbuf; + + if (!buf) + return -EINVAL; + + if (len == 0) + return 0; + + tbuf = vmalloc(len); + if (!tbuf) + return -ENOMEM; + + error = wait_event_interruptible(sctpw.wait, + kfifo_len(&sctpw.fifo) != 0); + if (error) + goto out_free; + + cnt = kfifo_out_locked(&sctpw.fifo, tbuf, len, &sctpw.lock); + error = copy_to_user(buf, tbuf, cnt) ? -EFAULT : 0; + +out_free: + vfree(tbuf); + + return error ? 
error : cnt;
+}
+
+static const struct file_operations sctpprobe_fops = {
+	.owner	= THIS_MODULE,
+	.open	= sctpprobe_open,
+	.read	= sctpprobe_read,
+	.llseek = noop_llseek,
+};
+
+static sctp_disposition_t jsctp_sf_eat_sack(struct net *net,
+					    const struct sctp_endpoint *ep,
+					    const struct sctp_association *asoc,
+					    const sctp_subtype_t type,
+					    void *arg,
+					    sctp_cmd_seq_t *commands)
+{
+	struct sctp_chunk *chunk = arg;
+	struct sk_buff *skb = chunk->skb;
+	struct sctp_transport *sp;
+	static __u32 lcwnd = 0;
+	struct timespec now;
+
+	sp = asoc->peer.primary_path;
+
+	if (((port == 0 && fwmark == 0) ||
+	     asoc->peer.port == port ||
+	     ep->base.bind_addr.port == port ||
+	     (fwmark > 0 && skb->mark == fwmark)) &&
+	    (full || sp->cwnd != lcwnd)) {
+		lcwnd = sp->cwnd;
+
+		getnstimeofday(&now);
+		now = timespec_sub(now, sctpw.tstart);
+
+		printl("%lu.%06lu ", (unsigned long) now.tv_sec,
+		       (unsigned long) now.tv_nsec / NSEC_PER_USEC);
+
+		printl("%p %5d %5d %5d %8d %5d ", asoc,
+		       ep->base.bind_addr.port, asoc->peer.port,
+		       asoc->pathmtu, asoc->peer.rwnd, asoc->unack_data);
+
+		list_for_each_entry(sp, &asoc->peer.transport_addr_list,
+				    transports) {
+			if (sp == asoc->peer.primary_path)
+				printl("*");
+
+			printl("%pISc %2u %8u %8u %8u %8u %8u ",
+			       &sp->ipaddr, sp->state, sp->cwnd, sp->ssthresh,
+			       sp->flight_size, sp->partial_bytes_acked,
+			       sp->pathmtu);
+		}
+		printl("\n");
+	}
+
+	jprobe_return();
+	return 0;
+}
+
+static struct jprobe sctp_recv_probe = {
+	.kp	= {
+		.symbol_name = "sctp_sf_eat_sack_6_2",
+	},
+	.entry	= jsctp_sf_eat_sack,
+};
+
+static __init int sctp_setup_jprobe(void)
+{
+	int ret = register_jprobe(&sctp_recv_probe);
+
+	if (ret) {
+		if (request_module("sctp"))
+			goto out;
+		ret = register_jprobe(&sctp_recv_probe);
+	}
+
+out:
+	return ret;
+}
+
+static __init int sctpprobe_init(void)
+{
+	int ret = -ENOMEM;
+
+	/* Warning: if the function signature of sctp_sf_eat_sack_6_2
+	 * has been changed, you also have to change the signature of
+	 * jsctp_sf_eat_sack, otherwise you end up right here!
+	 */
+	BUILD_BUG_ON(__same_type(sctp_sf_eat_sack_6_2,
+				 jsctp_sf_eat_sack) == 0);
+
+	init_waitqueue_head(&sctpw.wait);
+	spin_lock_init(&sctpw.lock);
+	if (kfifo_alloc(&sctpw.fifo, bufsize, GFP_KERNEL))
+		return ret;
+
+	if (!proc_create(procname, S_IRUSR, init_net.proc_net,
+			 &sctpprobe_fops))
+		goto free_kfifo;
+
+	ret = sctp_setup_jprobe();
+	if (ret)
+		goto remove_proc;
+
+	pr_info("probe registered (port=%d/fwmark=%u) bufsize=%u\n",
+		port, fwmark, bufsize);
+	return 0;
+
+remove_proc:
+	remove_proc_entry(procname, init_net.proc_net);
+free_kfifo:
+	kfifo_free(&sctpw.fifo);
+	return ret;
+}
+
+static __exit void sctpprobe_exit(void)
+{
+	kfifo_free(&sctpw.fifo);
+	remove_proc_entry(procname, init_net.proc_net);
+	unregister_jprobe(&sctp_recv_probe);
+}
+
+module_init(sctpprobe_init);
+module_exit(sctpprobe_exit);
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
new file mode 100644
index 000000000..0697eda5a
--- /dev/null
+++ b/net/sctp/proc.c
@@ -0,0 +1,555 @@
+/* SCTP kernel implementation
+ * Copyright (c) 2003 International Business Machines, Corp.
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * This SCTP implementation is free software;
+ * you can redistribute it and/or modify it under the terms of
+ * the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This SCTP implementation is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ *                 ************************
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNU CC; see the file COPYING.  If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ *    lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ *    Sridhar Samudrala <sri@us.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/seq_file.h>
+#include <linux/init.h>
+#include <linux/export.h>
+#include <net/sctp/sctp.h>
+#include <net/ip.h> /* for snmp_fold_field */
+
+static const struct snmp_mib sctp_snmp_list[] = {
+	SNMP_MIB_ITEM("SctpCurrEstab", SCTP_MIB_CURRESTAB),
+	SNMP_MIB_ITEM("SctpActiveEstabs", SCTP_MIB_ACTIVEESTABS),
+	SNMP_MIB_ITEM("SctpPassiveEstabs", SCTP_MIB_PASSIVEESTABS),
+	SNMP_MIB_ITEM("SctpAborteds", SCTP_MIB_ABORTEDS),
+	SNMP_MIB_ITEM("SctpShutdowns", SCTP_MIB_SHUTDOWNS),
+	SNMP_MIB_ITEM("SctpOutOfBlues", SCTP_MIB_OUTOFBLUES),
+	SNMP_MIB_ITEM("SctpChecksumErrors", SCTP_MIB_CHECKSUMERRORS),
+	SNMP_MIB_ITEM("SctpOutCtrlChunks", SCTP_MIB_OUTCTRLCHUNKS),
+	SNMP_MIB_ITEM("SctpOutOrderChunks", SCTP_MIB_OUTORDERCHUNKS),
+	SNMP_MIB_ITEM("SctpOutUnorderChunks", SCTP_MIB_OUTUNORDERCHUNKS),
+	SNMP_MIB_ITEM("SctpInCtrlChunks", SCTP_MIB_INCTRLCHUNKS),
+	SNMP_MIB_ITEM("SctpInOrderChunks", SCTP_MIB_INORDERCHUNKS),
+	SNMP_MIB_ITEM("SctpInUnorderChunks", SCTP_MIB_INUNORDERCHUNKS),
+	SNMP_MIB_ITEM("SctpFragUsrMsgs", SCTP_MIB_FRAGUSRMSGS),
+	SNMP_MIB_ITEM("SctpReasmUsrMsgs", SCTP_MIB_REASMUSRMSGS),
+	SNMP_MIB_ITEM("SctpOutSCTPPacks", SCTP_MIB_OUTSCTPPACKS),
+	SNMP_MIB_ITEM("SctpInSCTPPacks", SCTP_MIB_INSCTPPACKS),
+	SNMP_MIB_ITEM("SctpT1InitExpireds", SCTP_MIB_T1_INIT_EXPIREDS),
+	SNMP_MIB_ITEM("SctpT1CookieExpireds", SCTP_MIB_T1_COOKIE_EXPIREDS),
+	SNMP_MIB_ITEM("SctpT2ShutdownExpireds", SCTP_MIB_T2_SHUTDOWN_EXPIREDS),
+	SNMP_MIB_ITEM("SctpT3RtxExpireds", SCTP_MIB_T3_RTX_EXPIREDS),
+	SNMP_MIB_ITEM("SctpT4RtoExpireds", SCTP_MIB_T4_RTO_EXPIREDS),
+	SNMP_MIB_ITEM("SctpT5ShutdownGuardExpireds", SCTP_MIB_T5_SHUTDOWN_GUARD_EXPIREDS),
+	SNMP_MIB_ITEM("SctpDelaySackExpireds", SCTP_MIB_DELAY_SACK_EXPIREDS),
+	SNMP_MIB_ITEM("SctpAutocloseExpireds", SCTP_MIB_AUTOCLOSE_EXPIREDS),
+	SNMP_MIB_ITEM("SctpT3Retransmits", SCTP_MIB_T3_RETRANSMITS),
+	SNMP_MIB_ITEM("SctpPmtudRetransmits", SCTP_MIB_PMTUD_RETRANSMITS),
+	SNMP_MIB_ITEM("SctpFastRetransmits", SCTP_MIB_FAST_RETRANSMITS),
+	SNMP_MIB_ITEM("SctpInPktSoftirq", SCTP_MIB_IN_PKT_SOFTIRQ),
+	SNMP_MIB_ITEM("SctpInPktBacklog", SCTP_MIB_IN_PKT_BACKLOG),
+	SNMP_MIB_ITEM("SctpInPktDiscards", SCTP_MIB_IN_PKT_DISCARDS),
+	SNMP_MIB_ITEM("SctpInDataChunkDiscards", SCTP_MIB_IN_DATA_CHUNK_DISCARDS),
+	SNMP_MIB_SENTINEL
+};
+
+/* Display sctp snmp mib statistics (/proc/net/sctp/snmp). */
+static int sctp_snmp_seq_show(struct seq_file *seq, void *v)
+{
+	struct net *net = seq->private;
+	int i;
+
+	for (i = 0; sctp_snmp_list[i].name != NULL; i++)
+		seq_printf(seq, "%-32s\t%ld\n", sctp_snmp_list[i].name,
+			   snmp_fold_field(net->sctp.sctp_statistics,
+					   sctp_snmp_list[i].entry));
+
+	return 0;
+}
+
+/* Initialize the seq file operations for 'snmp' object.
*/ +static int sctp_snmp_seq_open(struct inode *inode, struct file *file) +{ + return single_open_net(inode, file, sctp_snmp_seq_show); +} + +static const struct file_operations sctp_snmp_seq_fops = { + .owner = THIS_MODULE, + .open = sctp_snmp_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release_net, +}; + +/* Set up the proc fs entry for 'snmp' object. */ +int __net_init sctp_snmp_proc_init(struct net *net) +{ + struct proc_dir_entry *p; + + p = proc_create("snmp", S_IRUGO, net->sctp.proc_net_sctp, + &sctp_snmp_seq_fops); + if (!p) + return -ENOMEM; + + return 0; +} + +/* Cleanup the proc fs entry for 'snmp' object. */ +void sctp_snmp_proc_exit(struct net *net) +{ + remove_proc_entry("snmp", net->sctp.proc_net_sctp); +} + +/* Dump local addresses of an association/endpoint. */ +static void sctp_seq_dump_local_addrs(struct seq_file *seq, struct sctp_ep_common *epb) +{ + struct sctp_association *asoc; + struct sctp_sockaddr_entry *laddr; + struct sctp_transport *peer; + union sctp_addr *addr, *primary = NULL; + struct sctp_af *af; + + if (epb->type == SCTP_EP_TYPE_ASSOCIATION) { + asoc = sctp_assoc(epb); + + peer = asoc->peer.primary_path; + if (unlikely(peer == NULL)) { + WARN(1, "Association %p with NULL primary path!\n", asoc); + return; + } + + primary = &peer->saddr; + } + + rcu_read_lock(); + list_for_each_entry_rcu(laddr, &epb->bind_addr.address_list, list) { + if (!laddr->valid) + continue; + + addr = &laddr->a; + af = sctp_get_af_specific(addr->sa.sa_family); + if (primary && af->cmp_addr(addr, primary)) { + seq_printf(seq, "*"); + } + af->seq_dump_addr(seq, addr); + } + rcu_read_unlock(); +} + +/* Dump remote addresses of an association. */ +static void sctp_seq_dump_remote_addrs(struct seq_file *seq, struct sctp_association *assoc) +{ + struct sctp_transport *transport; + union sctp_addr *addr, *primary; + struct sctp_af *af; + + primary = &assoc->peer.primary_addr; + rcu_read_lock(); + list_for_each_entry_rcu(transport, &assoc->peer.transport_addr_list, + transports) { + addr = &transport->ipaddr; + if (transport->dead) + continue; + + af = sctp_get_af_specific(addr->sa.sa_family); + if (af->cmp_addr(addr, primary)) { + seq_printf(seq, "*"); + } + af->seq_dump_addr(seq, addr); + } + rcu_read_unlock(); +} + +static void *sctp_eps_seq_start(struct seq_file *seq, loff_t *pos) +{ + if (*pos >= sctp_ep_hashsize) + return NULL; + + if (*pos < 0) + *pos = 0; + + if (*pos == 0) + seq_printf(seq, " ENDPT SOCK STY SST HBKT LPORT UID INODE LADDRS\n"); + + return (void *)pos; +} + +static void sctp_eps_seq_stop(struct seq_file *seq, void *v) +{ +} + + +static void *sctp_eps_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + if (++*pos >= sctp_ep_hashsize) + return NULL; + + return pos; +} + + +/* Display sctp endpoints (/proc/net/sctp/eps). 
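+ * Columns: ENDPT and SOCK are kernel pointers for the endpoint and
+ * its socket, STY the socket type, SST the socket state, HBKT the
+ * hash bucket index, followed by the local port, owner UID, socket
+ * inode and the endpoint's bound local addresses.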
*/ +static int sctp_eps_seq_show(struct seq_file *seq, void *v) +{ + struct sctp_hashbucket *head; + struct sctp_ep_common *epb; + struct sctp_endpoint *ep; + struct sock *sk; + int hash = *(loff_t *)v; + + if (hash >= sctp_ep_hashsize) + return -ENOMEM; + + head = &sctp_ep_hashtable[hash]; + local_bh_disable(); + read_lock(&head->lock); + sctp_for_each_hentry(epb, &head->chain) { + ep = sctp_ep(epb); + sk = epb->sk; + if (!net_eq(sock_net(sk), seq_file_net(seq))) + continue; + seq_printf(seq, "%8pK %8pK %-3d %-3d %-4d %-5d %5u %5lu ", ep, sk, + sctp_sk(sk)->type, sk->sk_state, hash, + epb->bind_addr.port, + from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)), + sock_i_ino(sk)); + + sctp_seq_dump_local_addrs(seq, epb); + seq_printf(seq, "\n"); + } + read_unlock(&head->lock); + local_bh_enable(); + + return 0; +} + +static const struct seq_operations sctp_eps_ops = { + .start = sctp_eps_seq_start, + .next = sctp_eps_seq_next, + .stop = sctp_eps_seq_stop, + .show = sctp_eps_seq_show, +}; + + +/* Initialize the seq file operations for 'eps' object. */ +static int sctp_eps_seq_open(struct inode *inode, struct file *file) +{ + return seq_open_net(inode, file, &sctp_eps_ops, + sizeof(struct seq_net_private)); +} + +static const struct file_operations sctp_eps_seq_fops = { + .open = sctp_eps_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release_net, +}; + +/* Set up the proc fs entry for 'eps' object. */ +int __net_init sctp_eps_proc_init(struct net *net) +{ + struct proc_dir_entry *p; + + p = proc_create("eps", S_IRUGO, net->sctp.proc_net_sctp, + &sctp_eps_seq_fops); + if (!p) + return -ENOMEM; + + return 0; +} + +/* Cleanup the proc fs entry for 'eps' object. */ +void sctp_eps_proc_exit(struct net *net) +{ + remove_proc_entry("eps", net->sctp.proc_net_sctp); +} + + +static void *sctp_assocs_seq_start(struct seq_file *seq, loff_t *pos) +{ + if (*pos >= sctp_assoc_hashsize) + return NULL; + + if (*pos < 0) + *pos = 0; + + if (*pos == 0) + seq_printf(seq, " ASSOC SOCK STY SST ST HBKT " + "ASSOC-ID TX_QUEUE RX_QUEUE UID INODE LPORT " + "RPORT LADDRS <-> RADDRS " + "HBINT INS OUTS MAXRT T1X T2X RTXC " + "wmema wmemq sndbuf rcvbuf\n"); + + return (void *)pos; +} + +static void sctp_assocs_seq_stop(struct seq_file *seq, void *v) +{ +} + + +static void *sctp_assocs_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + if (++*pos >= sctp_assoc_hashsize) + return NULL; + + return pos; +} + +/* Display sctp associations (/proc/net/sctp/assocs). 
*/ +static int sctp_assocs_seq_show(struct seq_file *seq, void *v) +{ + struct sctp_hashbucket *head; + struct sctp_ep_common *epb; + struct sctp_association *assoc; + struct sock *sk; + int hash = *(loff_t *)v; + + if (hash >= sctp_assoc_hashsize) + return -ENOMEM; + + head = &sctp_assoc_hashtable[hash]; + local_bh_disable(); + read_lock(&head->lock); + sctp_for_each_hentry(epb, &head->chain) { + assoc = sctp_assoc(epb); + sk = epb->sk; + if (!net_eq(sock_net(sk), seq_file_net(seq))) + continue; + seq_printf(seq, + "%8pK %8pK %-3d %-3d %-2d %-4d " + "%4d %8d %8d %7u %5lu %-5d %5d ", + assoc, sk, sctp_sk(sk)->type, sk->sk_state, + assoc->state, hash, + assoc->assoc_id, + assoc->sndbuf_used, + atomic_read(&assoc->rmem_alloc), + from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)), + sock_i_ino(sk), + epb->bind_addr.port, + assoc->peer.port); + seq_printf(seq, " "); + sctp_seq_dump_local_addrs(seq, epb); + seq_printf(seq, "<-> "); + sctp_seq_dump_remote_addrs(seq, assoc); + seq_printf(seq, "\t%8lu %5d %5d %4d %4d %4d %8d " + "%8d %8d %8d %8d", + assoc->hbinterval, assoc->c.sinit_max_instreams, + assoc->c.sinit_num_ostreams, assoc->max_retrans, + assoc->init_retries, assoc->shutdown_retries, + assoc->rtx_data_chunks, + atomic_read(&sk->sk_wmem_alloc), + sk->sk_wmem_queued, + sk->sk_sndbuf, + sk->sk_rcvbuf); + seq_printf(seq, "\n"); + } + read_unlock(&head->lock); + local_bh_enable(); + + return 0; +} + +static const struct seq_operations sctp_assoc_ops = { + .start = sctp_assocs_seq_start, + .next = sctp_assocs_seq_next, + .stop = sctp_assocs_seq_stop, + .show = sctp_assocs_seq_show, +}; + +/* Initialize the seq file operations for 'assocs' object. */ +static int sctp_assocs_seq_open(struct inode *inode, struct file *file) +{ + return seq_open_net(inode, file, &sctp_assoc_ops, + sizeof(struct seq_net_private)); +} + +static const struct file_operations sctp_assocs_seq_fops = { + .open = sctp_assocs_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release_net, +}; + +/* Set up the proc fs entry for 'assocs' object. */ +int __net_init sctp_assocs_proc_init(struct net *net) +{ + struct proc_dir_entry *p; + + p = proc_create("assocs", S_IRUGO, net->sctp.proc_net_sctp, + &sctp_assocs_seq_fops); + if (!p) + return -ENOMEM; + + return 0; +} + +/* Cleanup the proc fs entry for 'assocs' object. 
 */
+void sctp_assocs_proc_exit(struct net *net)
+{
+	remove_proc_entry("assocs", net->sctp.proc_net_sctp);
+}
+
+static void *sctp_remaddr_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	if (*pos >= sctp_assoc_hashsize)
+		return NULL;
+
+	if (*pos < 0)
+		*pos = 0;
+
+	if (*pos == 0)
+		seq_printf(seq, "ADDR ASSOC_ID HB_ACT RTO MAX_PATH_RTX "
+				"REM_ADDR_RTX START STATE\n");
+
+	return (void *)pos;
+}
+
+static void *sctp_remaddr_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	if (++*pos >= sctp_assoc_hashsize)
+		return NULL;
+
+	return pos;
+}
+
+static void sctp_remaddr_seq_stop(struct seq_file *seq, void *v)
+{
+}
+
+static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
+{
+	struct sctp_hashbucket *head;
+	struct sctp_ep_common *epb;
+	struct sctp_association *assoc;
+	struct sctp_transport *tsp;
+	int hash = *(loff_t *)v;
+
+	if (hash >= sctp_assoc_hashsize)
+		return -ENOMEM;
+
+	head = &sctp_assoc_hashtable[hash];
+	local_bh_disable();
+	read_lock(&head->lock);
+	rcu_read_lock();
+	sctp_for_each_hentry(epb, &head->chain) {
+		if (!net_eq(sock_net(epb->sk), seq_file_net(seq)))
+			continue;
+		assoc = sctp_assoc(epb);
+		list_for_each_entry_rcu(tsp, &assoc->peer.transport_addr_list,
+					transports) {
+			if (tsp->dead)
+				continue;
+
+			/*
+			 * The remote address (ADDR)
+			 */
+			tsp->af_specific->seq_dump_addr(seq, &tsp->ipaddr);
+			seq_printf(seq, " ");
+
+			/*
+			 * The association ID (ASSOC_ID)
+			 */
+			seq_printf(seq, "%d ", tsp->asoc->assoc_id);
+
+			/*
+			 * If the Heartbeat is active (HB_ACT)
+			 * Note: 1 = Active, 0 = Inactive
+			 */
+			seq_printf(seq, "%d ", timer_pending(&tsp->hb_timer));
+
+			/*
+			 * Retransmit time out (RTO)
+			 */
+			seq_printf(seq, "%lu ", tsp->rto);
+
+			/*
+			 * Maximum path retransmit count (PATH_MAX_RTX)
+			 */
+			seq_printf(seq, "%d ", tsp->pathmaxrxt);
+
+			/*
+			 * remote address retransmit count (REM_ADDR_RTX)
+			 * Note: We don't have a way to tally this at the
+			 * moment, so let's just leave it as zero for now
+			 */
+			seq_puts(seq, "0 ");
+
+			/*
+			 * remote address start time (START). This is also not
+			 * currently implemented, but we can record it with a
+			 * jiffies marker in a subsequent patch
+			 */
+			seq_puts(seq, "0 ");
+
+			/*
+			 * The current state of this destination. I.e.
+			 * SCTP_ACTIVE, SCTP_INACTIVE, ...
+			 */
+			seq_printf(seq, "%d", tsp->state);
+
+			seq_printf(seq, "\n");
+		}
+	}
+
+	rcu_read_unlock();
+	read_unlock(&head->lock);
+	local_bh_enable();
+
+	return 0;
+
+}
+
+static const struct seq_operations sctp_remaddr_ops = {
+	.start = sctp_remaddr_seq_start,
+	.next = sctp_remaddr_seq_next,
+	.stop = sctp_remaddr_seq_stop,
+	.show = sctp_remaddr_seq_show,
+};
+
+/* Cleanup the proc fs entry for 'remaddr' object.
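+ *
+ * For reference, a line emitted by sctp_remaddr_seq_show() above
+ * follows the header ADDR ASSOC_ID HB_ACT RTO MAX_PATH_RTX
+ * REM_ADDR_RTX START STATE; an illustrative (hypothetical) example:
+ *
+ *   10.0.0.2 1 1 3000 5 0 0 3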
*/ +void sctp_remaddr_proc_exit(struct net *net) +{ + remove_proc_entry("remaddr", net->sctp.proc_net_sctp); +} + +static int sctp_remaddr_seq_open(struct inode *inode, struct file *file) +{ + return seq_open_net(inode, file, &sctp_remaddr_ops, + sizeof(struct seq_net_private)); +} + +static const struct file_operations sctp_remaddr_seq_fops = { + .open = sctp_remaddr_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release_net, +}; + +int __net_init sctp_remaddr_proc_init(struct net *net) +{ + struct proc_dir_entry *p; + + p = proc_create("remaddr", S_IRUGO, net->sctp.proc_net_sctp, + &sctp_remaddr_seq_fops); + if (!p) + return -ENOMEM; + return 0; +} diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c new file mode 100644 index 000000000..53b7acde9 --- /dev/null +++ b/net/sctp/protocol.c @@ -0,0 +1,1550 @@ +/* SCTP kernel implementation + * (C) Copyright IBM Corp. 2001, 2004 + * Copyright (c) 1999-2000 Cisco, Inc. + * Copyright (c) 1999-2001 Motorola, Inc. + * Copyright (c) 2001 Intel Corp. + * Copyright (c) 2001 Nokia, Inc. + * Copyright (c) 2001 La Monte H.P. Yarroll + * + * This file is part of the SCTP kernel implementation + * + * Initialization/cleanup for SCTP protocol support. + * + * This SCTP implementation is free software; + * you can redistribute it and/or modify it under the terms of + * the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This SCTP implementation is distributed in the hope that it + * will be useful, but WITHOUT ANY WARRANTY; without even the implied + * ************************ + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with GNU CC; see the file COPYING. If not, see + * <http://www.gnu.org/licenses/>. + * + * Please send any bug reports or fixes you make to the + * email address(es): + * lksctp developers <linux-sctp@vger.kernel.org> + * + * Written or modified by: + * La Monte H.P. Yarroll <piggy@acm.org> + * Karl Knutson <karl@athena.chicago.il.us> + * Jon Grimm <jgrimm@us.ibm.com> + * Sridhar Samudrala <sri@us.ibm.com> + * Daisy Chang <daisyc@us.ibm.com> + * Ardelle Fan <ardelle.fan@intel.com> + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/netdevice.h> +#include <linux/inetdevice.h> +#include <linux/seq_file.h> +#include <linux/bootmem.h> +#include <linux/highmem.h> +#include <linux/swap.h> +#include <linux/slab.h> +#include <net/net_namespace.h> +#include <net/protocol.h> +#include <net/ip.h> +#include <net/ipv6.h> +#include <net/route.h> +#include <net/sctp/sctp.h> +#include <net/addrconf.h> +#include <net/inet_common.h> +#include <net/inet_ecn.h> + +/* Global data structures. */ +struct sctp_globals sctp_globals __read_mostly; + +struct idr sctp_assocs_id; +DEFINE_SPINLOCK(sctp_assocs_id_lock); + +static struct sctp_pf *sctp_pf_inet6_specific; +static struct sctp_pf *sctp_pf_inet_specific; +static struct sctp_af *sctp_af_v4_specific; +static struct sctp_af *sctp_af_v6_specific; + +struct kmem_cache *sctp_chunk_cachep __read_mostly; +struct kmem_cache *sctp_bucket_cachep __read_mostly; + +long sysctl_sctp_mem[3]; +int sysctl_sctp_rmem[3]; +int sysctl_sctp_wmem[3]; + +/* Set up the proc fs entry for the SCTP protocol. 
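+ *
+ * This creates the /proc/net/sctp directory and then its four
+ * entries, via the helpers defined in proc.c above: snmp, eps,
+ * assocs and remaddr.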
 */
+static int __net_init sctp_proc_init(struct net *net)
+{
+#ifdef CONFIG_PROC_FS
+	net->sctp.proc_net_sctp = proc_net_mkdir(net, "sctp", net->proc_net);
+	if (!net->sctp.proc_net_sctp)
+		goto out_proc_net_sctp;
+	if (sctp_snmp_proc_init(net))
+		goto out_snmp_proc_init;
+	if (sctp_eps_proc_init(net))
+		goto out_eps_proc_init;
+	if (sctp_assocs_proc_init(net))
+		goto out_assocs_proc_init;
+	if (sctp_remaddr_proc_init(net))
+		goto out_remaddr_proc_init;
+
+	return 0;
+
+out_remaddr_proc_init:
+	sctp_assocs_proc_exit(net);
+out_assocs_proc_init:
+	sctp_eps_proc_exit(net);
+out_eps_proc_init:
+	sctp_snmp_proc_exit(net);
+out_snmp_proc_init:
+	remove_proc_entry("sctp", net->proc_net);
+	net->sctp.proc_net_sctp = NULL;
+out_proc_net_sctp:
+	return -ENOMEM;
+#endif /* CONFIG_PROC_FS */
+	return 0;
+}
+
+/* Clean up the proc fs entry for the SCTP protocol.
+ * Note: Do not make this __exit as it is used in the init error
+ * path.
+ */
+static void sctp_proc_exit(struct net *net)
+{
+#ifdef CONFIG_PROC_FS
+	sctp_snmp_proc_exit(net);
+	sctp_eps_proc_exit(net);
+	sctp_assocs_proc_exit(net);
+	sctp_remaddr_proc_exit(net);
+
+	remove_proc_entry("sctp", net->proc_net);
+	net->sctp.proc_net_sctp = NULL;
+#endif
+}
+
+/* Private helper to extract ipv4 addresses and stash them in
+ * the protocol structure.
+ */
+static void sctp_v4_copy_addrlist(struct list_head *addrlist,
+				  struct net_device *dev)
+{
+	struct in_device *in_dev;
+	struct in_ifaddr *ifa;
+	struct sctp_sockaddr_entry *addr;
+
+	rcu_read_lock();
+	if ((in_dev = __in_dev_get_rcu(dev)) == NULL) {
+		rcu_read_unlock();
+		return;
+	}
+
+	for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
+		/* Add the address to the local list.  */
+		addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
+		if (addr) {
+			addr->a.v4.sin_family = AF_INET;
+			addr->a.v4.sin_port = 0;
+			addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
+			addr->valid = 1;
+			INIT_LIST_HEAD(&addr->list);
+			list_add_tail(&addr->list, addrlist);
+		}
+	}
+
+	rcu_read_unlock();
+}
+
+/* Extract our IP addresses from the system and stash them in the
+ * protocol structure.
+ */
+static void sctp_get_local_addr_list(struct net *net)
+{
+	struct net_device *dev;
+	struct list_head *pos;
+	struct sctp_af *af;
+
+	rcu_read_lock();
+	for_each_netdev_rcu(net, dev) {
+		list_for_each(pos, &sctp_address_families) {
+			af = list_entry(pos, struct sctp_af, list);
+			af->copy_addrlist(&net->sctp.local_addr_list, dev);
+		}
+	}
+	rcu_read_unlock();
+}
+
+/* Free the existing local addresses.  */
+static void sctp_free_local_addr_list(struct net *net)
+{
+	struct sctp_sockaddr_entry *addr;
+	struct list_head *pos, *temp;
+
+	list_for_each_safe(pos, temp, &net->sctp.local_addr_list) {
+		addr = list_entry(pos, struct sctp_sockaddr_entry, list);
+		list_del(pos);
+		kfree(addr);
+	}
+}
+
+/* Copy the local addresses which are valid for 'scope' into 'bp'.  */
+int sctp_copy_local_addr_list(struct net *net, struct sctp_bind_addr *bp,
+			      sctp_scope_t scope, gfp_t gfp, int copy_flags)
+{
+	struct sctp_sockaddr_entry *addr;
+	int error = 0;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) {
+		if (!addr->valid)
+			continue;
+		if (sctp_in_scope(net, &addr->a, scope)) {
+			/* Now that the address is in scope, check to see if
+			 * the address type is really supported by the local
+			 * sock as well as the remote peer.
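+			 *
+			 * Equivalently (sketch; 'af' stands for
+			 * addr->a.sa.sa_family):
+			 *
+			 *   copy = (af == AF_INET &&
+			 *	     (copy_flags & SCTP_ADDR4_PEERSUPP)) ||
+			 *	    (af == AF_INET6 &&
+			 *	     (copy_flags & SCTP_ADDR6_ALLOWED) &&
+			 *	     (copy_flags & SCTP_ADDR6_PEERSUPP));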
+			 */
+			if ((((AF_INET == addr->a.sa.sa_family) &&
+			      (copy_flags & SCTP_ADDR4_PEERSUPP))) ||
+			    (((AF_INET6 == addr->a.sa.sa_family) &&
+			      (copy_flags & SCTP_ADDR6_ALLOWED) &&
+			      (copy_flags & SCTP_ADDR6_PEERSUPP)))) {
+				error = sctp_add_bind_addr(bp, &addr->a,
+						    SCTP_ADDR_SRC, GFP_ATOMIC);
+				if (error)
+					goto end_copy;
+			}
+		}
+	}
+
+end_copy:
+	rcu_read_unlock();
+	return error;
+}
+
+/* Initialize a sctp_addr from an incoming skb.  */
+static void sctp_v4_from_skb(union sctp_addr *addr, struct sk_buff *skb,
+			     int is_saddr)
+{
+	void *from;
+	__be16 *port;
+	struct sctphdr *sh;
+
+	port = &addr->v4.sin_port;
+	addr->v4.sin_family = AF_INET;
+
+	sh = sctp_hdr(skb);
+	if (is_saddr) {
+		*port = sh->source;
+		from = &ip_hdr(skb)->saddr;
+	} else {
+		*port = sh->dest;
+		from = &ip_hdr(skb)->daddr;
+	}
+	memcpy(&addr->v4.sin_addr.s_addr, from, sizeof(struct in_addr));
+}
+
+/* Initialize an sctp_addr from a socket. */
+static void sctp_v4_from_sk(union sctp_addr *addr, struct sock *sk)
+{
+	addr->v4.sin_family = AF_INET;
+	addr->v4.sin_port = 0;
+	addr->v4.sin_addr.s_addr = inet_sk(sk)->inet_rcv_saddr;
+}
+
+/* Initialize sk->sk_rcv_saddr from sctp_addr. */
+static void sctp_v4_to_sk_saddr(union sctp_addr *addr, struct sock *sk)
+{
+	inet_sk(sk)->inet_rcv_saddr = addr->v4.sin_addr.s_addr;
+}
+
+/* Initialize sk->sk_daddr from sctp_addr. */
+static void sctp_v4_to_sk_daddr(union sctp_addr *addr, struct sock *sk)
+{
+	inet_sk(sk)->inet_daddr = addr->v4.sin_addr.s_addr;
+}
+
+/* Initialize a sctp_addr from an address parameter. */
+static void sctp_v4_from_addr_param(union sctp_addr *addr,
+				    union sctp_addr_param *param,
+				    __be16 port, int iif)
+{
+	addr->v4.sin_family = AF_INET;
+	addr->v4.sin_port = port;
+	addr->v4.sin_addr.s_addr = param->v4.addr.s_addr;
+}
+
+/* Initialize an address parameter from a sctp_addr and return the length
+ * of the address parameter.
+ */
+static int sctp_v4_to_addr_param(const union sctp_addr *addr,
+				 union sctp_addr_param *param)
+{
+	int length = sizeof(sctp_ipv4addr_param_t);
+
+	param->v4.param_hdr.type = SCTP_PARAM_IPV4_ADDRESS;
+	param->v4.param_hdr.length = htons(length);
+	param->v4.addr.s_addr = addr->v4.sin_addr.s_addr;
+
+	return length;
+}
+
+/* Initialize a sctp_addr from a dst_entry. */
+static void sctp_v4_dst_saddr(union sctp_addr *saddr, struct flowi4 *fl4,
+			      __be16 port)
+{
+	saddr->v4.sin_family = AF_INET;
+	saddr->v4.sin_port = port;
+	saddr->v4.sin_addr.s_addr = fl4->saddr;
+}
+
+/* Compare two addresses exactly. */
+static int sctp_v4_cmp_addr(const union sctp_addr *addr1,
+			    const union sctp_addr *addr2)
+{
+	if (addr1->sa.sa_family != addr2->sa.sa_family)
+		return 0;
+	if (addr1->v4.sin_port != addr2->v4.sin_port)
+		return 0;
+	if (addr1->v4.sin_addr.s_addr != addr2->v4.sin_addr.s_addr)
+		return 0;
+
+	return 1;
+}
+
+/* Initialize addr struct to INADDR_ANY. */
+static void sctp_v4_inaddr_any(union sctp_addr *addr, __be16 port)
+{
+	addr->v4.sin_family = AF_INET;
+	addr->v4.sin_addr.s_addr = htonl(INADDR_ANY);
+	addr->v4.sin_port = port;
+}
+
+/* Is this a wildcard address? */
+static int sctp_v4_is_any(const union sctp_addr *addr)
+{
+	return htonl(INADDR_ANY) == addr->v4.sin_addr.s_addr;
+}
+
+/* This function checks if the address is a valid address to be used for
+ * SCTP binding.
+ *
+ * Output:
+ * Return 0 - If the address is a non-unicast or an illegal address.
+ * Return 1 - If the address is a unicast.
+ */
+static int sctp_v4_addr_valid(union sctp_addr *addr,
+			      struct sctp_sock *sp,
+			      const struct sk_buff *skb)
+{
+	/* IPv4 addresses not allowed */
+	if (sp && ipv6_only_sock(sctp_opt2sk(sp)))
+		return 0;
+
+	/* Is this a non-unicast address or an unusable SCTP address? */
+	if (IS_IPV4_UNUSABLE_ADDRESS(addr->v4.sin_addr.s_addr))
+		return 0;
+
+	/* Is this a broadcast address? */
+	if (skb && skb_rtable(skb)->rt_flags & RTCF_BROADCAST)
+		return 0;
+
+	return 1;
+}
+
+/* Should this be available for binding?   */
+static int sctp_v4_available(union sctp_addr *addr, struct sctp_sock *sp)
+{
+	struct net *net = sock_net(&sp->inet.sk);
+	int ret = inet_addr_type(net, addr->v4.sin_addr.s_addr);
+
+
+	if (addr->v4.sin_addr.s_addr != htonl(INADDR_ANY) &&
+	   ret != RTN_LOCAL &&
+	   !sp->inet.freebind &&
+	   !net->ipv4.sysctl_ip_nonlocal_bind)
+		return 0;
+
+	if (ipv6_only_sock(sctp_opt2sk(sp)))
+		return 0;
+
+	return 1;
+}
+
+/* Checking the loopback, private and other address scopes as defined in
+ * RFC 1918.   The IPv4 scoping is based on the draft for SCTP IPv4
+ * scoping <draft-stewart-tsvwg-sctp-ipv4-00.txt>.
+ *
+ * Level 0 - unusable SCTP addresses
+ * Level 1 - loopback address
+ * Level 2 - link-local addresses
+ * Level 3 - private addresses.
+ * Level 4 - global addresses
+ * For INIT and INIT-ACK address list, let L be the level of
+ * the requested destination address, sender and receiver
+ * SHOULD include all of its addresses with level greater
+ * than or equal to L.
+ *
+ * IPv4 scoping can be controlled through sysctl option
+ * net.sctp.addr_scope_policy
+ */
+static sctp_scope_t sctp_v4_scope(union sctp_addr *addr)
+{
+	sctp_scope_t retval;
+
+	/* Check for unusable SCTP addresses. */
+	if (IS_IPV4_UNUSABLE_ADDRESS(addr->v4.sin_addr.s_addr)) {
+		retval = SCTP_SCOPE_UNUSABLE;
+	} else if (ipv4_is_loopback(addr->v4.sin_addr.s_addr)) {
+		retval = SCTP_SCOPE_LOOPBACK;
+	} else if (ipv4_is_linklocal_169(addr->v4.sin_addr.s_addr)) {
+		retval = SCTP_SCOPE_LINK;
+	} else if (ipv4_is_private_10(addr->v4.sin_addr.s_addr) ||
+		   ipv4_is_private_172(addr->v4.sin_addr.s_addr) ||
+		   ipv4_is_private_192(addr->v4.sin_addr.s_addr)) {
+		retval = SCTP_SCOPE_PRIVATE;
+	} else {
+		retval = SCTP_SCOPE_GLOBAL;
+	}
+
+	return retval;
+}
+
+/* Returns a valid dst cache entry for the given source and destination ip
+ * addresses. If an association is passed, tries to get a dst entry with a
+ * source address that matches an address in the bind address list.
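+ *
+ * In outline, the routine below makes two passes: it first routes
+ * toward the destination and keeps the result only if the source
+ * address picked by routing appears in the bind address list; failing
+ * that, it retries the lookup once per usable bound IPv4 address,
+ * forcing that address as the flow source, and takes the first
+ * lookup that succeeds.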
+ */ +static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr, + struct flowi *fl, struct sock *sk) +{ + struct sctp_association *asoc = t->asoc; + struct rtable *rt; + struct flowi4 *fl4 = &fl->u.ip4; + struct sctp_bind_addr *bp; + struct sctp_sockaddr_entry *laddr; + struct dst_entry *dst = NULL; + union sctp_addr *daddr = &t->ipaddr; + union sctp_addr dst_saddr; + + memset(fl4, 0x0, sizeof(struct flowi4)); + fl4->daddr = daddr->v4.sin_addr.s_addr; + fl4->fl4_dport = daddr->v4.sin_port; + fl4->flowi4_proto = IPPROTO_SCTP; + if (asoc) { + fl4->flowi4_tos = RT_CONN_FLAGS(asoc->base.sk); + fl4->flowi4_oif = asoc->base.sk->sk_bound_dev_if; + fl4->fl4_sport = htons(asoc->base.bind_addr.port); + } + if (saddr) { + fl4->saddr = saddr->v4.sin_addr.s_addr; + fl4->fl4_sport = saddr->v4.sin_port; + } + + pr_debug("%s: dst:%pI4, src:%pI4 - ", __func__, &fl4->daddr, + &fl4->saddr); + + rt = ip_route_output_key(sock_net(sk), fl4); + if (!IS_ERR(rt)) + dst = &rt->dst; + + /* If there is no association or if a source address is passed, no + * more validation is required. + */ + if (!asoc || saddr) + goto out; + + bp = &asoc->base.bind_addr; + + if (dst) { + /* Walk through the bind address list and look for a bind + * address that matches the source address of the returned dst. + */ + sctp_v4_dst_saddr(&dst_saddr, fl4, htons(bp->port)); + rcu_read_lock(); + list_for_each_entry_rcu(laddr, &bp->address_list, list) { + if (!laddr->valid || (laddr->state == SCTP_ADDR_DEL) || + (laddr->state != SCTP_ADDR_SRC && + !asoc->src_out_of_asoc_ok)) + continue; + if (sctp_v4_cmp_addr(&dst_saddr, &laddr->a)) + goto out_unlock; + } + rcu_read_unlock(); + + /* None of the bound addresses match the source address of the + * dst. So release it. + */ + dst_release(dst); + dst = NULL; + } + + /* Walk through the bind address list and try to get a dst that + * matches a bind address as the source address. + */ + rcu_read_lock(); + list_for_each_entry_rcu(laddr, &bp->address_list, list) { + if (!laddr->valid) + continue; + if ((laddr->state == SCTP_ADDR_SRC) && + (AF_INET == laddr->a.sa.sa_family)) { + fl4->fl4_sport = laddr->a.v4.sin_port; + flowi4_update_output(fl4, + asoc->base.sk->sk_bound_dev_if, + RT_CONN_FLAGS(asoc->base.sk), + daddr->v4.sin_addr.s_addr, + laddr->a.v4.sin_addr.s_addr); + + rt = ip_route_output_key(sock_net(sk), fl4); + if (!IS_ERR(rt)) { + dst = &rt->dst; + goto out_unlock; + } + } + } + +out_unlock: + rcu_read_unlock(); +out: + t->dst = dst; + if (dst) + pr_debug("rt_dst:%pI4, rt_src:%pI4\n", + &fl4->daddr, &fl4->saddr); + else + pr_debug("no route\n"); +} + +/* For v4, the source address is cached in the route entry(dst). So no need + * to cache it separately and hence this is an empty routine. + */ +static void sctp_v4_get_saddr(struct sctp_sock *sk, + struct sctp_transport *t, + struct flowi *fl) +{ + union sctp_addr *saddr = &t->saddr; + struct rtable *rt = (struct rtable *)t->dst; + + if (rt) { + saddr->v4.sin_family = AF_INET; + saddr->v4.sin_addr.s_addr = fl->u.ip4.saddr; + } +} + +/* What interface did this skb arrive on? */ +static int sctp_v4_skb_iif(const struct sk_buff *skb) +{ + return inet_iif(skb); +} + +/* Was this packet marked by Explicit Congestion Notification? */ +static int sctp_v4_is_ce(const struct sk_buff *skb) +{ + return INET_ECN_is_ce(ip_hdr(skb)->tos); +} + +/* Create and initialize a new sk for the socket returned by accept(). 
 */
+static struct sock *sctp_v4_create_accept_sk(struct sock *sk,
+					     struct sctp_association *asoc)
+{
+	struct sock *newsk = sk_alloc(sock_net(sk), PF_INET, GFP_KERNEL,
+			sk->sk_prot);
+	struct inet_sock *newinet;
+
+	if (!newsk)
+		goto out;
+
+	sock_init_data(NULL, newsk);
+
+	sctp_copy_sock(newsk, sk, asoc);
+	sock_reset_flag(newsk, SOCK_ZAPPED);
+
+	newinet = inet_sk(newsk);
+
+	newinet->inet_daddr = asoc->peer.primary_addr.v4.sin_addr.s_addr;
+
+	sk_refcnt_debug_inc(newsk);
+
+	if (newsk->sk_prot->init(newsk)) {
+		sk_common_release(newsk);
+		newsk = NULL;
+	}
+
+out:
+	return newsk;
+}
+
+static int sctp_v4_addr_to_user(struct sctp_sock *sp, union sctp_addr *addr)
+{
+	/* No address mapping for V4 sockets */
+	return sizeof(struct sockaddr_in);
+}
+
+/* Dump the v4 addr to the seq file. */
+static void sctp_v4_seq_dump_addr(struct seq_file *seq, union sctp_addr *addr)
+{
+	seq_printf(seq, "%pI4 ", &addr->v4.sin_addr);
+}
+
+static void sctp_v4_ecn_capable(struct sock *sk)
+{
+	INET_ECN_xmit(sk);
+}
+
+static void sctp_addr_wq_timeout_handler(unsigned long arg)
+{
+	struct net *net = (struct net *)arg;
+	struct sctp_sockaddr_entry *addrw, *temp;
+	struct sctp_sock *sp;
+
+	spin_lock_bh(&net->sctp.addr_wq_lock);
+
+	list_for_each_entry_safe(addrw, temp, &net->sctp.addr_waitq, list) {
+		pr_debug("%s: the first ent in wq:%p is addr:%pISc for cmd:%d at "
+			 "entry:%p\n", __func__, &net->sctp.addr_waitq, &addrw->a.sa,
+			 addrw->state, addrw);
+
+#if IS_ENABLED(CONFIG_IPV6)
+		/* Now we send an ASCONF for each association */
+		/* Note: we currently don't handle link local IPv6 addresses */
+		if (addrw->a.sa.sa_family == AF_INET6) {
+			struct in6_addr *in6;
+
+			if (ipv6_addr_type(&addrw->a.v6.sin6_addr) &
+			    IPV6_ADDR_LINKLOCAL)
+				goto free_next;
+
+			in6 = (struct in6_addr *)&addrw->a.v6.sin6_addr;
+			if (ipv6_chk_addr(net, in6, NULL, 0) == 0 &&
+			    addrw->state == SCTP_ADDR_NEW) {
+				unsigned long timeo_val;
+
+				pr_debug("%s: this is on DAD, trying %d sec "
+					 "later\n", __func__,
+					 SCTP_ADDRESS_TICK_DELAY);
+
+				timeo_val = jiffies;
+				timeo_val += msecs_to_jiffies(SCTP_ADDRESS_TICK_DELAY);
+				mod_timer(&net->sctp.addr_wq_timer, timeo_val);
+				break;
+			}
+		}
+#endif
+		list_for_each_entry(sp, &net->sctp.auto_asconf_splist, auto_asconf_list) {
+			struct sock *sk;
+
+			sk = sctp_opt2sk(sp);
+			/* ignore bound-specific endpoints */
+			if (!sctp_is_ep_boundall(sk))
+				continue;
+			bh_lock_sock(sk);
+			if (sctp_asconf_mgmt(sp, addrw) < 0)
+				pr_debug("%s: sctp_asconf_mgmt failed\n", __func__);
+			bh_unlock_sock(sk);
+		}
+#if IS_ENABLED(CONFIG_IPV6)
+free_next:
+#endif
+		list_del(&addrw->list);
+		kfree(addrw);
+	}
+	spin_unlock_bh(&net->sctp.addr_wq_lock);
+}
+
+static void sctp_free_addr_wq(struct net *net)
+{
+	struct sctp_sockaddr_entry *addrw;
+	struct sctp_sockaddr_entry *temp;
+
+	spin_lock_bh(&net->sctp.addr_wq_lock);
+	del_timer(&net->sctp.addr_wq_timer);
+	list_for_each_entry_safe(addrw, temp, &net->sctp.addr_waitq, list) {
+		list_del(&addrw->list);
+		kfree(addrw);
+	}
+	spin_unlock_bh(&net->sctp.addr_wq_lock);
+}
+
+/* lookup the entry for the same address in the addr_waitq
+ * sctp_addr_wq MUST be locked
+ */
+static struct sctp_sockaddr_entry *sctp_addr_wq_lookup(struct net *net,
+					struct sctp_sockaddr_entry *addr)
+{
+	struct sctp_sockaddr_entry *addrw;
+
+	list_for_each_entry(addrw, &net->sctp.addr_waitq, list) {
+		if (addrw->a.sa.sa_family != addr->a.sa.sa_family)
+			continue;
+		if (addrw->a.sa.sa_family == AF_INET) {
+			if (addrw->a.v4.sin_addr.s_addr ==
+			    addr->a.v4.sin_addr.s_addr)
+				return addrw;
+		} else if (addrw->a.sa.sa_family == AF_INET6) {
+			if (ipv6_addr_equal(&addrw->a.v6.sin6_addr,
+					    &addr->a.v6.sin6_addr))
+				return addrw;
+		}
+	}
+	return NULL;
+}
+
+void sctp_addr_wq_mgmt(struct net *net, struct sctp_sockaddr_entry *addr, int cmd)
+{
+	struct sctp_sockaddr_entry *addrw;
+	unsigned long timeo_val;
+
+	/* first, we check if an opposite message already exists in the queue.
+	 * If we find such a message, it is removed.
+	 * This operation is a bit clumsy, but the DHCP client attaches the
+	 * new address only after a couple of additions and deletions of
+	 * that address
+	 */
+
+	spin_lock_bh(&net->sctp.addr_wq_lock);
+	/* Offsets existing events in addr_wq */
+	addrw = sctp_addr_wq_lookup(net, addr);
+	if (addrw) {
+		if (addrw->state != cmd) {
+			pr_debug("%s: offsets existing entry for %d, addr:%pISc "
+				 "in wq:%p\n", __func__, addrw->state, &addrw->a.sa,
+				 &net->sctp.addr_waitq);
+
+			list_del(&addrw->list);
+			kfree(addrw);
+		}
+		spin_unlock_bh(&net->sctp.addr_wq_lock);
+		return;
+	}
+
+	/* OK, we have to add the new address to the wait queue */
+	addrw = kmemdup(addr, sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
+	if (addrw == NULL) {
+		spin_unlock_bh(&net->sctp.addr_wq_lock);
+		return;
+	}
+	addrw->state = cmd;
+	list_add_tail(&addrw->list, &net->sctp.addr_waitq);
+
+	pr_debug("%s: add new entry for cmd:%d, addr:%pISc in wq:%p\n",
+		 __func__, addrw->state, &addrw->a.sa, &net->sctp.addr_waitq);
+
+	if (!timer_pending(&net->sctp.addr_wq_timer)) {
+		timeo_val = jiffies;
+		timeo_val += msecs_to_jiffies(SCTP_ADDRESS_TICK_DELAY);
+		mod_timer(&net->sctp.addr_wq_timer, timeo_val);
+	}
+	spin_unlock_bh(&net->sctp.addr_wq_lock);
+}
+
+/* Event handler for inet address addition/deletion events.
+ * The sctp_local_addr_list needs to be protected by a spin lock since
+ * multiple notifiers (say IPv4 and IPv6) may be running at the same
+ * time and thus corrupt the list.
+ * The reader side is protected with RCU.
+ */
+static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
+			       void *ptr)
+{
+	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+	struct sctp_sockaddr_entry *addr = NULL;
+	struct sctp_sockaddr_entry *temp;
+	struct net *net = dev_net(ifa->ifa_dev->dev);
+	int found = 0;
+
+	switch (ev) {
+	case NETDEV_UP:
+		addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
+		if (addr) {
+			addr->a.v4.sin_family = AF_INET;
+			addr->a.v4.sin_port = 0;
+			addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
+			addr->valid = 1;
+			spin_lock_bh(&net->sctp.local_addr_lock);
+			list_add_tail_rcu(&addr->list, &net->sctp.local_addr_list);
+			sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_NEW);
+			spin_unlock_bh(&net->sctp.local_addr_lock);
+		}
+		break;
+	case NETDEV_DOWN:
+		spin_lock_bh(&net->sctp.local_addr_lock);
+		list_for_each_entry_safe(addr, temp,
+					&net->sctp.local_addr_list, list) {
+			if (addr->a.sa.sa_family == AF_INET &&
+			    addr->a.v4.sin_addr.s_addr ==
+			    ifa->ifa_local) {
+				sctp_addr_wq_mgmt(net, addr, SCTP_ADDR_DEL);
+				found = 1;
+				addr->valid = 0;
+				list_del_rcu(&addr->list);
+				break;
+			}
+		}
+		spin_unlock_bh(&net->sctp.local_addr_lock);
+		if (found)
+			kfree_rcu(addr, rcu);
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+/*
+ * Initialize the control inode/socket with a control endpoint data
+ * structure.  This endpoint is reserved exclusively for the OOTB processing.
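+ * ("Out of the blue" packets are ones that match no existing endpoint
+ * or association; the control socket created below is what the stack
+ * uses to transmit the replies they call for, e.g. ABORT chunks.)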
+ */
+static int sctp_ctl_sock_init(struct net *net)
+{
+	int err;
+	sa_family_t family = PF_INET;
+
+	if (sctp_get_pf_specific(PF_INET6))
+		family = PF_INET6;
+
+	err = inet_ctl_sock_create(&net->sctp.ctl_sock, family,
+				   SOCK_SEQPACKET, IPPROTO_SCTP, net);
+
+	/* If IPv6 socket could not be created, try the IPv4 socket */
+	if (err < 0 && family == PF_INET6)
+		err = inet_ctl_sock_create(&net->sctp.ctl_sock, AF_INET,
+					   SOCK_SEQPACKET, IPPROTO_SCTP,
+					   net);
+
+	if (err < 0) {
+		pr_err("Failed to create the SCTP control socket\n");
+		return err;
+	}
+	return 0;
+}
+
+/* Register address family specific functions. */
+int sctp_register_af(struct sctp_af *af)
+{
+	switch (af->sa_family) {
+	case AF_INET:
+		if (sctp_af_v4_specific)
+			return 0;
+		sctp_af_v4_specific = af;
+		break;
+	case AF_INET6:
+		if (sctp_af_v6_specific)
+			return 0;
+		sctp_af_v6_specific = af;
+		break;
+	default:
+		return 0;
+	}
+
+	INIT_LIST_HEAD(&af->list);
+	list_add_tail(&af->list, &sctp_address_families);
+	return 1;
+}
+
+/* Get the table of functions for manipulating a particular address
+ * family.
+ */
+struct sctp_af *sctp_get_af_specific(sa_family_t family)
+{
+	switch (family) {
+	case AF_INET:
+		return sctp_af_v4_specific;
+	case AF_INET6:
+		return sctp_af_v6_specific;
+	default:
+		return NULL;
+	}
+}
+
+/* Common code to initialize an AF_INET msg_name. */
+static void sctp_inet_msgname(char *msgname, int *addr_len)
+{
+	struct sockaddr_in *sin;
+
+	sin = (struct sockaddr_in *)msgname;
+	*addr_len = sizeof(struct sockaddr_in);
+	sin->sin_family = AF_INET;
+	memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
+}
+
+/* Copy the peer's primary address as the msg_name. */
+static void sctp_inet_event_msgname(struct sctp_ulpevent *event, char *msgname,
+				    int *addr_len)
+{
+	struct sockaddr_in *sin, *sinfrom;
+
+	if (msgname) {
+		struct sctp_association *asoc;
+
+		asoc = event->asoc;
+		sctp_inet_msgname(msgname, addr_len);
+		sin = (struct sockaddr_in *)msgname;
+		sinfrom = &asoc->peer.primary_addr.v4;
+		sin->sin_port = htons(asoc->peer.port);
+		sin->sin_addr.s_addr = sinfrom->sin_addr.s_addr;
+	}
+}
+
+/* Initialize and copy out a msgname from an inbound skb. */
+static void sctp_inet_skb_msgname(struct sk_buff *skb, char *msgname, int *len)
+{
+	if (msgname) {
+		struct sctphdr *sh = sctp_hdr(skb);
+		struct sockaddr_in *sin = (struct sockaddr_in *)msgname;
+
+		sctp_inet_msgname(msgname, len);
+		sin->sin_port = sh->source;
+		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
+	}
+}
+
+/* Do we support this AF? */
+static int sctp_inet_af_supported(sa_family_t family, struct sctp_sock *sp)
+{
+	/* PF_INET only supports AF_INET addresses. */
+	return AF_INET == family;
+}
+
+/* Address matching with wildcards allowed. */
+static int sctp_inet_cmp_addr(const union sctp_addr *addr1,
+			      const union sctp_addr *addr2,
+			      struct sctp_sock *opt)
+{
+	/* PF_INET only supports AF_INET addresses. */
+	if (addr1->sa.sa_family != addr2->sa.sa_family)
+		return 0;
+	if (htonl(INADDR_ANY) == addr1->v4.sin_addr.s_addr ||
+	    htonl(INADDR_ANY) == addr2->v4.sin_addr.s_addr)
+		return 1;
+	if (addr1->v4.sin_addr.s_addr == addr2->v4.sin_addr.s_addr)
+		return 1;
+
+	return 0;
+}
+
+/* Verify that provided sockaddr looks bindable.  Common verification has
+ * already been taken care of.
+ */
+static int sctp_inet_bind_verify(struct sctp_sock *opt, union sctp_addr *addr)
+{
+	return sctp_v4_available(addr, opt);
+}
+
+/* Verify that sockaddr looks sendable.  Common verification has already
+ * been taken care of.
+ */ +static int sctp_inet_send_verify(struct sctp_sock *opt, union sctp_addr *addr) +{ + return 1; +} + +/* Fill in Supported Address Type information for INIT and INIT-ACK + * chunks. Returns number of addresses supported. + */ +static int sctp_inet_supported_addrs(const struct sctp_sock *opt, + __be16 *types) +{ + types[0] = SCTP_PARAM_IPV4_ADDRESS; + return 1; +} + +/* Wrapper routine that calls the ip transmit routine. */ +static inline int sctp_v4_xmit(struct sk_buff *skb, + struct sctp_transport *transport) +{ + struct inet_sock *inet = inet_sk(skb->sk); + + pr_debug("%s: skb:%p, len:%d, src:%pI4, dst:%pI4\n", __func__, skb, + skb->len, &transport->fl.u.ip4.saddr, &transport->fl.u.ip4.daddr); + + inet->pmtudisc = transport->param_flags & SPP_PMTUD_ENABLE ? + IP_PMTUDISC_DO : IP_PMTUDISC_DONT; + + SCTP_INC_STATS(sock_net(&inet->sk), SCTP_MIB_OUTSCTPPACKS); + + return ip_queue_xmit(&inet->sk, skb, &transport->fl); +} + +static struct sctp_af sctp_af_inet; + +static struct sctp_pf sctp_pf_inet = { + .event_msgname = sctp_inet_event_msgname, + .skb_msgname = sctp_inet_skb_msgname, + .af_supported = sctp_inet_af_supported, + .cmp_addr = sctp_inet_cmp_addr, + .bind_verify = sctp_inet_bind_verify, + .send_verify = sctp_inet_send_verify, + .supported_addrs = sctp_inet_supported_addrs, + .create_accept_sk = sctp_v4_create_accept_sk, + .addr_to_user = sctp_v4_addr_to_user, + .to_sk_saddr = sctp_v4_to_sk_saddr, + .to_sk_daddr = sctp_v4_to_sk_daddr, + .af = &sctp_af_inet +}; + +/* Notifier for inetaddr addition/deletion events. */ +static struct notifier_block sctp_inetaddr_notifier = { + .notifier_call = sctp_inetaddr_event, +}; + +/* Socket operations. */ +static const struct proto_ops inet_seqpacket_ops = { + .family = PF_INET, + .owner = THIS_MODULE, + .release = inet_release, /* Needs to be wrapped... */ + .bind = inet_bind, + .connect = inet_dgram_connect, + .socketpair = sock_no_socketpair, + .accept = inet_accept, + .getname = inet_getname, /* Semantics are different. */ + .poll = sctp_poll, + .ioctl = inet_ioctl, + .listen = sctp_inet_listen, + .shutdown = inet_shutdown, /* Looks harmless. */ + .setsockopt = sock_common_setsockopt, /* IP_SOL IP_OPTION is a problem */ + .getsockopt = sock_common_getsockopt, + .sendmsg = inet_sendmsg, + .recvmsg = sock_common_recvmsg, + .mmap = sock_no_mmap, + .sendpage = sock_no_sendpage, +#ifdef CONFIG_COMPAT + .compat_setsockopt = compat_sock_common_setsockopt, + .compat_getsockopt = compat_sock_common_getsockopt, +#endif +}; + +/* Registration with AF_INET family. */ +static struct inet_protosw sctp_seqpacket_protosw = { + .type = SOCK_SEQPACKET, + .protocol = IPPROTO_SCTP, + .prot = &sctp_prot, + .ops = &inet_seqpacket_ops, + .flags = SCTP_PROTOSW_FLAG +}; +static struct inet_protosw sctp_stream_protosw = { + .type = SOCK_STREAM, + .protocol = IPPROTO_SCTP, + .prot = &sctp_prot, + .ops = &inet_seqpacket_ops, + .flags = SCTP_PROTOSW_FLAG +}; + +/* Register with IP layer. */ +static const struct net_protocol sctp_protocol = { + .handler = sctp_rcv, + .err_handler = sctp_v4_err, + .no_policy = 1, + .netns_ok = 1, + .icmp_strict_tag_validation = 1, +}; + +/* IPv4 address related functions. 
*/ +static struct sctp_af sctp_af_inet = { + .sa_family = AF_INET, + .sctp_xmit = sctp_v4_xmit, + .setsockopt = ip_setsockopt, + .getsockopt = ip_getsockopt, + .get_dst = sctp_v4_get_dst, + .get_saddr = sctp_v4_get_saddr, + .copy_addrlist = sctp_v4_copy_addrlist, + .from_skb = sctp_v4_from_skb, + .from_sk = sctp_v4_from_sk, + .from_addr_param = sctp_v4_from_addr_param, + .to_addr_param = sctp_v4_to_addr_param, + .cmp_addr = sctp_v4_cmp_addr, + .addr_valid = sctp_v4_addr_valid, + .inaddr_any = sctp_v4_inaddr_any, + .is_any = sctp_v4_is_any, + .available = sctp_v4_available, + .scope = sctp_v4_scope, + .skb_iif = sctp_v4_skb_iif, + .is_ce = sctp_v4_is_ce, + .seq_dump_addr = sctp_v4_seq_dump_addr, + .ecn_capable = sctp_v4_ecn_capable, + .net_header_len = sizeof(struct iphdr), + .sockaddr_len = sizeof(struct sockaddr_in), +#ifdef CONFIG_COMPAT + .compat_setsockopt = compat_ip_setsockopt, + .compat_getsockopt = compat_ip_getsockopt, +#endif +}; + +struct sctp_pf *sctp_get_pf_specific(sa_family_t family) +{ + switch (family) { + case PF_INET: + return sctp_pf_inet_specific; + case PF_INET6: + return sctp_pf_inet6_specific; + default: + return NULL; + } +} + +/* Register the PF specific function table. */ +int sctp_register_pf(struct sctp_pf *pf, sa_family_t family) +{ + switch (family) { + case PF_INET: + if (sctp_pf_inet_specific) + return 0; + sctp_pf_inet_specific = pf; + break; + case PF_INET6: + if (sctp_pf_inet6_specific) + return 0; + sctp_pf_inet6_specific = pf; + break; + default: + return 0; + } + return 1; +} + +static inline int init_sctp_mibs(struct net *net) +{ + net->sctp.sctp_statistics = alloc_percpu(struct sctp_mib); + if (!net->sctp.sctp_statistics) + return -ENOMEM; + return 0; +} + +static inline void cleanup_sctp_mibs(struct net *net) +{ + free_percpu(net->sctp.sctp_statistics); +} + +static void sctp_v4_pf_init(void) +{ + /* Initialize the SCTP specific PF functions. */ + sctp_register_pf(&sctp_pf_inet, PF_INET); + sctp_register_af(&sctp_af_inet); +} + +static void sctp_v4_pf_exit(void) +{ + list_del(&sctp_af_inet.list); +} + +static int sctp_v4_protosw_init(void) +{ + int rc; + + rc = proto_register(&sctp_prot, 1); + if (rc) + return rc; + + /* Register SCTP(UDP and TCP style) with socket layer. */ + inet_register_protosw(&sctp_seqpacket_protosw); + inet_register_protosw(&sctp_stream_protosw); + + return 0; +} + +static void sctp_v4_protosw_exit(void) +{ + inet_unregister_protosw(&sctp_stream_protosw); + inet_unregister_protosw(&sctp_seqpacket_protosw); + proto_unregister(&sctp_prot); +} + +static int sctp_v4_add_protocol(void) +{ + /* Register notifier for inet address additions/deletions. */ + register_inetaddr_notifier(&sctp_inetaddr_notifier); + + /* Register SCTP with inet layer. */ + if (inet_add_protocol(&sctp_protocol, IPPROTO_SCTP) < 0) + return -EAGAIN; + + return 0; +} + +static void sctp_v4_del_protocol(void) +{ + inet_del_protocol(&sctp_protocol, IPPROTO_SCTP); + unregister_inetaddr_notifier(&sctp_inetaddr_notifier); +} + +static int __net_init sctp_net_init(struct net *net) +{ + int status; + + /* + * 14. 
Suggested SCTP Protocol Parameter Values + */ + /* The following protocol parameters are RECOMMENDED: */ + /* RTO.Initial - 3 seconds */ + net->sctp.rto_initial = SCTP_RTO_INITIAL; + /* RTO.Min - 1 second */ + net->sctp.rto_min = SCTP_RTO_MIN; + /* RTO.Max - 60 seconds */ + net->sctp.rto_max = SCTP_RTO_MAX; + /* RTO.Alpha - 1/8 */ + net->sctp.rto_alpha = SCTP_RTO_ALPHA; + /* RTO.Beta - 1/4 */ + net->sctp.rto_beta = SCTP_RTO_BETA; + + /* Valid.Cookie.Life - 60 seconds */ + net->sctp.valid_cookie_life = SCTP_DEFAULT_COOKIE_LIFE; + + /* Whether Cookie Preservative is enabled(1) or not(0) */ + net->sctp.cookie_preserve_enable = 1; + + /* Default sctp sockets to use md5 as their hmac alg */ +#if defined (CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5) + net->sctp.sctp_hmac_alg = "md5"; +#elif defined (CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1) + net->sctp.sctp_hmac_alg = "sha1"; +#else + net->sctp.sctp_hmac_alg = NULL; +#endif + + /* Max.Burst - 4 */ + net->sctp.max_burst = SCTP_DEFAULT_MAX_BURST; + + /* Association.Max.Retrans - 10 attempts + * Path.Max.Retrans - 5 attempts (per destination address) + * Max.Init.Retransmits - 8 attempts + */ + net->sctp.max_retrans_association = 10; + net->sctp.max_retrans_path = 5; + net->sctp.max_retrans_init = 8; + + /* Sendbuffer growth - do per-socket accounting */ + net->sctp.sndbuf_policy = 0; + + /* Rcvbuffer growth - do per-socket accounting */ + net->sctp.rcvbuf_policy = 0; + + /* HB.interval - 30 seconds */ + net->sctp.hb_interval = SCTP_DEFAULT_TIMEOUT_HEARTBEAT; + + /* delayed SACK timeout */ + net->sctp.sack_timeout = SCTP_DEFAULT_TIMEOUT_SACK; + + /* Disable ADDIP by default. */ + net->sctp.addip_enable = 0; + net->sctp.addip_noauth = 0; + net->sctp.default_auto_asconf = 0; + + /* Enable PR-SCTP by default. */ + net->sctp.prsctp_enable = 1; + + /* Disable AUTH by default. */ + net->sctp.auth_enable = 0; + + /* Set SCOPE policy to enabled */ + net->sctp.scope_policy = SCTP_SCOPE_POLICY_ENABLE; + + /* Set the default rwnd update threshold */ + net->sctp.rwnd_upd_shift = SCTP_DEFAULT_RWND_SHIFT; + + /* Initialize maximum autoclose timeout. */ + net->sctp.max_autoclose = INT_MAX / HZ; + + status = sctp_sysctl_net_register(net); + if (status) + goto err_sysctl_register; + + /* Allocate and initialise sctp mibs. */ + status = init_sctp_mibs(net); + if (status) + goto err_init_mibs; + + /* Initialize proc fs directory. */ + status = sctp_proc_init(net); + if (status) + goto err_init_proc; + + sctp_dbg_objcnt_init(net); + + /* Initialize the control inode/socket for handling OOTB packets. */ + if ((status = sctp_ctl_sock_init(net))) { + pr_err("Failed to initialize the SCTP control sock\n"); + goto err_ctl_sock_init; + } + + /* Initialize the local address list. 
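+	 * The list is seeded from the currently configured interfaces
+	 * by sctp_get_local_addr_list() and afterwards kept in sync by
+	 * the inetaddr notifier, sctp_inetaddr_event() above.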
*/ + INIT_LIST_HEAD(&net->sctp.local_addr_list); + spin_lock_init(&net->sctp.local_addr_lock); + sctp_get_local_addr_list(net); + + /* Initialize the address event list */ + INIT_LIST_HEAD(&net->sctp.addr_waitq); + INIT_LIST_HEAD(&net->sctp.auto_asconf_splist); + spin_lock_init(&net->sctp.addr_wq_lock); + net->sctp.addr_wq_timer.expires = 0; + setup_timer(&net->sctp.addr_wq_timer, sctp_addr_wq_timeout_handler, + (unsigned long)net); + + return 0; + +err_ctl_sock_init: + sctp_dbg_objcnt_exit(net); + sctp_proc_exit(net); +err_init_proc: + cleanup_sctp_mibs(net); +err_init_mibs: + sctp_sysctl_net_unregister(net); +err_sysctl_register: + return status; +} + +static void __net_exit sctp_net_exit(struct net *net) +{ + /* Free the local address list */ + sctp_free_addr_wq(net); + sctp_free_local_addr_list(net); + + /* Free the control endpoint. */ + inet_ctl_sock_destroy(net->sctp.ctl_sock); + + sctp_dbg_objcnt_exit(net); + + sctp_proc_exit(net); + cleanup_sctp_mibs(net); + sctp_sysctl_net_unregister(net); +} + +static struct pernet_operations sctp_net_ops = { + .init = sctp_net_init, + .exit = sctp_net_exit, +}; + +/* Initialize the universe into something sensible. */ +static __init int sctp_init(void) +{ + int i; + int status = -EINVAL; + unsigned long goal; + unsigned long limit; + int max_share; + int order; + + sock_skb_cb_check_size(sizeof(struct sctp_ulpevent)); + + /* Allocate bind_bucket and chunk caches. */ + status = -ENOBUFS; + sctp_bucket_cachep = kmem_cache_create("sctp_bind_bucket", + sizeof(struct sctp_bind_bucket), + 0, SLAB_HWCACHE_ALIGN, + NULL); + if (!sctp_bucket_cachep) + goto out; + + sctp_chunk_cachep = kmem_cache_create("sctp_chunk", + sizeof(struct sctp_chunk), + 0, SLAB_HWCACHE_ALIGN, + NULL); + if (!sctp_chunk_cachep) + goto err_chunk_cachep; + + status = percpu_counter_init(&sctp_sockets_allocated, 0, GFP_KERNEL); + if (status) + goto err_percpu_counter_init; + + /* Implementation specific variables. */ + + /* Initialize default stream count setup information. */ + sctp_max_instreams = SCTP_DEFAULT_INSTREAMS; + sctp_max_outstreams = SCTP_DEFAULT_OUTSTREAMS; + + /* Initialize handle used for association ids. */ + idr_init(&sctp_assocs_id); + + limit = nr_free_buffer_pages() / 8; + limit = max(limit, 128UL); + sysctl_sctp_mem[0] = limit / 4 * 3; + sysctl_sctp_mem[1] = limit; + sysctl_sctp_mem[2] = sysctl_sctp_mem[0] * 2; + + /* Set per-socket limits to no more than 1/128 the pressure threshold*/ + limit = (sysctl_sctp_mem[1]) << (PAGE_SHIFT - 7); + max_share = min(4UL*1024*1024, limit); + + sysctl_sctp_rmem[0] = SK_MEM_QUANTUM; /* give each asoc 1 page min */ + sysctl_sctp_rmem[1] = 1500 * SKB_TRUESIZE(1); + sysctl_sctp_rmem[2] = max(sysctl_sctp_rmem[1], max_share); + + sysctl_sctp_wmem[0] = SK_MEM_QUANTUM; + sysctl_sctp_wmem[1] = 16*1024; + sysctl_sctp_wmem[2] = max(64*1024, max_share); + + /* Size and allocate the association hash table. + * The methodology is similar to that of the tcp hash tables. 
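	 *
	 * Worked example (illustrative only; it assumes 4 KiB pages and
	 * a 16-byte struct sctp_hashbucket, both of which vary by
	 * configuration): with totalram_pages = 128 * 1024 (512 MiB of
	 * RAM), goal = 2^17 >> 10 = 128 pages, so order = 7 and the
	 * first candidate table holds (1 << 7) * 4096 / 16 = 32768
	 * buckets, which is within the 64K cap and gets allocated.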
+ */ + if (totalram_pages >= (128 * 1024)) + goal = totalram_pages >> (22 - PAGE_SHIFT); + else + goal = totalram_pages >> (24 - PAGE_SHIFT); + + for (order = 0; (1UL << order) < goal; order++) + ; + + do { + sctp_assoc_hashsize = (1UL << order) * PAGE_SIZE / + sizeof(struct sctp_hashbucket); + if ((sctp_assoc_hashsize > (64 * 1024)) && order > 0) + continue; + sctp_assoc_hashtable = (struct sctp_hashbucket *) + __get_free_pages(GFP_ATOMIC|__GFP_NOWARN, order); + } while (!sctp_assoc_hashtable && --order > 0); + if (!sctp_assoc_hashtable) { + pr_err("Failed association hash alloc\n"); + status = -ENOMEM; + goto err_ahash_alloc; + } + for (i = 0; i < sctp_assoc_hashsize; i++) { + rwlock_init(&sctp_assoc_hashtable[i].lock); + INIT_HLIST_HEAD(&sctp_assoc_hashtable[i].chain); + } + + /* Allocate and initialize the endpoint hash table. */ + sctp_ep_hashsize = 64; + sctp_ep_hashtable = + kmalloc(64 * sizeof(struct sctp_hashbucket), GFP_KERNEL); + if (!sctp_ep_hashtable) { + pr_err("Failed endpoint_hash alloc\n"); + status = -ENOMEM; + goto err_ehash_alloc; + } + for (i = 0; i < sctp_ep_hashsize; i++) { + rwlock_init(&sctp_ep_hashtable[i].lock); + INIT_HLIST_HEAD(&sctp_ep_hashtable[i].chain); + } + + /* Allocate and initialize the SCTP port hash table. */ + do { + sctp_port_hashsize = (1UL << order) * PAGE_SIZE / + sizeof(struct sctp_bind_hashbucket); + if ((sctp_port_hashsize > (64 * 1024)) && order > 0) + continue; + sctp_port_hashtable = (struct sctp_bind_hashbucket *) + __get_free_pages(GFP_ATOMIC|__GFP_NOWARN, order); + } while (!sctp_port_hashtable && --order > 0); + if (!sctp_port_hashtable) { + pr_err("Failed bind hash alloc\n"); + status = -ENOMEM; + goto err_bhash_alloc; + } + for (i = 0; i < sctp_port_hashsize; i++) { + spin_lock_init(&sctp_port_hashtable[i].lock); + INIT_HLIST_HEAD(&sctp_port_hashtable[i].chain); + } + + pr_info("Hash tables configured (established %d bind %d)\n", + sctp_assoc_hashsize, sctp_port_hashsize); + + sctp_sysctl_register(); + + INIT_LIST_HEAD(&sctp_address_families); + sctp_v4_pf_init(); + sctp_v6_pf_init(); + + status = sctp_v4_protosw_init(); + + if (status) + goto err_protosw_init; + + status = sctp_v6_protosw_init(); + if (status) + goto err_v6_protosw_init; + + status = register_pernet_subsys(&sctp_net_ops); + if (status) + goto err_register_pernet_subsys; + + status = sctp_v4_add_protocol(); + if (status) + goto err_add_protocol; + + /* Register SCTP with inet6 layer. */ + status = sctp_v6_add_protocol(); + if (status) + goto err_v6_add_protocol; + +out: + return status; +err_v6_add_protocol: + sctp_v4_del_protocol(); +err_add_protocol: + unregister_pernet_subsys(&sctp_net_ops); +err_register_pernet_subsys: + sctp_v6_protosw_exit(); +err_v6_protosw_init: + sctp_v4_protosw_exit(); +err_protosw_init: + sctp_v4_pf_exit(); + sctp_v6_pf_exit(); + sctp_sysctl_unregister(); + free_pages((unsigned long)sctp_port_hashtable, + get_order(sctp_port_hashsize * + sizeof(struct sctp_bind_hashbucket))); +err_bhash_alloc: + kfree(sctp_ep_hashtable); +err_ehash_alloc: + free_pages((unsigned long)sctp_assoc_hashtable, + get_order(sctp_assoc_hashsize * + sizeof(struct sctp_hashbucket))); +err_ahash_alloc: + percpu_counter_destroy(&sctp_sockets_allocated); +err_percpu_counter_init: + kmem_cache_destroy(sctp_chunk_cachep); +err_chunk_cachep: + kmem_cache_destroy(sctp_bucket_cachep); + goto out; +} + +/* Exit handler for the SCTP protocol. */ +static __exit void sctp_exit(void) +{ + /* BUG. 
This should probably do something useful like clean
+	 * up all the remaining associations and all that memory.
+	 */
+
+	/* Unregister with inet6/inet layers. */
+	sctp_v6_del_protocol();
+	sctp_v4_del_protocol();
+
+	unregister_pernet_subsys(&sctp_net_ops);
+
+	/* Free protosw registrations */
+	sctp_v6_protosw_exit();
+	sctp_v4_protosw_exit();
+
+	/* Unregister with socket layer. */
+	sctp_v6_pf_exit();
+	sctp_v4_pf_exit();
+
+	sctp_sysctl_unregister();
+
+	free_pages((unsigned long)sctp_assoc_hashtable,
+		   get_order(sctp_assoc_hashsize *
+			     sizeof(struct sctp_hashbucket)));
+	kfree(sctp_ep_hashtable);
+	free_pages((unsigned long)sctp_port_hashtable,
+		   get_order(sctp_port_hashsize *
+			     sizeof(struct sctp_bind_hashbucket)));
+
+	percpu_counter_destroy(&sctp_sockets_allocated);
+
+	rcu_barrier(); /* Wait for completion of call_rcu()'s */
+
+	kmem_cache_destroy(sctp_chunk_cachep);
+	kmem_cache_destroy(sctp_bucket_cachep);
+}
+
+module_init(sctp_init);
+module_exit(sctp_exit);
+
+/*
+ * __stringify doesn't like enums, so use IPPROTO_SCTP value (132) directly.
+ */
+MODULE_ALIAS("net-pf-" __stringify(PF_INET) "-proto-132");
+MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-132");
+MODULE_AUTHOR("Linux Kernel SCTP developers <linux-sctp@vger.kernel.org>");
+MODULE_DESCRIPTION("Support for the SCTP protocol (RFC2960)");
+module_param_named(no_checksums, sctp_checksum_disable, bool, 0644);
+MODULE_PARM_DESC(no_checksums, "Disable checksums computing and verification");
+MODULE_LICENSE("GPL");
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
new file mode 100644
index 000000000..06320c8c1
--- /dev/null
+++ b/net/sctp/sm_make_chunk.c
@@ -0,0 +1,3501 @@
+/* SCTP kernel implementation
+ * (C) Copyright IBM Corp. 2001, 2004
+ * Copyright (c) 1999-2000 Cisco, Inc.
+ * Copyright (c) 1999-2001 Motorola, Inc.
+ * Copyright (c) 2001-2002 Intel Corp.
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * These functions work with the state functions in sctp_sm_statefuns.c
+ * to implement the state operations.  These functions implement the
+ * steps which require modifying existing data structures.
+ *
+ * This SCTP implementation is free software;
+ * you can redistribute it and/or modify it under the terms of
+ * the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This SCTP implementation is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ *                 ************************
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNU CC; see the file COPYING.  If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ *    lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ *    La Monte H.P. Yarroll <piggy@acm.org>
+ *    Karl Knutson          <karl@athena.chicago.il.us>
+ *    C. Robin              <chris@hundredacre.ac.uk>
+ *    Jon Grimm             <jgrimm@us.ibm.com>
+ *    Xingang Guo           <xingang.guo@intel.com>
+ *    Dajiang Zhang         <dajiang.zhang@nokia.com>
+ *    Sridhar Samudrala     <sri@us.ibm.com>
+ *    Daisy Chang           <daisyc@us.ibm.com>
+ *    Ardelle Fan           <ardelle.fan@intel.com>
+ *    Kevin Gao             <kevin.gao@intel.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/net.h>
+#include <linux/inet.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <linux/slab.h>
+#include <net/sock.h>
+
+#include <linux/skbuff.h>
+#include <linux/random.h>	/* for get_random_bytes */
+#include <net/sctp/sctp.h>
+#include <net/sctp/sm.h>
+
+static struct sctp_chunk *sctp_make_control(const struct sctp_association *asoc,
+					    __u8 type, __u8 flags, int paylen);
+static struct sctp_chunk *sctp_make_data(const struct sctp_association *asoc,
+					 __u8 flags, int paylen);
+static struct sctp_chunk *_sctp_make_chunk(const struct sctp_association *asoc,
+					   __u8 type, __u8 flags, int paylen);
+static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
+					const struct sctp_association *asoc,
+					const struct sctp_chunk *init_chunk,
+					int *cookie_len,
+					const __u8 *raw_addrs, int addrs_len);
+static int sctp_process_param(struct sctp_association *asoc,
+			      union sctp_params param,
+			      const union sctp_addr *peer_addr,
+			      gfp_t gfp);
+static void *sctp_addto_param(struct sctp_chunk *chunk, int len,
+			      const void *data);
+static void *sctp_addto_chunk_fixed(struct sctp_chunk *, int len,
+				    const void *data);
+
+/* Control chunk destructor */
+static void sctp_control_release_owner(struct sk_buff *skb)
+{
+	/*TODO: do memory release */
+}
+
+static void sctp_control_set_owner_w(struct sctp_chunk *chunk)
+{
+	struct sctp_association *asoc = chunk->asoc;
+	struct sk_buff *skb = chunk->skb;
+
+	/* TODO: properly account for control chunks.
+	 * To do it right we'll need:
+	 *  1) endpoint if association isn't known.
+	 *  2) proper memory accounting.
+	 *
+	 *  Don't do anything for now.
+	 */
+	skb->sk = asoc ? asoc->base.sk : NULL;
+	skb->destructor = sctp_control_release_owner;
+}
+
+/* What was the inbound interface for this chunk? */
+int sctp_chunk_iif(const struct sctp_chunk *chunk)
+{
+	struct sctp_af *af;
+	int iif = 0;
+
+	af = sctp_get_af_specific(ipver2af(ip_hdr(chunk->skb)->version));
+	if (af)
+		iif = af->skb_iif(chunk->skb);
+
+	return iif;
+}
+
+/* RFC 2960 3.3.2 Initiation (INIT) (1)
+ *
+ * Note 2: The ECN capable field is reserved for future use of
+ * Explicit Congestion Notification.
+ */
+static const struct sctp_paramhdr ecap_param = {
+	SCTP_PARAM_ECN_CAPABLE,
+	cpu_to_be16(sizeof(struct sctp_paramhdr)),
+};
+static const struct sctp_paramhdr prsctp_param = {
+	SCTP_PARAM_FWD_TSN_SUPPORT,
+	cpu_to_be16(sizeof(struct sctp_paramhdr)),
+};
+
+/* A helper to initialize an op error inside a
+ * provided chunk, as most cause codes will be embedded inside an
+ * abort chunk.
+ */
+void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
+		     size_t paylen)
+{
+	sctp_errhdr_t err;
+	__u16 len;
+
+	/* Cause code constants are now defined in network order.  */
+	err.cause = cause_code;
+	len = sizeof(sctp_errhdr_t) + paylen;
+	err.length = htons(len);
+	chunk->subh.err_hdr = sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err);
+}
+
+/* A helper to initialize an op error inside a
+ * provided chunk, as most cause codes will be embedded inside an
+ * abort chunk.
Differs from sctp_init_cause in that it won't oops + * if there isn't enough space in the op error chunk + */ +static int sctp_init_cause_fixed(struct sctp_chunk *chunk, __be16 cause_code, + size_t paylen) +{ + sctp_errhdr_t err; + __u16 len; + + /* Cause code constants are now defined in network order. */ + err.cause = cause_code; + len = sizeof(sctp_errhdr_t) + paylen; + err.length = htons(len); + + if (skb_tailroom(chunk->skb) < len) + return -ENOSPC; + chunk->subh.err_hdr = sctp_addto_chunk_fixed(chunk, + sizeof(sctp_errhdr_t), + &err); + return 0; +} +/* 3.3.2 Initiation (INIT) (1) + * + * This chunk is used to initiate a SCTP association between two + * endpoints. The format of the INIT chunk is shown below: + * + * 0 1 2 3 + * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Type = 1 | Chunk Flags | Chunk Length | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Initiate Tag | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Advertised Receiver Window Credit (a_rwnd) | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Number of Outbound Streams | Number of Inbound Streams | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Initial TSN | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * \ \ + * / Optional/Variable-Length Parameters / + * \ \ + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * + * The INIT chunk contains the following parameters. Unless otherwise + * noted, each parameter MUST only be included once in the INIT chunk. + * + * Fixed Parameters Status + * ---------------------------------------------- + * Initiate Tag Mandatory + * Advertised Receiver Window Credit Mandatory + * Number of Outbound Streams Mandatory + * Number of Inbound Streams Mandatory + * Initial TSN Mandatory + * + * Variable Parameters Status Type Value + * ------------------------------------------------------------- + * IPv4 Address (Note 1) Optional 5 + * IPv6 Address (Note 1) Optional 6 + * Cookie Preservative Optional 9 + * Reserved for ECN Capable (Note 2) Optional 32768 (0x8000) + * Host Name Address (Note 3) Optional 11 + * Supported Address Types (Note 4) Optional 12 + */ +struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, + const struct sctp_bind_addr *bp, + gfp_t gfp, int vparam_len) +{ + struct net *net = sock_net(asoc->base.sk); + struct sctp_endpoint *ep = asoc->ep; + sctp_inithdr_t init; + union sctp_params addrs; + size_t chunksize; + struct sctp_chunk *retval = NULL; + int num_types, addrs_len = 0; + struct sctp_sock *sp; + sctp_supported_addrs_param_t sat; + __be16 types[2]; + sctp_adaptation_ind_param_t aiparam; + sctp_supported_ext_param_t ext_param; + int num_ext = 0; + __u8 extensions[3]; + sctp_paramhdr_t *auth_chunks = NULL, + *auth_hmacs = NULL; + + /* RFC 2960 3.3.2 Initiation (INIT) (1) + * + * Note 1: The INIT chunks can contain multiple addresses that + * can be IPv4 and/or IPv6 in any combination. + */ + retval = NULL; + + /* Convert the provided bind address list to raw format. 
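+	 * ("Raw" here means the packed wire encoding: a back-to-back
+	 * sequence of address parameters, each a parameter header
+	 * followed by the address, which is appended to the INIT chunk
+	 * as-is.)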
*/ + addrs = sctp_bind_addrs_to_raw(bp, &addrs_len, gfp); + + init.init_tag = htonl(asoc->c.my_vtag); + init.a_rwnd = htonl(asoc->rwnd); + init.num_outbound_streams = htons(asoc->c.sinit_num_ostreams); + init.num_inbound_streams = htons(asoc->c.sinit_max_instreams); + init.initial_tsn = htonl(asoc->c.initial_tsn); + + /* How many address types are needed? */ + sp = sctp_sk(asoc->base.sk); + num_types = sp->pf->supported_addrs(sp, types); + + chunksize = sizeof(init) + addrs_len; + chunksize += WORD_ROUND(SCTP_SAT_LEN(num_types)); + chunksize += sizeof(ecap_param); + + if (net->sctp.prsctp_enable) + chunksize += sizeof(prsctp_param); + + /* ADDIP: Section 4.2.7: + * An implementation supporting this extension [ADDIP] MUST list + * the ASCONF,the ASCONF-ACK, and the AUTH chunks in its INIT and + * INIT-ACK parameters. + */ + if (net->sctp.addip_enable) { + extensions[num_ext] = SCTP_CID_ASCONF; + extensions[num_ext+1] = SCTP_CID_ASCONF_ACK; + num_ext += 2; + } + + if (sp->adaptation_ind) + chunksize += sizeof(aiparam); + + chunksize += vparam_len; + + /* Account for AUTH related parameters */ + if (ep->auth_enable) { + /* Add random parameter length*/ + chunksize += sizeof(asoc->c.auth_random); + + /* Add HMACS parameter length if any were defined */ + auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs; + if (auth_hmacs->length) + chunksize += WORD_ROUND(ntohs(auth_hmacs->length)); + else + auth_hmacs = NULL; + + /* Add CHUNKS parameter length */ + auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks; + if (auth_chunks->length) + chunksize += WORD_ROUND(ntohs(auth_chunks->length)); + else + auth_chunks = NULL; + + extensions[num_ext] = SCTP_CID_AUTH; + num_ext += 1; + } + + /* If we have any extensions to report, account for that */ + if (num_ext) + chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) + + num_ext); + + /* RFC 2960 3.3.2 Initiation (INIT) (1) + * + * Note 3: An INIT chunk MUST NOT contain more than one Host + * Name address parameter. Moreover, the sender of the INIT + * MUST NOT combine any other address types with the Host Name + * address in the INIT. The receiver of INIT MUST ignore any + * other address types if the Host Name address parameter is + * present in the received INIT chunk. + * + * PLEASE DO NOT FIXME [This version does not support Host Name.] + */ + + retval = sctp_make_control(asoc, SCTP_CID_INIT, 0, chunksize); + if (!retval) + goto nodata; + + retval->subh.init_hdr = + sctp_addto_chunk(retval, sizeof(init), &init); + retval->param_hdr.v = + sctp_addto_chunk(retval, addrs_len, addrs.v); + + /* RFC 2960 3.3.2 Initiation (INIT) (1) + * + * Note 4: This parameter, when present, specifies all the + * address types the sending endpoint can support. The absence + * of this parameter indicates that the sending endpoint can + * support any address type. + */ + sat.param_hdr.type = SCTP_PARAM_SUPPORTED_ADDRESS_TYPES; + sat.param_hdr.length = htons(SCTP_SAT_LEN(num_types)); + sctp_addto_chunk(retval, sizeof(sat), &sat); + sctp_addto_chunk(retval, num_types * sizeof(__u16), &types); + + sctp_addto_chunk(retval, sizeof(ecap_param), &ecap_param); + + /* Add the supported extensions parameter. 
Be nice and add this
+ * first, before adding the parameters for the extensions themselves.
+ */
+ if (num_ext) {
+ ext_param.param_hdr.type = SCTP_PARAM_SUPPORTED_EXT;
+ ext_param.param_hdr.length =
+ htons(sizeof(sctp_supported_ext_param_t) + num_ext);
+ sctp_addto_chunk(retval, sizeof(sctp_supported_ext_param_t),
+ &ext_param);
+ sctp_addto_param(retval, num_ext, extensions);
+ }
+
+ if (net->sctp.prsctp_enable)
+ sctp_addto_chunk(retval, sizeof(prsctp_param), &prsctp_param);
+
+ if (sp->adaptation_ind) {
+ aiparam.param_hdr.type = SCTP_PARAM_ADAPTATION_LAYER_IND;
+ aiparam.param_hdr.length = htons(sizeof(aiparam));
+ aiparam.adaptation_ind = htonl(sp->adaptation_ind);
+ sctp_addto_chunk(retval, sizeof(aiparam), &aiparam);
+ }
+
+ /* Add SCTP-AUTH chunks to the parameter list */
+ if (ep->auth_enable) {
+ sctp_addto_chunk(retval, sizeof(asoc->c.auth_random),
+ asoc->c.auth_random);
+ if (auth_hmacs)
+ sctp_addto_chunk(retval, ntohs(auth_hmacs->length),
+ auth_hmacs);
+ if (auth_chunks)
+ sctp_addto_chunk(retval, ntohs(auth_chunks->length),
+ auth_chunks);
+ }
+nodata:
+ kfree(addrs.v);
+ return retval;
+}
+
+struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
+ const struct sctp_chunk *chunk,
+ gfp_t gfp, int unkparam_len)
+{
+ sctp_inithdr_t initack;
+ struct sctp_chunk *retval;
+ union sctp_params addrs;
+ struct sctp_sock *sp;
+ int addrs_len;
+ sctp_cookie_param_t *cookie;
+ int cookie_len;
+ size_t chunksize;
+ sctp_adaptation_ind_param_t aiparam;
+ sctp_supported_ext_param_t ext_param;
+ int num_ext = 0;
+ __u8 extensions[3];
+ sctp_paramhdr_t *auth_chunks = NULL,
+ *auth_hmacs = NULL,
+ *auth_random = NULL;
+
+ retval = NULL;
+
+ /* Note: there may be no addresses to embed. */
+ addrs = sctp_bind_addrs_to_raw(&asoc->base.bind_addr, &addrs_len, gfp);
+
+ initack.init_tag = htonl(asoc->c.my_vtag);
+ initack.a_rwnd = htonl(asoc->rwnd);
+ initack.num_outbound_streams = htons(asoc->c.sinit_num_ostreams);
+ initack.num_inbound_streams = htons(asoc->c.sinit_max_instreams);
+ initack.initial_tsn = htonl(asoc->c.initial_tsn);
+
+ /* FIXME: We really ought to build the cookie right
+ * into the packet instead of allocating more fresh memory.
+ */
+ cookie = sctp_pack_cookie(asoc->ep, asoc, chunk, &cookie_len,
+ addrs.v, addrs_len);
+ if (!cookie)
+ goto nomem_cookie;
+
+ /* Calculate the total size of the allocation, including the reserved
+ * space for reporting unknown parameters, if specified.
+ */
+ sp = sctp_sk(asoc->base.sk);
+ chunksize = sizeof(initack) + addrs_len + cookie_len + unkparam_len;
+
+ /* Tell peer that we'll do ECN only if peer advertised such cap.
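 */
+ /*
+ * Editor's note: like sctp_make_init() above, this function works in
+ * two passes: each conditional below first grows chunksize, and the
+ * same conditions are re-tested after sctp_make_control() to append
+ * the matching parameters. The two passes must stay in lockstep or
+ * the allocation will be too small for the appends.
+ */
+ /* ECN first: advertise only if the peer did. 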
*/ + if (asoc->peer.ecn_capable) + chunksize += sizeof(ecap_param); + + if (asoc->peer.prsctp_capable) + chunksize += sizeof(prsctp_param); + + if (asoc->peer.asconf_capable) { + extensions[num_ext] = SCTP_CID_ASCONF; + extensions[num_ext+1] = SCTP_CID_ASCONF_ACK; + num_ext += 2; + } + + if (sp->adaptation_ind) + chunksize += sizeof(aiparam); + + if (asoc->peer.auth_capable) { + auth_random = (sctp_paramhdr_t *)asoc->c.auth_random; + chunksize += ntohs(auth_random->length); + + auth_hmacs = (sctp_paramhdr_t *)asoc->c.auth_hmacs; + if (auth_hmacs->length) + chunksize += WORD_ROUND(ntohs(auth_hmacs->length)); + else + auth_hmacs = NULL; + + auth_chunks = (sctp_paramhdr_t *)asoc->c.auth_chunks; + if (auth_chunks->length) + chunksize += WORD_ROUND(ntohs(auth_chunks->length)); + else + auth_chunks = NULL; + + extensions[num_ext] = SCTP_CID_AUTH; + num_ext += 1; + } + + if (num_ext) + chunksize += WORD_ROUND(sizeof(sctp_supported_ext_param_t) + + num_ext); + + /* Now allocate and fill out the chunk. */ + retval = sctp_make_control(asoc, SCTP_CID_INIT_ACK, 0, chunksize); + if (!retval) + goto nomem_chunk; + + /* RFC 2960 6.4 Multi-homed SCTP Endpoints + * + * An endpoint SHOULD transmit reply chunks (e.g., SACK, + * HEARTBEAT ACK, * etc.) to the same destination transport + * address from which it received the DATA or control chunk + * to which it is replying. + * + * [INIT ACK back to where the INIT came from.] + */ + retval->transport = chunk->transport; + + retval->subh.init_hdr = + sctp_addto_chunk(retval, sizeof(initack), &initack); + retval->param_hdr.v = sctp_addto_chunk(retval, addrs_len, addrs.v); + sctp_addto_chunk(retval, cookie_len, cookie); + if (asoc->peer.ecn_capable) + sctp_addto_chunk(retval, sizeof(ecap_param), &ecap_param); + if (num_ext) { + ext_param.param_hdr.type = SCTP_PARAM_SUPPORTED_EXT; + ext_param.param_hdr.length = + htons(sizeof(sctp_supported_ext_param_t) + num_ext); + sctp_addto_chunk(retval, sizeof(sctp_supported_ext_param_t), + &ext_param); + sctp_addto_param(retval, num_ext, extensions); + } + if (asoc->peer.prsctp_capable) + sctp_addto_chunk(retval, sizeof(prsctp_param), &prsctp_param); + + if (sp->adaptation_ind) { + aiparam.param_hdr.type = SCTP_PARAM_ADAPTATION_LAYER_IND; + aiparam.param_hdr.length = htons(sizeof(aiparam)); + aiparam.adaptation_ind = htonl(sp->adaptation_ind); + sctp_addto_chunk(retval, sizeof(aiparam), &aiparam); + } + + if (asoc->peer.auth_capable) { + sctp_addto_chunk(retval, ntohs(auth_random->length), + auth_random); + if (auth_hmacs) + sctp_addto_chunk(retval, ntohs(auth_hmacs->length), + auth_hmacs); + if (auth_chunks) + sctp_addto_chunk(retval, ntohs(auth_chunks->length), + auth_chunks); + } + + /* We need to remove the const qualifier at this point. */ + retval->asoc = (struct sctp_association *) asoc; + +nomem_chunk: + kfree(cookie); +nomem_cookie: + kfree(addrs.v); + return retval; +} + +/* 3.3.11 Cookie Echo (COOKIE ECHO) (10): + * + * This chunk is used only during the initialization of an association. + * It is sent by the initiator of an association to its peer to complete + * the initialization process. This chunk MUST precede any DATA chunk + * sent within the association, but MAY be bundled with one or more DATA + * chunks in the same packet. 
+ * + * 0 1 2 3 + * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Type = 10 |Chunk Flags | Length | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * / Cookie / + * \ \ + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * Chunk Flags: 8 bit + * + * Set to zero on transmit and ignored on receipt. + * + * Length: 16 bits (unsigned integer) + * + * Set to the size of the chunk in bytes, including the 4 bytes of + * the chunk header and the size of the Cookie. + * + * Cookie: variable size + * + * This field must contain the exact cookie received in the + * State Cookie parameter from the previous INIT ACK. + * + * An implementation SHOULD make the cookie as small as possible + * to insure interoperability. + */ +struct sctp_chunk *sctp_make_cookie_echo(const struct sctp_association *asoc, + const struct sctp_chunk *chunk) +{ + struct sctp_chunk *retval; + void *cookie; + int cookie_len; + + cookie = asoc->peer.cookie; + cookie_len = asoc->peer.cookie_len; + + /* Build a cookie echo chunk. */ + retval = sctp_make_control(asoc, SCTP_CID_COOKIE_ECHO, 0, cookie_len); + if (!retval) + goto nodata; + retval->subh.cookie_hdr = + sctp_addto_chunk(retval, cookie_len, cookie); + + /* RFC 2960 6.4 Multi-homed SCTP Endpoints + * + * An endpoint SHOULD transmit reply chunks (e.g., SACK, + * HEARTBEAT ACK, * etc.) to the same destination transport + * address from which it * received the DATA or control chunk + * to which it is replying. + * + * [COOKIE ECHO back to where the INIT ACK came from.] + */ + if (chunk) + retval->transport = chunk->transport; + +nodata: + return retval; +} + +/* 3.3.12 Cookie Acknowledgement (COOKIE ACK) (11): + * + * This chunk is used only during the initialization of an + * association. It is used to acknowledge the receipt of a COOKIE + * ECHO chunk. This chunk MUST precede any DATA or SACK chunk sent + * within the association, but MAY be bundled with one or more DATA + * chunks or SACK chunk in the same SCTP packet. + * + * 0 1 2 3 + * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Type = 11 |Chunk Flags | Length = 4 | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * Chunk Flags: 8 bits + * + * Set to zero on transmit and ignored on receipt. + */ +struct sctp_chunk *sctp_make_cookie_ack(const struct sctp_association *asoc, + const struct sctp_chunk *chunk) +{ + struct sctp_chunk *retval; + + retval = sctp_make_control(asoc, SCTP_CID_COOKIE_ACK, 0, 0); + + /* RFC 2960 6.4 Multi-homed SCTP Endpoints + * + * An endpoint SHOULD transmit reply chunks (e.g., SACK, + * HEARTBEAT ACK, * etc.) to the same destination transport + * address from which it * received the DATA or control chunk + * to which it is replying. + * + * [COOKIE ACK back to where the COOKIE ECHO came from.] + */ + if (retval && chunk) + retval->transport = chunk->transport; + + return retval; +} + +/* + * Appendix A: Explicit Congestion Notification: + * CWR: + * + * RFC 2481 details a specific bit for a sender to send in the header of + * its next outbound TCP segment to indicate to its peer that it has + * reduced its congestion window. This is termed the CWR bit. For + * SCTP the same indication is made by including the CWR chunk. + * This chunk contains one data element, i.e. the TSN number that + * was sent in the ECNE chunk. 
This element represents the lowest
+ * TSN number in the datagram that was originally marked with the
+ * CE bit.
+ *
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Chunk Type=13 | Flags=00000000| Chunk Length = 8 |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Lowest TSN Number |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * Note: The CWR is considered a Control chunk.
+ */
+struct sctp_chunk *sctp_make_cwr(const struct sctp_association *asoc,
+ const __u32 lowest_tsn,
+ const struct sctp_chunk *chunk)
+{
+ struct sctp_chunk *retval;
+ sctp_cwrhdr_t cwr;
+
+ cwr.lowest_tsn = htonl(lowest_tsn);
+ retval = sctp_make_control(asoc, SCTP_CID_ECN_CWR, 0,
+ sizeof(sctp_cwrhdr_t));
+
+ if (!retval)
+ goto nodata;
+
+ retval->subh.ecn_cwr_hdr =
+ sctp_addto_chunk(retval, sizeof(cwr), &cwr);
+
+ /* RFC 2960 6.4 Multi-homed SCTP Endpoints
+ *
+ * An endpoint SHOULD transmit reply chunks (e.g., SACK,
+ * HEARTBEAT ACK, etc.) to the same destination transport
+ * address from which it received the DATA or control chunk
+ * to which it is replying.
+ *
+ * [Report a reduced congestion window back to where the ECNE
+ * came from.]
+ */
+ if (chunk)
+ retval->transport = chunk->transport;
+
+nodata:
+ return retval;
+}
+
+/* Make an ECNE chunk. This is a congestion experienced report. */
+struct sctp_chunk *sctp_make_ecne(const struct sctp_association *asoc,
+ const __u32 lowest_tsn)
+{
+ struct sctp_chunk *retval;
+ sctp_ecnehdr_t ecne;
+
+ ecne.lowest_tsn = htonl(lowest_tsn);
+ retval = sctp_make_control(asoc, SCTP_CID_ECN_ECNE, 0,
+ sizeof(sctp_ecnehdr_t));
+ if (!retval)
+ goto nodata;
+ retval->subh.ecne_hdr =
+ sctp_addto_chunk(retval, sizeof(ecne), &ecne);
+
+nodata:
+ return retval;
+}
+
+/* Make a DATA chunk for the given association from the provided
+ * parameters. However, do not populate the data payload.
+ */
+struct sctp_chunk *sctp_make_datafrag_empty(struct sctp_association *asoc,
+ const struct sctp_sndrcvinfo *sinfo,
+ int data_len, __u8 flags, __u16 ssn)
+{
+ struct sctp_chunk *retval;
+ struct sctp_datahdr dp;
+ int chunk_len;
+
+ /* We assign the TSN as LATE as possible, not here when
+ * creating the chunk.
+ */
+ dp.tsn = 0;
+ dp.stream = htons(sinfo->sinfo_stream);
+ dp.ppid = sinfo->sinfo_ppid;
+
+ /* Set the flags for an unordered send. */
+ if (sinfo->sinfo_flags & SCTP_UNORDERED) {
+ flags |= SCTP_DATA_UNORDERED;
+ dp.ssn = 0;
+ } else
+ dp.ssn = htons(ssn);
+
+ chunk_len = sizeof(dp) + data_len;
+ retval = sctp_make_data(asoc, flags, chunk_len);
+ if (!retval)
+ goto nodata;
+
+ retval->subh.data_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp);
+ memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo));
+
+nodata:
+ return retval;
+}
+
+/* Create a selective acknowledgement (SACK) for the given
+ * association. This reports on which TSNs we've seen to date,
+ * including duplicates and gaps.
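 */

+/*
+ * Editor's note: a SACK's length is fully determined by the gap-ack and
+ * duplicate-TSN counts taken from the peer's TSN map. A standalone
+ * sketch of the sizing rule used below (function name illustrative;
+ * 4 bytes per gap-ack block and per duplicate TSN):
+ *
+ *    #include <stddef.h>
+ *    #include <stdint.h>
+ *
+ *    // fixed part: cum_tsn_ack + a_rwnd + two 16-bit counts = 12 bytes
+ *    static size_t sack_payload_len(uint16_t num_gabs, uint16_t num_dups)
+ *    {
+ *        return 12 + 4 * (size_t)num_gabs + 4 * (size_t)num_dups;
+ *    }
+ */

+/* Build the SACK chunk. 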
+ */ +struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc) +{ + struct sctp_chunk *retval; + struct sctp_sackhdr sack; + int len; + __u32 ctsn; + __u16 num_gabs, num_dup_tsns; + struct sctp_association *aptr = (struct sctp_association *)asoc; + struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map; + struct sctp_gap_ack_block gabs[SCTP_MAX_GABS]; + struct sctp_transport *trans; + + memset(gabs, 0, sizeof(gabs)); + ctsn = sctp_tsnmap_get_ctsn(map); + + pr_debug("%s: sackCTSNAck sent:0x%x\n", __func__, ctsn); + + /* How much room is needed in the chunk? */ + num_gabs = sctp_tsnmap_num_gabs(map, gabs); + num_dup_tsns = sctp_tsnmap_num_dups(map); + + /* Initialize the SACK header. */ + sack.cum_tsn_ack = htonl(ctsn); + sack.a_rwnd = htonl(asoc->a_rwnd); + sack.num_gap_ack_blocks = htons(num_gabs); + sack.num_dup_tsns = htons(num_dup_tsns); + + len = sizeof(sack) + + sizeof(struct sctp_gap_ack_block) * num_gabs + + sizeof(__u32) * num_dup_tsns; + + /* Create the chunk. */ + retval = sctp_make_control(asoc, SCTP_CID_SACK, 0, len); + if (!retval) + goto nodata; + + /* RFC 2960 6.4 Multi-homed SCTP Endpoints + * + * An endpoint SHOULD transmit reply chunks (e.g., SACK, + * HEARTBEAT ACK, etc.) to the same destination transport + * address from which it received the DATA or control chunk to + * which it is replying. This rule should also be followed if + * the endpoint is bundling DATA chunks together with the + * reply chunk. + * + * However, when acknowledging multiple DATA chunks received + * in packets from different source addresses in a single + * SACK, the SACK chunk may be transmitted to one of the + * destination transport addresses from which the DATA or + * control chunks being acknowledged were received. + * + * [BUG: We do not implement the following paragraph. + * Perhaps we should remember the last transport we used for a + * SACK and avoid that (if possible) if we have seen any + * duplicates. --piggy] + * + * When a receiver of a duplicate DATA chunk sends a SACK to a + * multi- homed endpoint it MAY be beneficial to vary the + * destination address and not use the source address of the + * DATA chunk. The reason being that receiving a duplicate + * from a multi-homed endpoint might indicate that the return + * path (as specified in the source address of the DATA chunk) + * for the SACK is broken. + * + * [Send to the address from which we last received a DATA chunk.] + */ + retval->transport = asoc->peer.last_data_from; + + retval->subh.sack_hdr = + sctp_addto_chunk(retval, sizeof(sack), &sack); + + /* Add the gap ack block information. */ + if (num_gabs) + sctp_addto_chunk(retval, sizeof(__u32) * num_gabs, + gabs); + + /* Add the duplicate TSN information. */ + if (num_dup_tsns) { + aptr->stats.idupchunks += num_dup_tsns; + sctp_addto_chunk(retval, sizeof(__u32) * num_dup_tsns, + sctp_tsnmap_get_dups(map)); + } + /* Once we have a sack generated, check to see what our sack + * generation is, if its 0, reset the transports to 0, and reset + * the association generation to 1 + * + * The idea is that zero is never used as a valid generation for the + * association so no transport will match after a wrap event like this, + * Until the next sack + */ + if (++aptr->peer.sack_generation == 0) { + list_for_each_entry(trans, &asoc->peer.transport_addr_list, + transports) + trans->sack_generation = 0; + aptr->peer.sack_generation = 1; + } +nodata: + return retval; +} + +/* Make a SHUTDOWN chunk. 
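 */

+/*
+ * Editor's note: the sack_generation wrap above relies on 0 never being
+ * a valid generation. A standalone sketch of that rule (types and names
+ * illustrative, not part of this file):
+ *
+ *    #include <stdint.h>
+ *
+ *    static void bump_generation(uint32_t *asoc_gen,
+ *                                uint32_t *transport_gen, int n_transports)
+ *    {
+ *        if (++(*asoc_gen) == 0) {               // wrapped
+ *            for (int i = 0; i < n_transports; i++)
+ *                transport_gen[i] = 0;           // nobody matches now
+ *            *asoc_gen = 1;                      // keep 0 invalid
+ *        }
+ *    }
+ */

+/* SHUTDOWN carries the current cumulative TSN ack. 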
*/ +struct sctp_chunk *sctp_make_shutdown(const struct sctp_association *asoc, + const struct sctp_chunk *chunk) +{ + struct sctp_chunk *retval; + sctp_shutdownhdr_t shut; + __u32 ctsn; + + ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map); + shut.cum_tsn_ack = htonl(ctsn); + + retval = sctp_make_control(asoc, SCTP_CID_SHUTDOWN, 0, + sizeof(sctp_shutdownhdr_t)); + if (!retval) + goto nodata; + + retval->subh.shutdown_hdr = + sctp_addto_chunk(retval, sizeof(shut), &shut); + + if (chunk) + retval->transport = chunk->transport; +nodata: + return retval; +} + +struct sctp_chunk *sctp_make_shutdown_ack(const struct sctp_association *asoc, + const struct sctp_chunk *chunk) +{ + struct sctp_chunk *retval; + + retval = sctp_make_control(asoc, SCTP_CID_SHUTDOWN_ACK, 0, 0); + + /* RFC 2960 6.4 Multi-homed SCTP Endpoints + * + * An endpoint SHOULD transmit reply chunks (e.g., SACK, + * HEARTBEAT ACK, * etc.) to the same destination transport + * address from which it * received the DATA or control chunk + * to which it is replying. + * + * [ACK back to where the SHUTDOWN came from.] + */ + if (retval && chunk) + retval->transport = chunk->transport; + + return retval; +} + +struct sctp_chunk *sctp_make_shutdown_complete( + const struct sctp_association *asoc, + const struct sctp_chunk *chunk) +{ + struct sctp_chunk *retval; + __u8 flags = 0; + + /* Set the T-bit if we have no association (vtag will be + * reflected) + */ + flags |= asoc ? 0 : SCTP_CHUNK_FLAG_T; + + retval = sctp_make_control(asoc, SCTP_CID_SHUTDOWN_COMPLETE, flags, 0); + + /* RFC 2960 6.4 Multi-homed SCTP Endpoints + * + * An endpoint SHOULD transmit reply chunks (e.g., SACK, + * HEARTBEAT ACK, * etc.) to the same destination transport + * address from which it * received the DATA or control chunk + * to which it is replying. + * + * [Report SHUTDOWN COMPLETE back to where the SHUTDOWN ACK + * came from.] + */ + if (retval && chunk) + retval->transport = chunk->transport; + + return retval; +} + +/* Create an ABORT. Note that we set the T bit if we have no + * association, except when responding to an INIT (sctpimpguide 2.41). + */ +struct sctp_chunk *sctp_make_abort(const struct sctp_association *asoc, + const struct sctp_chunk *chunk, + const size_t hint) +{ + struct sctp_chunk *retval; + __u8 flags = 0; + + /* Set the T-bit if we have no association and 'chunk' is not + * an INIT (vtag will be reflected). + */ + if (!asoc) { + if (chunk && chunk->chunk_hdr && + chunk->chunk_hdr->type == SCTP_CID_INIT) + flags = 0; + else + flags = SCTP_CHUNK_FLAG_T; + } + + retval = sctp_make_control(asoc, SCTP_CID_ABORT, flags, hint); + + /* RFC 2960 6.4 Multi-homed SCTP Endpoints + * + * An endpoint SHOULD transmit reply chunks (e.g., SACK, + * HEARTBEAT ACK, * etc.) to the same destination transport + * address from which it * received the DATA or control chunk + * to which it is replying. + * + * [ABORT back to where the offender came from.] + */ + if (retval && chunk) + retval->transport = chunk->transport; + + return retval; +} + +/* Helper to create ABORT with a NO_USER_DATA error. */ +struct sctp_chunk *sctp_make_abort_no_data( + const struct sctp_association *asoc, + const struct sctp_chunk *chunk, __u32 tsn) +{ + struct sctp_chunk *retval; + __be32 payload; + + retval = sctp_make_abort(asoc, chunk, sizeof(sctp_errhdr_t) + + sizeof(tsn)); + + if (!retval) + goto no_mem; + + /* Put the tsn back into network byte order. 
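 */
+ /*
+ * Editor's note: the NO_USER_DATA cause carries exactly one datum,
+ * the TSN of the offending DATA chunk (RFC 4960 3.3.10.9), so the
+ * error payload is a single big-endian 32-bit word appended right
+ * after the cause header.
+ */
+ /* Convert and append the TSN. 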
*/ + payload = htonl(tsn); + sctp_init_cause(retval, SCTP_ERROR_NO_DATA, sizeof(payload)); + sctp_addto_chunk(retval, sizeof(payload), (const void *)&payload); + + /* RFC 2960 6.4 Multi-homed SCTP Endpoints + * + * An endpoint SHOULD transmit reply chunks (e.g., SACK, + * HEARTBEAT ACK, * etc.) to the same destination transport + * address from which it * received the DATA or control chunk + * to which it is replying. + * + * [ABORT back to where the offender came from.] + */ + if (chunk) + retval->transport = chunk->transport; + +no_mem: + return retval; +} + +/* Helper to create ABORT with a SCTP_ERROR_USER_ABORT error. */ +struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *asoc, + struct msghdr *msg, + size_t paylen) +{ + struct sctp_chunk *retval; + void *payload = NULL; + int err; + + retval = sctp_make_abort(asoc, NULL, sizeof(sctp_errhdr_t) + paylen); + if (!retval) + goto err_chunk; + + if (paylen) { + /* Put the msg_iov together into payload. */ + payload = kmalloc(paylen, GFP_KERNEL); + if (!payload) + goto err_payload; + + err = memcpy_from_msg(payload, msg, paylen); + if (err < 0) + goto err_copy; + } + + sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, paylen); + sctp_addto_chunk(retval, paylen, payload); + + if (paylen) + kfree(payload); + + return retval; + +err_copy: + kfree(payload); +err_payload: + sctp_chunk_free(retval); + retval = NULL; +err_chunk: + return retval; +} + +/* Append bytes to the end of a parameter. Will panic if chunk is not big + * enough. + */ +static void *sctp_addto_param(struct sctp_chunk *chunk, int len, + const void *data) +{ + void *target; + int chunklen = ntohs(chunk->chunk_hdr->length); + + target = skb_put(chunk->skb, len); + + if (data) + memcpy(target, data, len); + else + memset(target, 0, len); + + /* Adjust the chunk length field. */ + chunk->chunk_hdr->length = htons(chunklen + len); + chunk->chunk_end = skb_tail_pointer(chunk->skb); + + return target; +} + +/* Make an ABORT chunk with a PROTOCOL VIOLATION cause code. 
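 */

+/*
+ * Editor's note: sctp_addto_chunk() above keeps the chunk 32-bit
+ * aligned by zero-padding whatever is already there before appending.
+ * The round-up rule, as a standalone sketch (WORD_ROUND is the kernel
+ * macro; this function name is illustrative):
+ *
+ *    #include <stddef.h>
+ *
+ *    static size_t word_round(size_t len)
+ *    {
+ *        return (len + 3) & ~(size_t)3;  // e.g. 5 -> 8, 8 -> 8
+ *    }
+ *
+ * sctp_addto_param(), by contrast, appends without padding, so any
+ * alignment is left to the caller or to the next sctp_addto_chunk()
+ * call.
+ */

+/* Build the PROTOCOL VIOLATION abort. 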
*/ +struct sctp_chunk *sctp_make_abort_violation( + const struct sctp_association *asoc, + const struct sctp_chunk *chunk, + const __u8 *payload, + const size_t paylen) +{ + struct sctp_chunk *retval; + struct sctp_paramhdr phdr; + + retval = sctp_make_abort(asoc, chunk, sizeof(sctp_errhdr_t) + paylen + + sizeof(sctp_paramhdr_t)); + if (!retval) + goto end; + + sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, paylen + + sizeof(sctp_paramhdr_t)); + + phdr.type = htons(chunk->chunk_hdr->type); + phdr.length = chunk->chunk_hdr->length; + sctp_addto_chunk(retval, paylen, payload); + sctp_addto_param(retval, sizeof(sctp_paramhdr_t), &phdr); + +end: + return retval; +} + +struct sctp_chunk *sctp_make_violation_paramlen( + const struct sctp_association *asoc, + const struct sctp_chunk *chunk, + struct sctp_paramhdr *param) +{ + struct sctp_chunk *retval; + static const char error[] = "The following parameter had invalid length:"; + size_t payload_len = sizeof(error) + sizeof(sctp_errhdr_t) + + sizeof(sctp_paramhdr_t); + + retval = sctp_make_abort(asoc, chunk, payload_len); + if (!retval) + goto nodata; + + sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, + sizeof(error) + sizeof(sctp_paramhdr_t)); + sctp_addto_chunk(retval, sizeof(error), error); + sctp_addto_param(retval, sizeof(sctp_paramhdr_t), param); + +nodata: + return retval; +} + +struct sctp_chunk *sctp_make_violation_max_retrans( + const struct sctp_association *asoc, + const struct sctp_chunk *chunk) +{ + struct sctp_chunk *retval; + static const char error[] = "Association exceeded its max_retans count"; + size_t payload_len = sizeof(error) + sizeof(sctp_errhdr_t); + + retval = sctp_make_abort(asoc, chunk, payload_len); + if (!retval) + goto nodata; + + sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, sizeof(error)); + sctp_addto_chunk(retval, sizeof(error), error); + +nodata: + return retval; +} + +/* Make a HEARTBEAT chunk. */ +struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc, + const struct sctp_transport *transport) +{ + struct sctp_chunk *retval; + sctp_sender_hb_info_t hbinfo; + + retval = sctp_make_control(asoc, SCTP_CID_HEARTBEAT, 0, sizeof(hbinfo)); + + if (!retval) + goto nodata; + + hbinfo.param_hdr.type = SCTP_PARAM_HEARTBEAT_INFO; + hbinfo.param_hdr.length = htons(sizeof(sctp_sender_hb_info_t)); + hbinfo.daddr = transport->ipaddr; + hbinfo.sent_at = jiffies; + hbinfo.hb_nonce = transport->hb_nonce; + + /* Cast away the 'const', as this is just telling the chunk + * what transport it belongs to. + */ + retval->transport = (struct sctp_transport *) transport; + retval->subh.hbs_hdr = sctp_addto_chunk(retval, sizeof(hbinfo), + &hbinfo); + +nodata: + return retval; +} + +struct sctp_chunk *sctp_make_heartbeat_ack(const struct sctp_association *asoc, + const struct sctp_chunk *chunk, + const void *payload, const size_t paylen) +{ + struct sctp_chunk *retval; + + retval = sctp_make_control(asoc, SCTP_CID_HEARTBEAT_ACK, 0, paylen); + if (!retval) + goto nodata; + + retval->subh.hbs_hdr = sctp_addto_chunk(retval, paylen, payload); + + /* RFC 2960 6.4 Multi-homed SCTP Endpoints + * + * An endpoint SHOULD transmit reply chunks (e.g., SACK, + * HEARTBEAT ACK, * etc.) to the same destination transport + * address from which it * received the DATA or control chunk + * to which it is replying. + * + * [HBACK back to where the HEARTBEAT came from.] 
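 */
+ /*
+ * Editor's note: the payload appended above is the peer's own
+ * Heartbeat Info parameter echoed back byte for byte; the sender
+ * matches it (including the nonce) against what it transmitted, so
+ * nothing in it may be rewritten here.
+ */
+ /* Reply on the transport the HEARTBEAT arrived on. 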
+ */ + if (chunk) + retval->transport = chunk->transport; + +nodata: + return retval; +} + +/* Create an Operation Error chunk with the specified space reserved. + * This routine can be used for containing multiple causes in the chunk. + */ +static struct sctp_chunk *sctp_make_op_error_space( + const struct sctp_association *asoc, + const struct sctp_chunk *chunk, + size_t size) +{ + struct sctp_chunk *retval; + + retval = sctp_make_control(asoc, SCTP_CID_ERROR, 0, + sizeof(sctp_errhdr_t) + size); + if (!retval) + goto nodata; + + /* RFC 2960 6.4 Multi-homed SCTP Endpoints + * + * An endpoint SHOULD transmit reply chunks (e.g., SACK, + * HEARTBEAT ACK, etc.) to the same destination transport + * address from which it received the DATA or control chunk + * to which it is replying. + * + */ + if (chunk) + retval->transport = chunk->transport; + +nodata: + return retval; +} + +/* Create an Operation Error chunk of a fixed size, + * specifically, max(asoc->pathmtu, SCTP_DEFAULT_MAXSEGMENT) + * This is a helper function to allocate an error chunk for + * for those invalid parameter codes in which we may not want + * to report all the errors, if the incoming chunk is large + */ +static inline struct sctp_chunk *sctp_make_op_error_fixed( + const struct sctp_association *asoc, + const struct sctp_chunk *chunk) +{ + size_t size = asoc ? asoc->pathmtu : 0; + + if (!size) + size = SCTP_DEFAULT_MAXSEGMENT; + + return sctp_make_op_error_space(asoc, chunk, size); +} + +/* Create an Operation Error chunk. */ +struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc, + const struct sctp_chunk *chunk, + __be16 cause_code, const void *payload, + size_t paylen, size_t reserve_tail) +{ + struct sctp_chunk *retval; + + retval = sctp_make_op_error_space(asoc, chunk, paylen + reserve_tail); + if (!retval) + goto nodata; + + sctp_init_cause(retval, cause_code, paylen + reserve_tail); + sctp_addto_chunk(retval, paylen, payload); + if (reserve_tail) + sctp_addto_param(retval, reserve_tail, NULL); + +nodata: + return retval; +} + +struct sctp_chunk *sctp_make_auth(const struct sctp_association *asoc) +{ + struct sctp_chunk *retval; + struct sctp_hmac *hmac_desc; + struct sctp_authhdr auth_hdr; + __u8 *hmac; + + /* Get the first hmac that the peer told us to use */ + hmac_desc = sctp_auth_asoc_get_hmac(asoc); + if (unlikely(!hmac_desc)) + return NULL; + + retval = sctp_make_control(asoc, SCTP_CID_AUTH, 0, + hmac_desc->hmac_len + sizeof(sctp_authhdr_t)); + if (!retval) + return NULL; + + auth_hdr.hmac_id = htons(hmac_desc->hmac_id); + auth_hdr.shkey_id = htons(asoc->active_key_id); + + retval->subh.auth_hdr = sctp_addto_chunk(retval, sizeof(sctp_authhdr_t), + &auth_hdr); + + hmac = skb_put(retval->skb, hmac_desc->hmac_len); + memset(hmac, 0, hmac_desc->hmac_len); + + /* Adjust the chunk header to include the empty MAC */ + retval->chunk_hdr->length = + htons(ntohs(retval->chunk_hdr->length) + hmac_desc->hmac_len); + retval->chunk_end = skb_tail_pointer(retval->skb); + + return retval; +} + + +/******************************************************************** + * 2nd Level Abstractions + ********************************************************************/ + +/* Turn an skb into a chunk. + * FIXME: Eventually move the structure directly inside the skb->cb[]. + * + * sctpimpguide-05.txt Section 2.8.2 + * M1) Each time a new DATA chunk is transmitted + * set the 'TSN.Missing.Report' count for that TSN to 0. 
The + * 'TSN.Missing.Report' count will be used to determine missing chunks + * and when to fast retransmit. + * + */ +struct sctp_chunk *sctp_chunkify(struct sk_buff *skb, + const struct sctp_association *asoc, + struct sock *sk) +{ + struct sctp_chunk *retval; + + retval = kmem_cache_zalloc(sctp_chunk_cachep, GFP_ATOMIC); + + if (!retval) + goto nodata; + if (!sk) + pr_debug("%s: chunkifying skb:%p w/o an sk\n", __func__, skb); + + INIT_LIST_HEAD(&retval->list); + retval->skb = skb; + retval->asoc = (struct sctp_association *)asoc; + retval->singleton = 1; + + retval->fast_retransmit = SCTP_CAN_FRTX; + + /* Polish the bead hole. */ + INIT_LIST_HEAD(&retval->transmitted_list); + INIT_LIST_HEAD(&retval->frag_list); + SCTP_DBG_OBJCNT_INC(chunk); + atomic_set(&retval->refcnt, 1); + +nodata: + return retval; +} + +/* Set chunk->source and dest based on the IP header in chunk->skb. */ +void sctp_init_addrs(struct sctp_chunk *chunk, union sctp_addr *src, + union sctp_addr *dest) +{ + memcpy(&chunk->source, src, sizeof(union sctp_addr)); + memcpy(&chunk->dest, dest, sizeof(union sctp_addr)); +} + +/* Extract the source address from a chunk. */ +const union sctp_addr *sctp_source(const struct sctp_chunk *chunk) +{ + /* If we have a known transport, use that. */ + if (chunk->transport) { + return &chunk->transport->ipaddr; + } else { + /* Otherwise, extract it from the IP header. */ + return &chunk->source; + } +} + +/* Create a new chunk, setting the type and flags headers from the + * arguments, reserving enough space for a 'paylen' byte payload. + */ +static struct sctp_chunk *_sctp_make_chunk(const struct sctp_association *asoc, + __u8 type, __u8 flags, int paylen) +{ + struct sctp_chunk *retval; + sctp_chunkhdr_t *chunk_hdr; + struct sk_buff *skb; + struct sock *sk; + + /* No need to allocate LL here, as this is only a chunk. */ + skb = alloc_skb(WORD_ROUND(sizeof(sctp_chunkhdr_t) + paylen), + GFP_ATOMIC); + if (!skb) + goto nodata; + + /* Make room for the chunk header. */ + chunk_hdr = (sctp_chunkhdr_t *)skb_put(skb, sizeof(sctp_chunkhdr_t)); + chunk_hdr->type = type; + chunk_hdr->flags = flags; + chunk_hdr->length = htons(sizeof(sctp_chunkhdr_t)); + + sk = asoc ? asoc->base.sk : NULL; + retval = sctp_chunkify(skb, asoc, sk); + if (!retval) { + kfree_skb(skb); + goto nodata; + } + + retval->chunk_hdr = chunk_hdr; + retval->chunk_end = ((__u8 *)chunk_hdr) + sizeof(struct sctp_chunkhdr); + + /* Determine if the chunk needs to be authenticated */ + if (sctp_auth_send_cid(type, asoc)) + retval->auth = 1; + + return retval; +nodata: + return NULL; +} + +static struct sctp_chunk *sctp_make_data(const struct sctp_association *asoc, + __u8 flags, int paylen) +{ + return _sctp_make_chunk(asoc, SCTP_CID_DATA, flags, paylen); +} + +static struct sctp_chunk *sctp_make_control(const struct sctp_association *asoc, + __u8 type, __u8 flags, int paylen) +{ + struct sctp_chunk *chunk = _sctp_make_chunk(asoc, type, flags, paylen); + + if (chunk) + sctp_control_set_owner_w(chunk); + + return chunk; +} + +/* Release the memory occupied by a chunk. */ +static void sctp_chunk_destroy(struct sctp_chunk *chunk) +{ + BUG_ON(!list_empty(&chunk->list)); + list_del_init(&chunk->transmitted_list); + + consume_skb(chunk->skb); + consume_skb(chunk->auth_chunk); + + SCTP_DBG_OBJCNT_DEC(chunk); + kmem_cache_free(sctp_chunk_cachep, chunk); +} + +/* Possibly, free the chunk. */ +void sctp_chunk_free(struct sctp_chunk *chunk) +{ + /* Release our reference on the message tracker. 
*/ + if (chunk->msg) + sctp_datamsg_put(chunk->msg); + + sctp_chunk_put(chunk); +} + +/* Grab a reference to the chunk. */ +void sctp_chunk_hold(struct sctp_chunk *ch) +{ + atomic_inc(&ch->refcnt); +} + +/* Release a reference to the chunk. */ +void sctp_chunk_put(struct sctp_chunk *ch) +{ + if (atomic_dec_and_test(&ch->refcnt)) + sctp_chunk_destroy(ch); +} + +/* Append bytes to the end of a chunk. Will panic if chunk is not big + * enough. + */ +void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data) +{ + void *target; + void *padding; + int chunklen = ntohs(chunk->chunk_hdr->length); + int padlen = WORD_ROUND(chunklen) - chunklen; + + padding = skb_put(chunk->skb, padlen); + target = skb_put(chunk->skb, len); + + memset(padding, 0, padlen); + memcpy(target, data, len); + + /* Adjust the chunk length field. */ + chunk->chunk_hdr->length = htons(chunklen + padlen + len); + chunk->chunk_end = skb_tail_pointer(chunk->skb); + + return target; +} + +/* Append bytes to the end of a chunk. Returns NULL if there isn't sufficient + * space in the chunk + */ +static void *sctp_addto_chunk_fixed(struct sctp_chunk *chunk, + int len, const void *data) +{ + if (skb_tailroom(chunk->skb) >= len) + return sctp_addto_chunk(chunk, len, data); + else + return NULL; +} + +/* Append bytes from user space to the end of a chunk. Will panic if + * chunk is not big enough. + * Returns a kernel err value. + */ +int sctp_user_addto_chunk(struct sctp_chunk *chunk, int len, + struct iov_iter *from) +{ + void *target; + ssize_t copied; + + /* Make room in chunk for data. */ + target = skb_put(chunk->skb, len); + + /* Copy data (whole iovec) into chunk */ + copied = copy_from_iter(target, len, from); + if (copied != len) + return -EFAULT; + + /* Adjust the chunk length field. */ + chunk->chunk_hdr->length = + htons(ntohs(chunk->chunk_hdr->length) + len); + chunk->chunk_end = skb_tail_pointer(chunk->skb); + + return 0; +} + +/* Helper function to assign a TSN if needed. This assumes that both + * the data_hdr and association have already been assigned. + */ +void sctp_chunk_assign_ssn(struct sctp_chunk *chunk) +{ + struct sctp_datamsg *msg; + struct sctp_chunk *lchunk; + struct sctp_stream *stream; + __u16 ssn; + __u16 sid; + + if (chunk->has_ssn) + return; + + /* All fragments will be on the same stream */ + sid = ntohs(chunk->subh.data_hdr->stream); + stream = &chunk->asoc->ssnmap->out; + + /* Now assign the sequence number to the entire message. + * All fragments must have the same stream sequence number. + */ + msg = chunk->msg; + list_for_each_entry(lchunk, &msg->chunks, frag_list) { + if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) { + ssn = 0; + } else { + if (lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG) + ssn = sctp_ssn_next(stream, sid); + else + ssn = sctp_ssn_peek(stream, sid); + } + + lchunk->subh.data_hdr->ssn = htons(ssn); + lchunk->has_ssn = 1; + } +} + +/* Helper function to assign a TSN if needed. This assumes that both + * the data_hdr and association have already been assigned. + */ +void sctp_chunk_assign_tsn(struct sctp_chunk *chunk) +{ + if (!chunk->has_tsn) { + /* This is the last possible instant to + * assign a TSN. + */ + chunk->subh.data_hdr->tsn = + htonl(sctp_association_get_next_tsn(chunk->asoc)); + chunk->has_tsn = 1; + } +} + +/* Create a CLOSED association to use with an incoming packet. 
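 */

+/*
+ * Editor's note: sctp_chunk_assign_ssn() above gives every fragment of
+ * a message the same SSN and only advances the per-stream counter at
+ * the last fragment (sctp_ssn_next() vs sctp_ssn_peek()). A standalone
+ * sketch of that counter discipline (names illustrative):
+ *
+ *    #include <stdint.h>
+ *
+ *    struct stream { uint16_t next_ssn; };
+ *
+ *    static uint16_t ssn_peek(struct stream *s) { return s->next_ssn; }
+ *    static uint16_t ssn_next(struct stream *s) { return s->next_ssn++; }
+ *
+ * Middle fragments call ssn_peek(); only the LAST_FRAG caller uses
+ * ssn_next(), so the next message on the stream gets SSN + 1 and the
+ * 16-bit counter wraps to 0 after 65535, as RFC 2960 6.5 requires.
+ */

+/* sctp_make_temp_asoc() below builds that temporary association. 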
*/ +struct sctp_association *sctp_make_temp_asoc(const struct sctp_endpoint *ep, + struct sctp_chunk *chunk, + gfp_t gfp) +{ + struct sctp_association *asoc; + struct sk_buff *skb; + sctp_scope_t scope; + struct sctp_af *af; + + /* Create the bare association. */ + scope = sctp_scope(sctp_source(chunk)); + asoc = sctp_association_new(ep, ep->base.sk, scope, gfp); + if (!asoc) + goto nodata; + asoc->temp = 1; + skb = chunk->skb; + /* Create an entry for the source address of the packet. */ + af = sctp_get_af_specific(ipver2af(ip_hdr(skb)->version)); + if (unlikely(!af)) + goto fail; + af->from_skb(&asoc->c.peer_addr, skb, 1); +nodata: + return asoc; + +fail: + sctp_association_free(asoc); + return NULL; +} + +/* Build a cookie representing asoc. + * This INCLUDES the param header needed to put the cookie in the INIT ACK. + */ +static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const struct sctp_chunk *init_chunk, + int *cookie_len, + const __u8 *raw_addrs, int addrs_len) +{ + sctp_cookie_param_t *retval; + struct sctp_signed_cookie *cookie; + struct scatterlist sg; + int headersize, bodysize; + + /* Header size is static data prior to the actual cookie, including + * any padding. + */ + headersize = sizeof(sctp_paramhdr_t) + + (sizeof(struct sctp_signed_cookie) - + sizeof(struct sctp_cookie)); + bodysize = sizeof(struct sctp_cookie) + + ntohs(init_chunk->chunk_hdr->length) + addrs_len; + + /* Pad out the cookie to a multiple to make the signature + * functions simpler to write. + */ + if (bodysize % SCTP_COOKIE_MULTIPLE) + bodysize += SCTP_COOKIE_MULTIPLE + - (bodysize % SCTP_COOKIE_MULTIPLE); + *cookie_len = headersize + bodysize; + + /* Clear this memory since we are sending this data structure + * out on the network. + */ + retval = kzalloc(*cookie_len, GFP_ATOMIC); + if (!retval) + goto nodata; + + cookie = (struct sctp_signed_cookie *) retval->body; + + /* Set up the parameter header. */ + retval->p.type = SCTP_PARAM_STATE_COOKIE; + retval->p.length = htons(*cookie_len); + + /* Copy the cookie part of the association itself. */ + cookie->c = asoc->c; + /* Save the raw address list length in the cookie. */ + cookie->c.raw_addr_list_len = addrs_len; + + /* Remember PR-SCTP capability. */ + cookie->c.prsctp_capable = asoc->peer.prsctp_capable; + + /* Save adaptation indication in the cookie. */ + cookie->c.adaptation_ind = asoc->peer.adaptation_ind; + + /* Set an expiration time for the cookie. */ + cookie->c.expiration = ktime_add(asoc->cookie_life, + ktime_get()); + + /* Copy the peer's init packet. */ + memcpy(&cookie->c.peer_init[0], init_chunk->chunk_hdr, + ntohs(init_chunk->chunk_hdr->length)); + + /* Copy the raw local address list of the association. */ + memcpy((__u8 *)&cookie->c.peer_init[0] + + ntohs(init_chunk->chunk_hdr->length), raw_addrs, addrs_len); + + if (sctp_sk(ep->base.sk)->hmac) { + struct hash_desc desc; + + /* Sign the message. */ + sg_init_one(&sg, &cookie->c, bodysize); + desc.tfm = sctp_sk(ep->base.sk)->hmac; + desc.flags = 0; + + if (crypto_hash_setkey(desc.tfm, ep->secret_key, + sizeof(ep->secret_key)) || + crypto_hash_digest(&desc, &sg, bodysize, cookie->signature)) + goto free_cookie; + } + + return retval; + +free_cookie: + kfree(retval); +nodata: + *cookie_len = 0; + return NULL; +} + +/* Unpack the cookie from COOKIE ECHO chunk, recreating the association. 
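 */

+/*
+ * Editor's note: the cookie signature computed above is an HMAC over
+ * the cookie body with a key known only to this endpoint. A userspace
+ * sketch of the same idea using OpenSSL (an assumption made purely for
+ * illustration; the kernel code above uses its own crypto_hash API and
+ * whatever digest the socket is configured with):
+ *
+ *    #include <openssl/hmac.h>
+ *
+ *    static void sign_cookie(const unsigned char *key, int keylen,
+ *                            const unsigned char *body, size_t bodylen,
+ *                            unsigned char *sig, unsigned int *siglen)
+ *    {
+ *        HMAC(EVP_sha1(), key, keylen, body, bodylen, sig, siglen);
+ *    }
+ *
+ * On the way back in, the receiver recomputes the digest over the
+ * received body and memcmp()s it against the signature field before
+ * trusting anything inside the cookie.
+ */

+/* Unpack the cookie from a COOKIE ECHO chunk, recreating the association.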
 */
+struct sctp_association *sctp_unpack_cookie(
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ struct sctp_chunk *chunk, gfp_t gfp,
+ int *error, struct sctp_chunk **errp)
+{
+ struct sctp_association *retval = NULL;
+ struct sctp_signed_cookie *cookie;
+ struct sctp_cookie *bear_cookie;
+ int headersize, bodysize, fixed_size;
+ __u8 *digest = ep->digest;
+ struct scatterlist sg;
+ unsigned int len;
+ sctp_scope_t scope;
+ struct sk_buff *skb = chunk->skb;
+ ktime_t kt;
+ struct hash_desc desc;
+
+ /* Header size is static data prior to the actual cookie, including
+ * any padding.
+ */
+ headersize = sizeof(sctp_chunkhdr_t) +
+ (sizeof(struct sctp_signed_cookie) -
+ sizeof(struct sctp_cookie));
+ bodysize = ntohs(chunk->chunk_hdr->length) - headersize;
+ fixed_size = headersize + sizeof(struct sctp_cookie);
+
+ /* Verify that the chunk looks like it even has a cookie.
+ * There must be enough room for our cookie and our peer's
+ * INIT chunk.
+ */
+ len = ntohs(chunk->chunk_hdr->length);
+ if (len < fixed_size + sizeof(struct sctp_chunkhdr))
+ goto malformed;
+
+ /* Verify that the cookie has been padded out. */
+ if (bodysize % SCTP_COOKIE_MULTIPLE)
+ goto malformed;
+
+ /* Process the cookie. */
+ cookie = chunk->subh.cookie_hdr;
+ bear_cookie = &cookie->c;
+
+ if (!sctp_sk(ep->base.sk)->hmac)
+ goto no_hmac;
+
+ /* Check the signature. */
+ sg_init_one(&sg, bear_cookie, bodysize);
+ desc.tfm = sctp_sk(ep->base.sk)->hmac;
+ desc.flags = 0;
+
+ memset(digest, 0x00, SCTP_SIGNATURE_SIZE);
+ if (crypto_hash_setkey(desc.tfm, ep->secret_key,
+ sizeof(ep->secret_key)) ||
+ crypto_hash_digest(&desc, &sg, bodysize, digest)) {
+ *error = -SCTP_IERROR_NOMEM;
+ goto fail;
+ }
+
+ if (memcmp(digest, cookie->signature, SCTP_SIGNATURE_SIZE)) {
+ *error = -SCTP_IERROR_BAD_SIG;
+ goto fail;
+ }
+
+no_hmac:
+ /* IG Section 2.35.2:
+ * 3) Compare the port numbers and the verification tag contained
+ * within the COOKIE ECHO chunk to the actual port numbers and the
+ * verification tag within the SCTP common header of the received
+ * packet. If these values do not match the packet MUST be silently
+ * discarded,
+ */
+ if (ntohl(chunk->sctp_hdr->vtag) != bear_cookie->my_vtag) {
+ *error = -SCTP_IERROR_BAD_TAG;
+ goto fail;
+ }
+
+ if (chunk->sctp_hdr->source != bear_cookie->peer_addr.v4.sin_port ||
+ ntohs(chunk->sctp_hdr->dest) != bear_cookie->my_port) {
+ *error = -SCTP_IERROR_BAD_PORTS;
+ goto fail;
+ }
+
+ /* Check to see if the cookie is stale. If there is already
+ * an association, there is no need to check the cookie's expiration
+ * for the init collision case of a lost COOKIE ACK.
+ * If the skb has been timestamped, then use that stamp, otherwise
+ * use the current time. This introduces a small possibility that
+ * a cookie may be considered expired, but this would only slow
+ * down the new association establishment instead of every packet.
+ */
+ if (sock_flag(ep->base.sk, SOCK_TIMESTAMP))
+ kt = skb_get_ktime(skb);
+ else
+ kt = ktime_get();
+
+ if (!asoc && ktime_before(bear_cookie->expiration, kt)) {
+ /*
+ * Section 3.3.10.3 Stale Cookie Error (3)
+ *
+ * Cause of error
+ * ---------------
+ * Stale Cookie Error: Indicates the receipt of a valid State
+ * Cookie that has expired.
+ */ + len = ntohs(chunk->chunk_hdr->length); + *errp = sctp_make_op_error_space(asoc, chunk, len); + if (*errp) { + suseconds_t usecs = ktime_to_us(ktime_sub(kt, bear_cookie->expiration)); + __be32 n = htonl(usecs); + + sctp_init_cause(*errp, SCTP_ERROR_STALE_COOKIE, + sizeof(n)); + sctp_addto_chunk(*errp, sizeof(n), &n); + *error = -SCTP_IERROR_STALE_COOKIE; + } else + *error = -SCTP_IERROR_NOMEM; + + goto fail; + } + + /* Make a new base association. */ + scope = sctp_scope(sctp_source(chunk)); + retval = sctp_association_new(ep, ep->base.sk, scope, gfp); + if (!retval) { + *error = -SCTP_IERROR_NOMEM; + goto fail; + } + + /* Set up our peer's port number. */ + retval->peer.port = ntohs(chunk->sctp_hdr->source); + + /* Populate the association from the cookie. */ + memcpy(&retval->c, bear_cookie, sizeof(*bear_cookie)); + + if (sctp_assoc_set_bind_addr_from_cookie(retval, bear_cookie, + GFP_ATOMIC) < 0) { + *error = -SCTP_IERROR_NOMEM; + goto fail; + } + + /* Also, add the destination address. */ + if (list_empty(&retval->base.bind_addr.address_list)) { + sctp_add_bind_addr(&retval->base.bind_addr, &chunk->dest, + SCTP_ADDR_SRC, GFP_ATOMIC); + } + + retval->next_tsn = retval->c.initial_tsn; + retval->ctsn_ack_point = retval->next_tsn - 1; + retval->addip_serial = retval->c.initial_tsn; + retval->adv_peer_ack_point = retval->ctsn_ack_point; + retval->peer.prsctp_capable = retval->c.prsctp_capable; + retval->peer.adaptation_ind = retval->c.adaptation_ind; + + /* The INIT stuff will be done by the side effects. */ + return retval; + +fail: + if (retval) + sctp_association_free(retval); + + return NULL; + +malformed: + /* Yikes! The packet is either corrupt or deliberately + * malformed. + */ + *error = -SCTP_IERROR_MALFORMED; + goto fail; +} + +/******************************************************************** + * 3rd Level Abstractions + ********************************************************************/ + +struct __sctp_missing { + __be32 num_missing; + __be16 type; +} __packed; + +/* + * Report a missing mandatory parameter. + */ +static int sctp_process_missing_param(const struct sctp_association *asoc, + sctp_param_t paramtype, + struct sctp_chunk *chunk, + struct sctp_chunk **errp) +{ + struct __sctp_missing report; + __u16 len; + + len = WORD_ROUND(sizeof(report)); + + /* Make an ERROR chunk, preparing enough room for + * returning multiple unknown parameters. + */ + if (!*errp) + *errp = sctp_make_op_error_space(asoc, chunk, len); + + if (*errp) { + report.num_missing = htonl(1); + report.type = paramtype; + sctp_init_cause(*errp, SCTP_ERROR_MISS_PARAM, + sizeof(report)); + sctp_addto_chunk(*errp, sizeof(report), &report); + } + + /* Stop processing this chunk. */ + return 0; +} + +/* Report an Invalid Mandatory Parameter. */ +static int sctp_process_inv_mandatory(const struct sctp_association *asoc, + struct sctp_chunk *chunk, + struct sctp_chunk **errp) +{ + /* Invalid Mandatory Parameter Error has no payload. */ + + if (!*errp) + *errp = sctp_make_op_error_space(asoc, chunk, 0); + + if (*errp) + sctp_init_cause(*errp, SCTP_ERROR_INV_PARAM, 0); + + /* Stop processing this chunk. */ + return 0; +} + +static int sctp_process_inv_paramlength(const struct sctp_association *asoc, + struct sctp_paramhdr *param, + const struct sctp_chunk *chunk, + struct sctp_chunk **errp) +{ + /* This is a fatal error. Any accumulated non-fatal errors are + * not reported. + */ + if (*errp) + sctp_chunk_free(*errp); + + /* Create an error chunk and fill it in with our payload. 
 */
+ *errp = sctp_make_violation_paramlen(asoc, chunk, param);
+
+ return 0;
+}
+
+
+/* Do not attempt to handle the HOST_NAME parameter. However, do
+ * send back an indicator to the peer.
+ */
+static int sctp_process_hn_param(const struct sctp_association *asoc,
+ union sctp_params param,
+ struct sctp_chunk *chunk,
+ struct sctp_chunk **errp)
+{
+ __u16 len = ntohs(param.p->length);
+
+ /* Processing of the HOST_NAME parameter will generate an
+ * ABORT. If we've accumulated any non-fatal errors, they
+ * would be unrecognized parameters and we should not include
+ * them in the ABORT.
+ */
+ if (*errp)
+ sctp_chunk_free(*errp);
+
+ *errp = sctp_make_op_error_space(asoc, chunk, len);
+
+ if (*errp) {
+ sctp_init_cause(*errp, SCTP_ERROR_DNS_FAILED, len);
+ sctp_addto_chunk(*errp, len, param.v);
+ }
+
+ /* Stop processing this chunk. */
+ return 0;
+}
+
+static int sctp_verify_ext_param(struct net *net, union sctp_params param)
+{
+ __u16 num_ext = ntohs(param.p->length) - sizeof(sctp_paramhdr_t);
+ int have_auth = 0;
+ int have_asconf = 0;
+ int i;
+
+ for (i = 0; i < num_ext; i++) {
+ switch (param.ext->chunks[i]) {
+ case SCTP_CID_AUTH:
+ have_auth = 1;
+ break;
+ case SCTP_CID_ASCONF:
+ case SCTP_CID_ASCONF_ACK:
+ have_asconf = 1;
+ break;
+ }
+ }
+
+ /* ADD-IP Security: The draft requires us to ABORT or ignore the
+ * INIT/INIT-ACK if ADD-IP is listed, but AUTH is not. Do this
+ * only if ADD-IP is turned on and we are not in backward-compatible
+ * mode.
+ */
+ if (net->sctp.addip_noauth)
+ return 1;
+
+ if (net->sctp.addip_enable && !have_auth && have_asconf)
+ return 0;
+
+ return 1;
+}
+
+static void sctp_process_ext_param(struct sctp_association *asoc,
+ union sctp_params param)
+{
+ struct net *net = sock_net(asoc->base.sk);
+ __u16 num_ext = ntohs(param.p->length) - sizeof(sctp_paramhdr_t);
+ int i;
+
+ for (i = 0; i < num_ext; i++) {
+ switch (param.ext->chunks[i]) {
+ case SCTP_CID_FWD_TSN:
+ if (net->sctp.prsctp_enable && !asoc->peer.prsctp_capable)
+ asoc->peer.prsctp_capable = 1;
+ break;
+ case SCTP_CID_AUTH:
+ /* If the peer reports AUTH, assume that it
+ * supports AUTH.
+ */
+ if (asoc->ep->auth_enable)
+ asoc->peer.auth_capable = 1;
+ break;
+ case SCTP_CID_ASCONF:
+ case SCTP_CID_ASCONF_ACK:
+ if (net->sctp.addip_enable)
+ asoc->peer.asconf_capable = 1;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+/* RFC 2960 3.2.1 & the Implementers Guide 2.2.
+ *
+ * The Parameter Types are encoded such that the
+ * highest-order two bits specify the action that must be
+ * taken if the processing endpoint does not recognize the
+ * Parameter Type.
+ *
+ * 00 - Stop processing this parameter; do not process any further
+ * parameters within this chunk
+ *
+ * 01 - Stop processing this parameter, do not process any further
+ * parameters within this chunk, and report the unrecognized
+ * parameter in an 'Unrecognized Parameter' ERROR chunk.
+ *
+ * 10 - Skip this parameter and continue processing.
+ *
+ * 11 - Skip this parameter and continue processing but
+ * report the unrecognized parameter in an
+ * 'Unrecognized Parameter' ERROR chunk.
+ *
+ * Return value:
+ * SCTP_IERROR_NO_ERROR - continue with the chunk
+ * SCTP_IERROR_ERROR - stop and report an error.
+ * SCTP_IERROR_NOMEM - out of memory.
+ */
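+
+/*
+ * Editor's note: the action lives in the top two bits of the 16-bit
+ * parameter type, as described above. A standalone sketch of the
+ * dispatch (enum and function names illustrative; the kernel masks
+ * with SCTP_PARAM_ACTION_MASK instead):
+ *
+ *    #include <stdint.h>
+ *
+ *    enum action { STOP, STOP_REPORT, SKIP, SKIP_REPORT };
+ *
+ *    static enum action param_action(uint16_t type) // host byte order
+ *    {
+ *        switch (type >> 14) {                      // top two bits
+ *        case 0:  return STOP;
+ *        case 1:  return STOP_REPORT;
+ *        case 2:  return SKIP;
+ *        default: return SKIP_REPORT;               // 3
+ *        }
+ *    }
+ */
+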
+static sctp_ierror_t sctp_process_unk_param(const struct sctp_association *asoc,
+ union sctp_params param,
+ struct sctp_chunk *chunk,
+ struct sctp_chunk **errp)
+{
+ int retval = SCTP_IERROR_NO_ERROR;
+
+ switch (param.p->type & SCTP_PARAM_ACTION_MASK) {
+ case SCTP_PARAM_ACTION_DISCARD:
+ retval = SCTP_IERROR_ERROR;
+ break;
+ case SCTP_PARAM_ACTION_SKIP:
+ break;
+ case SCTP_PARAM_ACTION_DISCARD_ERR:
+ retval = SCTP_IERROR_ERROR;
+ /* Fall through */
+ case SCTP_PARAM_ACTION_SKIP_ERR:
+ /* Make an ERROR chunk, preparing enough room for
+ * returning multiple unknown parameters.
+ */
+ if (NULL == *errp)
+ *errp = sctp_make_op_error_fixed(asoc, chunk);
+
+ if (*errp) {
+ if (!sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM,
+ WORD_ROUND(ntohs(param.p->length))))
+ sctp_addto_chunk_fixed(*errp,
+ WORD_ROUND(ntohs(param.p->length)),
+ param.v);
+ } else {
+ /* If there is no memory for generating the ERROR
+ * report as specified, an ABORT will be triggered
+ * to the peer and the association won't be
+ * established.
+ */
+ retval = SCTP_IERROR_NOMEM;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return retval;
+}
+
+/* Verify variable length parameters
+ * Return values:
+ * SCTP_IERROR_ABORT - trigger an ABORT
+ * SCTP_IERROR_NOMEM - out of memory (abort)
+ * SCTP_IERROR_ERROR - stop processing, trigger an ERROR
+ * SCTP_IERROR_NO_ERROR - continue with the chunk
+ */
+static sctp_ierror_t sctp_verify_param(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ union sctp_params param,
+ sctp_cid_t cid,
+ struct sctp_chunk *chunk,
+ struct sctp_chunk **err_chunk)
+{
+ struct sctp_hmac_algo_param *hmacs;
+ int retval = SCTP_IERROR_NO_ERROR;
+ __u16 n_elt, id = 0;
+ int i;
+
+ /* FIXME - This routine is not looking at each parameter per the
+ * chunk type, i.e., unrecognized parameters should be further
+ * identified based on the chunk id.
+ */
+
+ switch (param.p->type) {
+ case SCTP_PARAM_IPV4_ADDRESS:
+ case SCTP_PARAM_IPV6_ADDRESS:
+ case SCTP_PARAM_COOKIE_PRESERVATIVE:
+ case SCTP_PARAM_SUPPORTED_ADDRESS_TYPES:
+ case SCTP_PARAM_STATE_COOKIE:
+ case SCTP_PARAM_HEARTBEAT_INFO:
+ case SCTP_PARAM_UNRECOGNIZED_PARAMETERS:
+ case SCTP_PARAM_ECN_CAPABLE:
+ case SCTP_PARAM_ADAPTATION_LAYER_IND:
+ break;
+
+ case SCTP_PARAM_SUPPORTED_EXT:
+ if (!sctp_verify_ext_param(net, param))
+ return SCTP_IERROR_ABORT;
+ break;
+
+ case SCTP_PARAM_SET_PRIMARY:
+ if (net->sctp.addip_enable)
+ break;
+ goto fallthrough;
+
+ case SCTP_PARAM_HOST_NAME_ADDRESS:
+ /* Tell the peer, we won't support this param. */
+ sctp_process_hn_param(asoc, param, chunk, err_chunk);
+ retval = SCTP_IERROR_ABORT;
+ break;
+
+ case SCTP_PARAM_FWD_TSN_SUPPORT:
+ if (net->sctp.prsctp_enable)
+ break;
+ goto fallthrough;
+
+ case SCTP_PARAM_RANDOM:
+ if (!ep->auth_enable)
+ goto fallthrough;
+
+ /* SCTP-AUTH: Section 6.1
+ * If the random number is not 32 bytes long the association
+ * MUST be aborted. The ABORT chunk SHOULD contain the error
+ * cause 'Protocol Violation'.
+ */
+ if (SCTP_AUTH_RANDOM_LENGTH !=
+ ntohs(param.p->length) - sizeof(sctp_paramhdr_t)) {
+ sctp_process_inv_paramlength(asoc, param.p,
+ chunk, err_chunk);
+ retval = SCTP_IERROR_ABORT;
+ }
+ break;
+
+ case SCTP_PARAM_CHUNKS:
+ if (!ep->auth_enable)
+ goto fallthrough;
+
+ /* SCTP-AUTH: Section 3.2
+ * The CHUNKS parameter MUST be included once in the INIT or
+ * INIT-ACK chunk if the sender wants to receive authenticated
+ * chunks. Its maximum length is 260 bytes.
+ */
+ if (260 < ntohs(param.p->length)) {
+ sctp_process_inv_paramlength(asoc, param.p,
+ chunk, err_chunk);
+ retval = SCTP_IERROR_ABORT;
+ }
+ break;
+
+ case SCTP_PARAM_HMAC_ALGO:
+ if (!ep->auth_enable)
+ goto fallthrough;
+
+ hmacs = (struct sctp_hmac_algo_param *)param.p;
+ n_elt = (ntohs(param.p->length) - sizeof(sctp_paramhdr_t)) >> 1;
+
+ /* SCTP-AUTH: Section 6.1
+ * The HMAC algorithm based on SHA-1 MUST be supported and
+ * included in the HMAC-ALGO parameter.
+ */
+ for (i = 0; i < n_elt; i++) {
+ id = ntohs(hmacs->hmac_ids[i]);
+
+ if (id == SCTP_AUTH_HMAC_ID_SHA1)
+ break;
+ }
+
+ if (id != SCTP_AUTH_HMAC_ID_SHA1) {
+ sctp_process_inv_paramlength(asoc, param.p, chunk,
+ err_chunk);
+ retval = SCTP_IERROR_ABORT;
+ }
+ break;
+fallthrough:
+ default:
+ pr_debug("%s: unrecognized param:%d for chunk:%d\n",
+ __func__, ntohs(param.p->type), cid);
+
+ retval = sctp_process_unk_param(asoc, param, chunk, err_chunk);
+ break;
+ }
+ return retval;
+}
+
+/* Verify the INIT packet before we process it. */
+int sctp_verify_init(struct net *net, const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc, sctp_cid_t cid,
+ sctp_init_chunk_t *peer_init, struct sctp_chunk *chunk,
+ struct sctp_chunk **errp)
+{
+ union sctp_params param;
+ bool has_cookie = false;
+ int result;
+
+ /* Check for missing mandatory parameters. Note: Initial TSN is
+ * also mandatory, but is not checked here since the valid range
+ * is 0..2**32-1. RFC4960, section 3.3.3.
+ */
+ if (peer_init->init_hdr.num_outbound_streams == 0 ||
+ peer_init->init_hdr.num_inbound_streams == 0 ||
+ peer_init->init_hdr.init_tag == 0 ||
+ ntohl(peer_init->init_hdr.a_rwnd) < SCTP_DEFAULT_MINWINDOW)
+ return sctp_process_inv_mandatory(asoc, chunk, errp);
+
+ sctp_walk_params(param, peer_init, init_hdr.params) {
+ if (param.p->type == SCTP_PARAM_STATE_COOKIE)
+ has_cookie = true;
+ }
+
+ /* There is a possibility that a parameter length was bad and
+ * in that case we would have stopped walking the parameters.
+ * The current param.p would point at the bad one.
+ * Current consensus on the mailing list is to generate a PROTOCOL
+ * VIOLATION error. We build the ERROR chunk here and let the normal
+ * error handling code build and send the packet.
+ */
+ if (param.v != (void *)chunk->chunk_end)
+ return sctp_process_inv_paramlength(asoc, param.p, chunk, errp);
+
+ /* The only missing mandatory param possible today is
+ * the state cookie for an INIT-ACK chunk.
+ */
+ if ((SCTP_CID_INIT_ACK == cid) && !has_cookie)
+ return sctp_process_missing_param(asoc, SCTP_PARAM_STATE_COOKIE,
+ chunk, errp);
+
+ /* Verify all the variable length parameters */
+ sctp_walk_params(param, peer_init, init_hdr.params) {
+ result = sctp_verify_param(net, ep, asoc, param, cid,
+ chunk, errp);
+ switch (result) {
+ case SCTP_IERROR_ABORT:
+ case SCTP_IERROR_NOMEM:
+ return 0;
+ case SCTP_IERROR_ERROR:
+ return 1;
+ case SCTP_IERROR_NO_ERROR:
+ default:
+ break;
+ }
+
+ } /* for (loop through all parameters) */
+
+ return 1;
+}
+
+/* Unpack the parameters in an INIT packet into an association.
+ * Returns 0 on failure, else success.
+ * FIXME: This is an association method.
+ */
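+
+/*
+ * Editor's note: sctp_walk_params() used above is a macro that steps a
+ * cursor over length-prefixed parameters until the chunk end. The walk,
+ * as a standalone sketch (names illustrative; real code must also
+ * bounds-check each length against the chunk end, which is exactly what
+ * the param.v != chunk->chunk_end test above verifies):
+ *
+ *    #include <stddef.h>
+ *    #include <stdint.h>
+ *    #include <arpa/inet.h>
+ *
+ *    struct phdr { uint16_t type, length; }; // both big-endian
+ *
+ *    static const struct phdr *next_param(const struct phdr *p)
+ *    {
+ *        size_t len = (ntohs(p->length) + 3) & ~(size_t)3; // padded
+ *        return (const struct phdr *)((const uint8_t *)p + len);
+ *    }
+ */
+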
+ */
+int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
+ const union sctp_addr *peer_addr,
+ sctp_init_chunk_t *peer_init, gfp_t gfp)
+{
+ struct net *net = sock_net(asoc->base.sk);
+ union sctp_params param;
+ struct sctp_transport *transport;
+ struct list_head *pos, *temp;
+ struct sctp_af *af;
+ union sctp_addr addr;
+ char *cookie;
+ int src_match = 0;
+
+ /* We must include the address that the INIT packet came from.
+ * This is the only address that matters for an INIT packet.
+ * When processing a COOKIE ECHO, we retrieve the from address
+ * of the INIT from the cookie.
+ */
+
+ /* This implementation defaults to making the first transport
+ * added as the primary transport. The source address seems to
+ * be a better choice than any of the embedded addresses.
+ */
+ if (!sctp_assoc_add_peer(asoc, peer_addr, gfp, SCTP_ACTIVE))
+ goto nomem;
+
+ if (sctp_cmp_addr_exact(sctp_source(chunk), peer_addr))
+ src_match = 1;
+
+ /* Process the initialization parameters. */
+ sctp_walk_params(param, peer_init, init_hdr.params) {
+ if (!src_match && (param.p->type == SCTP_PARAM_IPV4_ADDRESS ||
+ param.p->type == SCTP_PARAM_IPV6_ADDRESS)) {
+ af = sctp_get_af_specific(param_type2af(param.p->type));
+ af->from_addr_param(&addr, param.addr,
+ chunk->sctp_hdr->source, 0);
+ if (sctp_cmp_addr_exact(sctp_source(chunk), &addr))
+ src_match = 1;
+ }
+
+ if (!sctp_process_param(asoc, param, peer_addr, gfp))
+ goto clean_up;
+ }
+
+ /* source address of chunk may not match any valid address */
+ if (!src_match)
+ goto clean_up;
+
+ /* AUTH: After processing the parameters, make sure that we
+ * have all the required info to potentially do authentications.
+ */
+ if (asoc->peer.auth_capable && (!asoc->peer.peer_random ||
+ !asoc->peer.peer_hmacs))
+ asoc->peer.auth_capable = 0;
+
+ /* In a non-backward compatible mode, if the peer claims
+ * support for ADD-IP but not AUTH, the ADD-IP spec states
+ * that we MUST ABORT the association. Section 6. The section
+ * also gives us an option to silently ignore the packet, which
+ * is what we'll do here.
+ */
+ if (!net->sctp.addip_noauth &&
+ (asoc->peer.asconf_capable && !asoc->peer.auth_capable)) {
+ asoc->peer.addip_disabled_mask |= (SCTP_PARAM_ADD_IP |
+ SCTP_PARAM_DEL_IP |
+ SCTP_PARAM_SET_PRIMARY);
+ asoc->peer.asconf_capable = 0;
+ goto clean_up;
+ }
+
+ /* Walk list of transports, removing transports in the UNKNOWN state. */
+ list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
+ transport = list_entry(pos, struct sctp_transport, transports);
+ if (transport->state == SCTP_UNKNOWN) {
+ sctp_assoc_rm_peer(asoc, transport);
+ }
+ }
+
+ /* The fixed INIT headers are always in network byte
+ * order.
+ */
+ asoc->peer.i.init_tag =
+ ntohl(peer_init->init_hdr.init_tag);
+ asoc->peer.i.a_rwnd =
+ ntohl(peer_init->init_hdr.a_rwnd);
+ asoc->peer.i.num_outbound_streams =
+ ntohs(peer_init->init_hdr.num_outbound_streams);
+ asoc->peer.i.num_inbound_streams =
+ ntohs(peer_init->init_hdr.num_inbound_streams);
+ asoc->peer.i.initial_tsn =
+ ntohl(peer_init->init_hdr.initial_tsn);
+
+ /* Apply the upper bounds for output streams based on peer's
+ * number of inbound streams.
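
(The ntohl()/ntohs() block above is the whole of the wire-to-host conversion for the fixed INIT header. A compact standalone sketch, with the struct layouts redeclared here for illustration following RFC 4960 section 3.3.2; they are not the kernel's definitions.)

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

struct init_hdr_wire {			/* big-endian, as on the wire */
	uint32_t init_tag;
	uint32_t a_rwnd;
	uint16_t num_outbound_streams;
	uint16_t num_inbound_streams;
	uint32_t initial_tsn;
};

struct peer_init_info {			/* host order */
	uint32_t init_tag, a_rwnd, initial_tsn;
	uint16_t num_outbound_streams, num_inbound_streams;
};

static void unpack_init(const struct init_hdr_wire *w, struct peer_init_info *p)
{
	p->init_tag             = ntohl(w->init_tag);
	p->a_rwnd               = ntohl(w->a_rwnd);
	p->num_outbound_streams = ntohs(w->num_outbound_streams);
	p->num_inbound_streams  = ntohs(w->num_inbound_streams);
	p->initial_tsn          = ntohl(w->initial_tsn);
}

int main(void)
{
	struct init_hdr_wire w = {
		.init_tag = htonl(0xdeadbeef), .a_rwnd = htonl(65535),
		.num_outbound_streams = htons(10),
		.num_inbound_streams = htons(10),
		.initial_tsn = htonl(1),
	};
	struct peer_init_info p;

	unpack_init(&w, &p);
	printf("peer init_tag %#x, a_rwnd %u\n", p.init_tag, p.a_rwnd);
	return 0;
}
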
+ */ + if (asoc->c.sinit_num_ostreams > + ntohs(peer_init->init_hdr.num_inbound_streams)) { + asoc->c.sinit_num_ostreams = + ntohs(peer_init->init_hdr.num_inbound_streams); + } + + if (asoc->c.sinit_max_instreams > + ntohs(peer_init->init_hdr.num_outbound_streams)) { + asoc->c.sinit_max_instreams = + ntohs(peer_init->init_hdr.num_outbound_streams); + } + + /* Copy Initiation tag from INIT to VT_peer in cookie. */ + asoc->c.peer_vtag = asoc->peer.i.init_tag; + + /* Peer Rwnd : Current calculated value of the peer's rwnd. */ + asoc->peer.rwnd = asoc->peer.i.a_rwnd; + + /* Copy cookie in case we need to resend COOKIE-ECHO. */ + cookie = asoc->peer.cookie; + if (cookie) { + asoc->peer.cookie = kmemdup(cookie, asoc->peer.cookie_len, gfp); + if (!asoc->peer.cookie) + goto clean_up; + } + + /* RFC 2960 7.2.1 The initial value of ssthresh MAY be arbitrarily + * high (for example, implementations MAY use the size of the receiver + * advertised window). + */ + list_for_each_entry(transport, &asoc->peer.transport_addr_list, + transports) { + transport->ssthresh = asoc->peer.i.a_rwnd; + } + + /* Set up the TSN tracking pieces. */ + if (!sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL, + asoc->peer.i.initial_tsn, gfp)) + goto clean_up; + + /* RFC 2960 6.5 Stream Identifier and Stream Sequence Number + * + * The stream sequence number in all the streams shall start + * from 0 when the association is established. Also, when the + * stream sequence number reaches the value 65535 the next + * stream sequence number shall be set to 0. + */ + + /* Allocate storage for the negotiated streams if it is not a temporary + * association. + */ + if (!asoc->temp) { + int error; + + asoc->ssnmap = sctp_ssnmap_new(asoc->c.sinit_max_instreams, + asoc->c.sinit_num_ostreams, gfp); + if (!asoc->ssnmap) + goto clean_up; + + error = sctp_assoc_set_id(asoc, gfp); + if (error) + goto clean_up; + } + + /* ADDIP Section 4.1 ASCONF Chunk Procedures + * + * When an endpoint has an ASCONF signaled change to be sent to the + * remote endpoint it should do the following: + * ... + * A2) A serial number should be assigned to the Chunk. The serial + * number should be a monotonically increasing number. All serial + * numbers are defined to be initialized at the start of the + * association to the same value as the Initial TSN. + */ + asoc->peer.addip_serial = asoc->peer.i.initial_tsn - 1; + return 1; + +clean_up: + /* Release the transport structures. */ + list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { + transport = list_entry(pos, struct sctp_transport, transports); + if (transport->state != SCTP_ACTIVE) + sctp_assoc_rm_peer(asoc, transport); + } + +nomem: + return 0; +} + + +/* Update asoc with the option described in param. + * + * RFC2960 3.3.2.1 Optional/Variable Length Parameters in INIT + * + * asoc is the association to update. + * param is the variable length parameter to use for update. + * cid tells us if this is an INIT, INIT ACK or COOKIE ECHO. + * If the current packet is an INIT we want to minimize the amount of + * work we do. In particular, we should not build transport + * structures for the addresses. 
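
(The two clamps above implement RFC 4960 section 5.1.1 stream negotiation: never send on more streams than the peer can receive, and never track more inbound streams than the peer will ever use. A standalone sketch with hypothetical names:)

#include <stdint.h>
#include <stdio.h>

static void negotiate_streams(uint16_t *ostreams, uint16_t *max_instreams,
			      uint16_t peer_instreams, uint16_t peer_ostreams)
{
	if (*ostreams > peer_instreams)
		*ostreams = peer_instreams;	/* can't exceed peer's inbound */
	if (*max_instreams > peer_ostreams)
		*max_instreams = peer_ostreams;	/* no need to track unused ones */
}

int main(void)
{
	uint16_t out = 10, in = 65535;

	negotiate_streams(&out, &in, 5, 2);
	printf("negotiated: %u outbound, %u inbound\n", out, in);  /* 5, 2 */
	return 0;
}
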
+ */ +static int sctp_process_param(struct sctp_association *asoc, + union sctp_params param, + const union sctp_addr *peer_addr, + gfp_t gfp) +{ + struct net *net = sock_net(asoc->base.sk); + union sctp_addr addr; + int i; + __u16 sat; + int retval = 1; + sctp_scope_t scope; + time_t stale; + struct sctp_af *af; + union sctp_addr_param *addr_param; + struct sctp_transport *t; + struct sctp_endpoint *ep = asoc->ep; + + /* We maintain all INIT parameters in network byte order all the + * time. This allows us to not worry about whether the parameters + * came from a fresh INIT, and INIT ACK, or were stored in a cookie. + */ + switch (param.p->type) { + case SCTP_PARAM_IPV6_ADDRESS: + if (PF_INET6 != asoc->base.sk->sk_family) + break; + goto do_addr_param; + + case SCTP_PARAM_IPV4_ADDRESS: + /* v4 addresses are not allowed on v6-only socket */ + if (ipv6_only_sock(asoc->base.sk)) + break; +do_addr_param: + af = sctp_get_af_specific(param_type2af(param.p->type)); + af->from_addr_param(&addr, param.addr, htons(asoc->peer.port), 0); + scope = sctp_scope(peer_addr); + if (sctp_in_scope(net, &addr, scope)) + if (!sctp_assoc_add_peer(asoc, &addr, gfp, SCTP_UNCONFIRMED)) + return 0; + break; + + case SCTP_PARAM_COOKIE_PRESERVATIVE: + if (!net->sctp.cookie_preserve_enable) + break; + + stale = ntohl(param.life->lifespan_increment); + + /* Suggested Cookie Life span increment's unit is msec, + * (1/1000sec). + */ + asoc->cookie_life = ktime_add_ms(asoc->cookie_life, stale); + break; + + case SCTP_PARAM_HOST_NAME_ADDRESS: + pr_debug("%s: unimplemented SCTP_HOST_NAME_ADDRESS\n", __func__); + break; + + case SCTP_PARAM_SUPPORTED_ADDRESS_TYPES: + /* Turn off the default values first so we'll know which + * ones are really set by the peer. + */ + asoc->peer.ipv4_address = 0; + asoc->peer.ipv6_address = 0; + + /* Assume that peer supports the address family + * by which it sends a packet. + */ + if (peer_addr->sa.sa_family == AF_INET6) + asoc->peer.ipv6_address = 1; + else if (peer_addr->sa.sa_family == AF_INET) + asoc->peer.ipv4_address = 1; + + /* Cycle through address types; avoid divide by 0. */ + sat = ntohs(param.p->length) - sizeof(sctp_paramhdr_t); + if (sat) + sat /= sizeof(__u16); + + for (i = 0; i < sat; ++i) { + switch (param.sat->types[i]) { + case SCTP_PARAM_IPV4_ADDRESS: + asoc->peer.ipv4_address = 1; + break; + + case SCTP_PARAM_IPV6_ADDRESS: + if (PF_INET6 == asoc->base.sk->sk_family) + asoc->peer.ipv6_address = 1; + break; + + case SCTP_PARAM_HOST_NAME_ADDRESS: + asoc->peer.hostname_address = 1; + break; + + default: /* Just ignore anything else. */ + break; + } + } + break; + + case SCTP_PARAM_STATE_COOKIE: + asoc->peer.cookie_len = + ntohs(param.p->length) - sizeof(sctp_paramhdr_t); + asoc->peer.cookie = param.cookie->body; + break; + + case SCTP_PARAM_HEARTBEAT_INFO: + /* Would be odd to receive, but it causes no problems. */ + break; + + case SCTP_PARAM_UNRECOGNIZED_PARAMETERS: + /* Rejected during verify stage. */ + break; + + case SCTP_PARAM_ECN_CAPABLE: + asoc->peer.ecn_capable = 1; + break; + + case SCTP_PARAM_ADAPTATION_LAYER_IND: + asoc->peer.adaptation_ind = ntohl(param.aind->adaptation_ind); + break; + + case SCTP_PARAM_SET_PRIMARY: + if (!net->sctp.addip_enable) + goto fall_through; + + addr_param = param.v + sizeof(sctp_addip_param_t); + + af = sctp_get_af_specific(param_type2af(addr_param->p.type)); + if (af == NULL) + break; + + af->from_addr_param(&addr, addr_param, + htons(asoc->peer.port), 0); + + /* if the address is invalid, we can't process it. 
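
(For the SCTP_PARAM_SUPPORTED_ADDRESS_TYPES case above, the parameter body is just an array of 16-bit type codes, and the sat/sizeof guard avoids a divide by zero on an empty body. An illustrative userspace parser; the numeric constants are the RFC 4960 parameter type values, and the caps struct is a stand-in for the kernel's peer flags, not its real layout.)

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <arpa/inet.h>

#define PARAM_IPV4     5
#define PARAM_IPV6     6
#define PARAM_HOSTNAME 11

struct peer_caps { int ipv4, ipv6, hostname; };

static void parse_sat(const uint16_t *types_be, size_t body_len,
		      struct peer_caps *caps)
{
	size_t n = body_len / sizeof(uint16_t);	/* 0-length body => no entries */

	caps->ipv4 = caps->ipv6 = caps->hostname = 0;
	for (size_t i = 0; i < n; i++) {
		switch (ntohs(types_be[i])) {
		case PARAM_IPV4:     caps->ipv4 = 1;     break;
		case PARAM_IPV6:     caps->ipv6 = 1;     break;
		case PARAM_HOSTNAME: caps->hostname = 1; break;
		default: break;	/* unknown types are simply ignored */
		}
	}
}

int main(void)
{
	uint16_t body[] = { htons(PARAM_IPV4), htons(PARAM_IPV6) };
	struct peer_caps caps;

	parse_sat(body, sizeof(body), &caps);
	printf("v4=%d v6=%d host=%d\n", caps.ipv4, caps.ipv6, caps.hostname);
	return 0;
}
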
+ * XXX: see spec for what to do. + */ + if (!af->addr_valid(&addr, NULL, NULL)) + break; + + t = sctp_assoc_lookup_paddr(asoc, &addr); + if (!t) + break; + + sctp_assoc_set_primary(asoc, t); + break; + + case SCTP_PARAM_SUPPORTED_EXT: + sctp_process_ext_param(asoc, param); + break; + + case SCTP_PARAM_FWD_TSN_SUPPORT: + if (net->sctp.prsctp_enable) { + asoc->peer.prsctp_capable = 1; + break; + } + /* Fall Through */ + goto fall_through; + + case SCTP_PARAM_RANDOM: + if (!ep->auth_enable) + goto fall_through; + + /* Save peer's random parameter */ + asoc->peer.peer_random = kmemdup(param.p, + ntohs(param.p->length), gfp); + if (!asoc->peer.peer_random) { + retval = 0; + break; + } + break; + + case SCTP_PARAM_HMAC_ALGO: + if (!ep->auth_enable) + goto fall_through; + + /* Save peer's HMAC list */ + asoc->peer.peer_hmacs = kmemdup(param.p, + ntohs(param.p->length), gfp); + if (!asoc->peer.peer_hmacs) { + retval = 0; + break; + } + + /* Set the default HMAC the peer requested*/ + sctp_auth_asoc_set_default_hmac(asoc, param.hmac_algo); + break; + + case SCTP_PARAM_CHUNKS: + if (!ep->auth_enable) + goto fall_through; + + asoc->peer.peer_chunks = kmemdup(param.p, + ntohs(param.p->length), gfp); + if (!asoc->peer.peer_chunks) + retval = 0; + break; +fall_through: + default: + /* Any unrecognized parameters should have been caught + * and handled by sctp_verify_param() which should be + * called prior to this routine. Simply log the error + * here. + */ + pr_debug("%s: ignoring param:%d for association:%p.\n", + __func__, ntohs(param.p->type), asoc); + break; + } + + return retval; +} + +/* Select a new verification tag. */ +__u32 sctp_generate_tag(const struct sctp_endpoint *ep) +{ + /* I believe that this random number generator complies with RFC1750. + * A tag of 0 is reserved for special cases (e.g. INIT). + */ + __u32 x; + + do { + get_random_bytes(&x, sizeof(__u32)); + } while (x == 0); + + return x; +} + +/* Select an initial TSN to send during startup. */ +__u32 sctp_generate_tsn(const struct sctp_endpoint *ep) +{ + __u32 retval; + + get_random_bytes(&retval, sizeof(__u32)); + return retval; +} + +/* + * ADDIP 3.1.1 Address Configuration Change Chunk (ASCONF) + * 0 1 2 3 + * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Type = 0xC1 | Chunk Flags | Chunk Length | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Serial Number | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Address Parameter | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | ASCONF Parameter #1 | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * \ \ + * / .... / + * \ \ + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | ASCONF Parameter #N | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * Address Parameter and other parameter will not be wrapped in this function + */ +static struct sctp_chunk *sctp_make_asconf(struct sctp_association *asoc, + union sctp_addr *addr, + int vparam_len) +{ + sctp_addiphdr_t asconf; + struct sctp_chunk *retval; + int length = sizeof(asconf) + vparam_len; + union sctp_addr_param addrparam; + int addrlen; + struct sctp_af *af = sctp_get_af_specific(addr->v4.sin_family); + + addrlen = af->to_addr_param(addr, &addrparam); + if (!addrlen) + return NULL; + length += addrlen; + + /* Create the chunk. 
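
(sctp_generate_tag() earlier in this hunk loops until the random draw is nonzero, because a verification tag of 0 is reserved for special cases such as INIT. The same contract sketched in userspace, reading /dev/urandom in place of the kernel's get_random_bytes(); purely illustrative.)

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint32_t generate_vtag(void)
{
	FILE *f = fopen("/dev/urandom", "rb");
	uint32_t x = 0;

	if (!f)
		abort();
	do {
		if (fread(&x, sizeof(x), 1, f) != 1)
			abort();
	} while (x == 0);	/* 0 is reserved, draw again */
	fclose(f);
	return x;
}

int main(void)
{
	printf("vtag: %#x\n", generate_vtag());
	return 0;
}
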
*/ + retval = sctp_make_control(asoc, SCTP_CID_ASCONF, 0, length); + if (!retval) + return NULL; + + asconf.serial = htonl(asoc->addip_serial++); + + retval->subh.addip_hdr = + sctp_addto_chunk(retval, sizeof(asconf), &asconf); + retval->param_hdr.v = + sctp_addto_chunk(retval, addrlen, &addrparam); + + return retval; +} + +/* ADDIP + * 3.2.1 Add IP Address + * 0 1 2 3 + * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Type = 0xC001 | Length = Variable | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | ASCONF-Request Correlation ID | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Address Parameter | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * 3.2.2 Delete IP Address + * 0 1 2 3 + * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Type = 0xC002 | Length = Variable | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | ASCONF-Request Correlation ID | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Address Parameter | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + */ +struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *asoc, + union sctp_addr *laddr, + struct sockaddr *addrs, + int addrcnt, + __be16 flags) +{ + sctp_addip_param_t param; + struct sctp_chunk *retval; + union sctp_addr_param addr_param; + union sctp_addr *addr; + void *addr_buf; + struct sctp_af *af; + int paramlen = sizeof(param); + int addr_param_len = 0; + int totallen = 0; + int i; + int del_pickup = 0; + + /* Get total length of all the address parameters. */ + addr_buf = addrs; + for (i = 0; i < addrcnt; i++) { + addr = addr_buf; + af = sctp_get_af_specific(addr->v4.sin_family); + addr_param_len = af->to_addr_param(addr, &addr_param); + + totallen += paramlen; + totallen += addr_param_len; + + addr_buf += af->sockaddr_len; + if (asoc->asconf_addr_del_pending && !del_pickup) { + /* reuse the parameter length from the same scope one */ + totallen += paramlen; + totallen += addr_param_len; + del_pickup = 1; + + pr_debug("%s: picked same-scope del_pending addr, " + "totallen for all addresses is %d\n", + __func__, totallen); + } + } + + /* Create an asconf chunk with the required length. */ + retval = sctp_make_asconf(asoc, laddr, totallen); + if (!retval) + return NULL; + + /* Add the address parameters to the asconf chunk. 
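
(The totallen accumulation above can be read as simple arithmetic: each add/delete request is an 8-byte ASCONF parameter header, which is a type, a length, and a correlation id, followed by an embedded address parameter of 8 bytes for IPv4 or 20 for IPv6. A sketch, assuming those RFC wire sizes; the constants are illustrative, not kernel definitions.)

#include <stdio.h>

#define ADDIP_PARAM_LEN  8	/* type + length + correlation id */
#define IPV4_PARAM_LEN   8	/* 4-byte TLV header + 4-byte address */
#define IPV6_PARAM_LEN  20	/* 4-byte TLV header + 16-byte address */

static int asconf_body_len(int n_v4, int n_v6)
{
	return n_v4 * (ADDIP_PARAM_LEN + IPV4_PARAM_LEN) +
	       n_v6 * (ADDIP_PARAM_LEN + IPV6_PARAM_LEN);
}

int main(void)
{
	/* two IPv4 adds and one IPv6 delete */
	printf("variable part: %d bytes\n", asconf_body_len(2, 1));  /* 60 */
	return 0;
}
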
*/ + addr_buf = addrs; + for (i = 0; i < addrcnt; i++) { + addr = addr_buf; + af = sctp_get_af_specific(addr->v4.sin_family); + addr_param_len = af->to_addr_param(addr, &addr_param); + param.param_hdr.type = flags; + param.param_hdr.length = htons(paramlen + addr_param_len); + param.crr_id = i; + + sctp_addto_chunk(retval, paramlen, ¶m); + sctp_addto_chunk(retval, addr_param_len, &addr_param); + + addr_buf += af->sockaddr_len; + } + if (flags == SCTP_PARAM_ADD_IP && del_pickup) { + addr = asoc->asconf_addr_del_pending; + af = sctp_get_af_specific(addr->v4.sin_family); + addr_param_len = af->to_addr_param(addr, &addr_param); + param.param_hdr.type = SCTP_PARAM_DEL_IP; + param.param_hdr.length = htons(paramlen + addr_param_len); + param.crr_id = i; + + sctp_addto_chunk(retval, paramlen, ¶m); + sctp_addto_chunk(retval, addr_param_len, &addr_param); + } + return retval; +} + +/* ADDIP + * 3.2.4 Set Primary IP Address + * 0 1 2 3 + * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Type =0xC004 | Length = Variable | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | ASCONF-Request Correlation ID | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Address Parameter | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * Create an ASCONF chunk with Set Primary IP address parameter. + */ +struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc, + union sctp_addr *addr) +{ + sctp_addip_param_t param; + struct sctp_chunk *retval; + int len = sizeof(param); + union sctp_addr_param addrparam; + int addrlen; + struct sctp_af *af = sctp_get_af_specific(addr->v4.sin_family); + + addrlen = af->to_addr_param(addr, &addrparam); + if (!addrlen) + return NULL; + len += addrlen; + + /* Create the chunk and make asconf header. */ + retval = sctp_make_asconf(asoc, addr, len); + if (!retval) + return NULL; + + param.param_hdr.type = SCTP_PARAM_SET_PRIMARY; + param.param_hdr.length = htons(len); + param.crr_id = 0; + + sctp_addto_chunk(retval, sizeof(param), ¶m); + sctp_addto_chunk(retval, addrlen, &addrparam); + + return retval; +} + +/* ADDIP 3.1.2 Address Configuration Acknowledgement Chunk (ASCONF-ACK) + * 0 1 2 3 + * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Type = 0x80 | Chunk Flags | Chunk Length | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Serial Number | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | ASCONF Parameter Response#1 | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * \ \ + * / .... / + * \ \ + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | ASCONF Parameter Response#N | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * Create an ASCONF_ACK chunk with enough space for the parameter responses. + */ +static struct sctp_chunk *sctp_make_asconf_ack(const struct sctp_association *asoc, + __u32 serial, int vparam_len) +{ + sctp_addiphdr_t asconf; + struct sctp_chunk *retval; + int length = sizeof(asconf) + vparam_len; + + /* Create the chunk. 
*/ + retval = sctp_make_control(asoc, SCTP_CID_ASCONF_ACK, 0, length); + if (!retval) + return NULL; + + asconf.serial = htonl(serial); + + retval->subh.addip_hdr = + sctp_addto_chunk(retval, sizeof(asconf), &asconf); + + return retval; +} + +/* Add response parameters to an ASCONF_ACK chunk. */ +static void sctp_add_asconf_response(struct sctp_chunk *chunk, __be32 crr_id, + __be16 err_code, sctp_addip_param_t *asconf_param) +{ + sctp_addip_param_t ack_param; + sctp_errhdr_t err_param; + int asconf_param_len = 0; + int err_param_len = 0; + __be16 response_type; + + if (SCTP_ERROR_NO_ERROR == err_code) { + response_type = SCTP_PARAM_SUCCESS_REPORT; + } else { + response_type = SCTP_PARAM_ERR_CAUSE; + err_param_len = sizeof(err_param); + if (asconf_param) + asconf_param_len = + ntohs(asconf_param->param_hdr.length); + } + + /* Add Success Indication or Error Cause Indication parameter. */ + ack_param.param_hdr.type = response_type; + ack_param.param_hdr.length = htons(sizeof(ack_param) + + err_param_len + + asconf_param_len); + ack_param.crr_id = crr_id; + sctp_addto_chunk(chunk, sizeof(ack_param), &ack_param); + + if (SCTP_ERROR_NO_ERROR == err_code) + return; + + /* Add Error Cause parameter. */ + err_param.cause = err_code; + err_param.length = htons(err_param_len + asconf_param_len); + sctp_addto_chunk(chunk, err_param_len, &err_param); + + /* Add the failed TLV copied from ASCONF chunk. */ + if (asconf_param) + sctp_addto_chunk(chunk, asconf_param_len, asconf_param); +} + +/* Process a asconf parameter. */ +static __be16 sctp_process_asconf_param(struct sctp_association *asoc, + struct sctp_chunk *asconf, + sctp_addip_param_t *asconf_param) +{ + struct sctp_transport *peer; + struct sctp_af *af; + union sctp_addr addr; + union sctp_addr_param *addr_param; + + addr_param = (void *)asconf_param + sizeof(sctp_addip_param_t); + + if (asconf_param->param_hdr.type != SCTP_PARAM_ADD_IP && + asconf_param->param_hdr.type != SCTP_PARAM_DEL_IP && + asconf_param->param_hdr.type != SCTP_PARAM_SET_PRIMARY) + return SCTP_ERROR_UNKNOWN_PARAM; + + switch (addr_param->p.type) { + case SCTP_PARAM_IPV6_ADDRESS: + if (!asoc->peer.ipv6_address) + return SCTP_ERROR_DNS_FAILED; + break; + case SCTP_PARAM_IPV4_ADDRESS: + if (!asoc->peer.ipv4_address) + return SCTP_ERROR_DNS_FAILED; + break; + default: + return SCTP_ERROR_DNS_FAILED; + } + + af = sctp_get_af_specific(param_type2af(addr_param->p.type)); + if (unlikely(!af)) + return SCTP_ERROR_DNS_FAILED; + + af->from_addr_param(&addr, addr_param, htons(asoc->peer.port), 0); + + /* ADDIP 4.2.1 This parameter MUST NOT contain a broadcast + * or multicast address. + * (note: wildcard is permitted and requires special handling so + * make sure we check for that) + */ + if (!af->is_any(&addr) && !af->addr_valid(&addr, NULL, asconf->skb)) + return SCTP_ERROR_DNS_FAILED; + + switch (asconf_param->param_hdr.type) { + case SCTP_PARAM_ADD_IP: + /* Section 4.2.1: + * If the address 0.0.0.0 or ::0 is provided, the source + * address of the packet MUST be added. + */ + if (af->is_any(&addr)) + memcpy(&addr, &asconf->source, sizeof(addr)); + + /* ADDIP 4.3 D9) If an endpoint receives an ADD IP address + * request and does not have the local resources to add this + * new address to the association, it MUST return an Error + * Cause TLV set to the new error code 'Operation Refused + * Due to Resource Shortage'. + */ + + peer = sctp_assoc_add_peer(asoc, &addr, GFP_ATOMIC, SCTP_UNCONFIRMED); + if (!peer) + return SCTP_ERROR_RSRC_LOW; + + /* Start the heartbeat timer. 
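
(sctp_add_asconf_response() above sizes its output in two shapes: a bare 8-byte response parameter on success, or that header plus a 4-byte error-cause header and a verbatim copy of the offending TLV on failure. Sketched with illustrative constants:)

#include <stdio.h>

#define ACK_PARAM_HDR 8		/* type + length + correlation id */
#define ERR_HDR_LEN   4		/* cause code + cause length */

static int ack_param_len(int failed, int offending_tlv_len)
{
	if (!failed)
		return ACK_PARAM_HDR;	/* success report: header only */
	return ACK_PARAM_HDR + ERR_HDR_LEN + offending_tlv_len;
}

int main(void)
{
	printf("success: %d bytes\n", ack_param_len(0, 0));	/* 8 */
	printf("failure: %d bytes\n", ack_param_len(1, 16));	/* 28 */
	return 0;
}
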
*/ + if (!mod_timer(&peer->hb_timer, sctp_transport_timeout(peer))) + sctp_transport_hold(peer); + asoc->new_transport = peer; + break; + case SCTP_PARAM_DEL_IP: + /* ADDIP 4.3 D7) If a request is received to delete the + * last remaining IP address of a peer endpoint, the receiver + * MUST send an Error Cause TLV with the error cause set to the + * new error code 'Request to Delete Last Remaining IP Address'. + */ + if (asoc->peer.transport_count == 1) + return SCTP_ERROR_DEL_LAST_IP; + + /* ADDIP 4.3 D8) If a request is received to delete an IP + * address which is also the source address of the IP packet + * which contained the ASCONF chunk, the receiver MUST reject + * this request. To reject the request the receiver MUST send + * an Error Cause TLV set to the new error code 'Request to + * Delete Source IP Address' + */ + if (sctp_cmp_addr_exact(&asconf->source, &addr)) + return SCTP_ERROR_DEL_SRC_IP; + + /* Section 4.2.2 + * If the address 0.0.0.0 or ::0 is provided, all + * addresses of the peer except the source address of the + * packet MUST be deleted. + */ + if (af->is_any(&addr)) { + sctp_assoc_set_primary(asoc, asconf->transport); + sctp_assoc_del_nonprimary_peers(asoc, + asconf->transport); + } else + sctp_assoc_del_peer(asoc, &addr); + break; + case SCTP_PARAM_SET_PRIMARY: + /* ADDIP Section 4.2.4 + * If the address 0.0.0.0 or ::0 is provided, the receiver + * MAY mark the source address of the packet as its + * primary. + */ + if (af->is_any(&addr)) + memcpy(&addr.v4, sctp_source(asconf), sizeof(addr)); + + peer = sctp_assoc_lookup_paddr(asoc, &addr); + if (!peer) + return SCTP_ERROR_DNS_FAILED; + + sctp_assoc_set_primary(asoc, peer); + break; + } + + return SCTP_ERROR_NO_ERROR; +} + +/* Verify the ASCONF packet before we process it. */ +bool sctp_verify_asconf(const struct sctp_association *asoc, + struct sctp_chunk *chunk, bool addr_param_needed, + struct sctp_paramhdr **errp) +{ + sctp_addip_chunk_t *addip = (sctp_addip_chunk_t *) chunk->chunk_hdr; + union sctp_params param; + bool addr_param_seen = false; + + sctp_walk_params(param, addip, addip_hdr.params) { + size_t length = ntohs(param.p->length); + + *errp = param.p; + switch (param.p->type) { + case SCTP_PARAM_ERR_CAUSE: + break; + case SCTP_PARAM_IPV4_ADDRESS: + if (length != sizeof(sctp_ipv4addr_param_t)) + return false; + addr_param_seen = true; + break; + case SCTP_PARAM_IPV6_ADDRESS: + if (length != sizeof(sctp_ipv6addr_param_t)) + return false; + addr_param_seen = true; + break; + case SCTP_PARAM_ADD_IP: + case SCTP_PARAM_DEL_IP: + case SCTP_PARAM_SET_PRIMARY: + /* In ASCONF chunks, these need to be first. */ + if (addr_param_needed && !addr_param_seen) + return false; + length = ntohs(param.addip->param_hdr.length); + if (length < sizeof(sctp_addip_param_t) + + sizeof(sctp_paramhdr_t)) + return false; + break; + case SCTP_PARAM_SUCCESS_REPORT: + case SCTP_PARAM_ADAPTATION_LAYER_IND: + if (length != sizeof(sctp_addip_param_t)) + return false; + break; + default: + /* This is unkown to us, reject! */ + return false; + } + } + + /* Remaining sanity checks. */ + if (addr_param_needed && !addr_param_seen) + return false; + if (!addr_param_needed && addr_param_seen) + return false; + if (param.v != chunk->chunk_end) + return false; + + return true; +} + +/* Process an incoming ASCONF chunk with the next expected serial no. and + * return an ASCONF_ACK chunk to be sent in response. 
+ */
+struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
+ struct sctp_chunk *asconf)
+{
+ sctp_addip_chunk_t *addip = (sctp_addip_chunk_t *) asconf->chunk_hdr;
+ bool all_param_pass = true;
+ union sctp_params param;
+ sctp_addiphdr_t *hdr;
+ union sctp_addr_param *addr_param;
+ sctp_addip_param_t *asconf_param;
+ struct sctp_chunk *asconf_ack;
+ __be16 err_code;
+ int length = 0;
+ int chunk_len;
+ __u32 serial;
+
+ chunk_len = ntohs(asconf->chunk_hdr->length) - sizeof(sctp_chunkhdr_t);
+ hdr = (sctp_addiphdr_t *)asconf->skb->data;
+ serial = ntohl(hdr->serial);
+
+ /* Skip the addiphdr and store a pointer to address parameter. */
+ length = sizeof(sctp_addiphdr_t);
+ addr_param = (union sctp_addr_param *)(asconf->skb->data + length);
+ chunk_len -= length;
+
+ /* Skip the address parameter and store a pointer to the first
+ * asconf parameter.
+ */
+ length = ntohs(addr_param->p.length);
+ asconf_param = (void *)addr_param + length;
+ chunk_len -= length;
+
+ /* Create an ASCONF_ACK chunk.
+ * Based on the parameter definitions, an ASCONF_ACK parameter is
+ * at most four times the size of the corresponding ASCONF
+ * parameter.
+ */
+ asconf_ack = sctp_make_asconf_ack(asoc, serial, chunk_len * 4);
+ if (!asconf_ack)
+ goto done;
+
+ /* Process the TLVs contained within the ASCONF chunk. */
+ sctp_walk_params(param, addip, addip_hdr.params) {
+ /* Skip preceding address parameters. */
+ if (param.p->type == SCTP_PARAM_IPV4_ADDRESS ||
+ param.p->type == SCTP_PARAM_IPV6_ADDRESS)
+ continue;
+
+ err_code = sctp_process_asconf_param(asoc, asconf,
+ param.addip);
+ /* ADDIP 4.1 A7)
+ * If an error response is received for a TLV parameter,
+ * all TLVs with no response before the failed TLV are
+ * considered successful if not reported. All TLVs after
+ * the failed response are considered unsuccessful unless
+ * a specific success indication is present for the parameter.
+ */
+ if (err_code != SCTP_ERROR_NO_ERROR)
+ all_param_pass = false;
+ if (!all_param_pass)
+ sctp_add_asconf_response(asconf_ack, param.addip->crr_id,
+ err_code, param.addip);
+
+ /* ADDIP 4.3 D11) When an endpoint receiving an ASCONF to add
+ * an IP address sends an 'Out of Resource' in its response, it
+ * MUST also fail any subsequent add or delete requests bundled
+ * in the ASCONF.
+ */
+ if (err_code == SCTP_ERROR_RSRC_LOW)
+ goto done;
+ }
+done:
+ asoc->peer.addip_serial++;
+
+ /* If we are sending a new ASCONF_ACK, hold a reference to it in the
+ * assoc after freeing the reference to the old asconf ack, if any.
+ */
+ if (asconf_ack) {
+ sctp_chunk_hold(asconf_ack);
+ list_add_tail(&asconf_ack->transmitted_list,
+ &asoc->asconf_ack_list);
+ }
+
+ return asconf_ack;
+}
+
+/* Process an asconf parameter that is successfully acked. */
+static void sctp_asconf_param_success(struct sctp_association *asoc,
+ sctp_addip_param_t *asconf_param)
+{
+ struct sctp_af *af;
+ union sctp_addr addr;
+ struct sctp_bind_addr *bp = &asoc->base.bind_addr;
+ union sctp_addr_param *addr_param;
+ struct sctp_transport *transport;
+ struct sctp_sockaddr_entry *saddr;
+
+ addr_param = (void *)asconf_param + sizeof(sctp_addip_param_t);
+
+ /* We have checked the packet before, so we do not check again. */
+ af = sctp_get_af_specific(param_type2af(addr_param->p.type));
+ af->from_addr_param(&addr, addr_param, htons(bp->port), 0);
+
+ switch (asconf_param->param_hdr.type) {
+ case SCTP_PARAM_ADD_IP:
+ /* This is always done in BH context with a socket lock
+ * held, so the list cannot change.
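
(The all_param_pass flag in the loop above implements ADDIP rule A7: parameters before the first failure receive no explicit response, while the failing one and everything after it are answered explicitly. A toy model of just that reporting rule, where process_one() and respond() are hypothetical stand-ins:)

#include <stdbool.h>
#include <stdio.h>

static int process_one(int param)	/* pretend parameter 3 fails */
{
	return param == 3 ? -1 : 0;
}

static void respond(int param, int err)
{
	printf("respond: param %d, %s\n", param, err ? "error" : "success");
}

int main(void)
{
	bool all_param_pass = true;

	for (int param = 1; param <= 5; param++) {
		int err = process_one(param);

		if (err)
			all_param_pass = false;
		if (!all_param_pass)	/* first failure switches reporting on */
			respond(param, err);
	}
	return 0;
}
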
+ */ + local_bh_disable(); + list_for_each_entry(saddr, &bp->address_list, list) { + if (sctp_cmp_addr_exact(&saddr->a, &addr)) + saddr->state = SCTP_ADDR_SRC; + } + local_bh_enable(); + list_for_each_entry(transport, &asoc->peer.transport_addr_list, + transports) { + dst_release(transport->dst); + transport->dst = NULL; + } + break; + case SCTP_PARAM_DEL_IP: + local_bh_disable(); + sctp_del_bind_addr(bp, &addr); + if (asoc->asconf_addr_del_pending != NULL && + sctp_cmp_addr_exact(asoc->asconf_addr_del_pending, &addr)) { + kfree(asoc->asconf_addr_del_pending); + asoc->asconf_addr_del_pending = NULL; + } + local_bh_enable(); + list_for_each_entry(transport, &asoc->peer.transport_addr_list, + transports) { + dst_release(transport->dst); + transport->dst = NULL; + } + break; + default: + break; + } +} + +/* Get the corresponding ASCONF response error code from the ASCONF_ACK chunk + * for the given asconf parameter. If there is no response for this parameter, + * return the error code based on the third argument 'no_err'. + * ADDIP 4.1 + * A7) If an error response is received for a TLV parameter, all TLVs with no + * response before the failed TLV are considered successful if not reported. + * All TLVs after the failed response are considered unsuccessful unless a + * specific success indication is present for the parameter. + */ +static __be16 sctp_get_asconf_response(struct sctp_chunk *asconf_ack, + sctp_addip_param_t *asconf_param, + int no_err) +{ + sctp_addip_param_t *asconf_ack_param; + sctp_errhdr_t *err_param; + int length; + int asconf_ack_len; + __be16 err_code; + + if (no_err) + err_code = SCTP_ERROR_NO_ERROR; + else + err_code = SCTP_ERROR_REQ_REFUSED; + + asconf_ack_len = ntohs(asconf_ack->chunk_hdr->length) - + sizeof(sctp_chunkhdr_t); + + /* Skip the addiphdr from the asconf_ack chunk and store a pointer to + * the first asconf_ack parameter. + */ + length = sizeof(sctp_addiphdr_t); + asconf_ack_param = (sctp_addip_param_t *)(asconf_ack->skb->data + + length); + asconf_ack_len -= length; + + while (asconf_ack_len > 0) { + if (asconf_ack_param->crr_id == asconf_param->crr_id) { + switch (asconf_ack_param->param_hdr.type) { + case SCTP_PARAM_SUCCESS_REPORT: + return SCTP_ERROR_NO_ERROR; + case SCTP_PARAM_ERR_CAUSE: + length = sizeof(sctp_addip_param_t); + err_param = (void *)asconf_ack_param + length; + asconf_ack_len -= length; + if (asconf_ack_len > 0) + return err_param->cause; + else + return SCTP_ERROR_INV_PARAM; + break; + default: + return SCTP_ERROR_INV_PARAM; + } + } + + length = ntohs(asconf_ack_param->param_hdr.length); + asconf_ack_param = (void *)asconf_ack_param + length; + asconf_ack_len -= length; + } + + return err_code; +} + +/* Process an incoming ASCONF_ACK chunk against the cached last ASCONF chunk. */ +int sctp_process_asconf_ack(struct sctp_association *asoc, + struct sctp_chunk *asconf_ack) +{ + struct sctp_chunk *asconf = asoc->addip_last_asconf; + union sctp_addr_param *addr_param; + sctp_addip_param_t *asconf_param; + int length = 0; + int asconf_len = asconf->skb->len; + int all_param_pass = 0; + int no_err = 1; + int retval = 0; + __be16 err_code = SCTP_ERROR_NO_ERROR; + + /* Skip the chunkhdr and addiphdr from the last asconf sent and store + * a pointer to address parameter. + */ + length = sizeof(sctp_addip_chunk_t); + addr_param = (union sctp_addr_param *)(asconf->skb->data + length); + asconf_len -= length; + + /* Skip the address parameter in the last asconf sent and store a + * pointer to the first asconf parameter. 
+ */ + length = ntohs(addr_param->p.length); + asconf_param = (void *)addr_param + length; + asconf_len -= length; + + /* ADDIP 4.1 + * A8) If there is no response(s) to specific TLV parameter(s), and no + * failures are indicated, then all request(s) are considered + * successful. + */ + if (asconf_ack->skb->len == sizeof(sctp_addiphdr_t)) + all_param_pass = 1; + + /* Process the TLVs contained in the last sent ASCONF chunk. */ + while (asconf_len > 0) { + if (all_param_pass) + err_code = SCTP_ERROR_NO_ERROR; + else { + err_code = sctp_get_asconf_response(asconf_ack, + asconf_param, + no_err); + if (no_err && (SCTP_ERROR_NO_ERROR != err_code)) + no_err = 0; + } + + switch (err_code) { + case SCTP_ERROR_NO_ERROR: + sctp_asconf_param_success(asoc, asconf_param); + break; + + case SCTP_ERROR_RSRC_LOW: + retval = 1; + break; + + case SCTP_ERROR_UNKNOWN_PARAM: + /* Disable sending this type of asconf parameter in + * future. + */ + asoc->peer.addip_disabled_mask |= + asconf_param->param_hdr.type; + break; + + case SCTP_ERROR_REQ_REFUSED: + case SCTP_ERROR_DEL_LAST_IP: + case SCTP_ERROR_DEL_SRC_IP: + default: + break; + } + + /* Skip the processed asconf parameter and move to the next + * one. + */ + length = ntohs(asconf_param->param_hdr.length); + asconf_param = (void *)asconf_param + length; + asconf_len -= length; + } + + if (no_err && asoc->src_out_of_asoc_ok) { + asoc->src_out_of_asoc_ok = 0; + sctp_transport_immediate_rtx(asoc->peer.primary_path); + } + + /* Free the cached last sent asconf chunk. */ + list_del_init(&asconf->transmitted_list); + sctp_chunk_free(asconf); + asoc->addip_last_asconf = NULL; + + return retval; +} + +/* Make a FWD TSN chunk. */ +struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc, + __u32 new_cum_tsn, size_t nstreams, + struct sctp_fwdtsn_skip *skiplist) +{ + struct sctp_chunk *retval = NULL; + struct sctp_fwdtsn_hdr ftsn_hdr; + struct sctp_fwdtsn_skip skip; + size_t hint; + int i; + + hint = (nstreams + 1) * sizeof(__u32); + + retval = sctp_make_control(asoc, SCTP_CID_FWD_TSN, 0, hint); + + if (!retval) + return NULL; + + ftsn_hdr.new_cum_tsn = htonl(new_cum_tsn); + retval->subh.fwdtsn_hdr = + sctp_addto_chunk(retval, sizeof(ftsn_hdr), &ftsn_hdr); + + for (i = 0; i < nstreams; i++) { + skip.stream = skiplist[i].stream; + skip.ssn = skiplist[i].ssn; + sctp_addto_chunk(retval, sizeof(skip), &skip); + } + + return retval; +} diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c new file mode 100644 index 000000000..fef2acdf4 --- /dev/null +++ b/net/sctp/sm_sideeffect.c @@ -0,0 +1,1751 @@ +/* SCTP kernel implementation + * (C) Copyright IBM Corp. 2001, 2004 + * Copyright (c) 1999 Cisco, Inc. + * Copyright (c) 1999-2001 Motorola, Inc. + * + * This file is part of the SCTP kernel implementation + * + * These functions work with the state functions in sctp_sm_statefuns.c + * to implement that state operations. These functions implement the + * steps which require modifying existing data structures. + * + * This SCTP implementation is free software; + * you can redistribute it and/or modify it under the terms of + * the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This SCTP implementation is distributed in the hope that it + * will be useful, but WITHOUT ANY WARRANTY; without even the implied + * ************************ + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNU CC; see the file COPYING. If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * Karl Knutson <karl@athena.chicago.il.us>
+ * Jon Grimm <jgrimm@austin.ibm.com>
+ * Hui Huang <hui.huang@nokia.com>
+ * Dajiang Zhang <dajiang.zhang@nokia.com>
+ * Daisy Chang <daisyc@us.ibm.com>
+ * Sridhar Samudrala <sri@us.ibm.com>
+ * Ardelle Fan <ardelle.fan@intel.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/ip.h>
+#include <linux/gfp.h>
+#include <net/sock.h>
+#include <net/sctp/sctp.h>
+#include <net/sctp/sm.h>
+
+static int sctp_cmd_interpreter(sctp_event_t event_type,
+ sctp_subtype_t subtype,
+ sctp_state_t state,
+ struct sctp_endpoint *ep,
+ struct sctp_association *asoc,
+ void *event_arg,
+ sctp_disposition_t status,
+ sctp_cmd_seq_t *commands,
+ gfp_t gfp);
+static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
+ sctp_state_t state,
+ struct sctp_endpoint *ep,
+ struct sctp_association *asoc,
+ void *event_arg,
+ sctp_disposition_t status,
+ sctp_cmd_seq_t *commands,
+ gfp_t gfp);
+
+static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
+ struct sctp_transport *t);
+/********************************************************************
+ * Helper functions
+ ********************************************************************/
+
+/* A helper function for delayed processing of INET ECN CE bit. */
+static void sctp_do_ecn_ce_work(struct sctp_association *asoc,
+ __u32 lowest_tsn)
+{
+ /* Save the TSN away for comparison when we receive CWR */
+
+ asoc->last_ecne_tsn = lowest_tsn;
+ asoc->need_ecne = 1;
+}
+
+/* Helper function for delayed processing of SCTP ECNE chunk. */
+/* RFC 2960 Appendix A
+ *
+ * RFC 2481 details a specific bit for a sender to send in
+ * the header of its next outbound TCP segment to indicate to
+ * its peer that it has reduced its congestion window. This
+ * is termed the CWR bit. For SCTP the same indication is made
+ * by including the CWR chunk. This chunk contains one data
+ * element, i.e. the TSN number that was sent in the ECNE chunk.
+ * This element represents the lowest TSN number in the datagram
+ * that was originally marked with the CE bit.
+ */
+static struct sctp_chunk *sctp_do_ecn_ecne_work(struct sctp_association *asoc,
+ __u32 lowest_tsn,
+ struct sctp_chunk *chunk)
+{
+ struct sctp_chunk *repl;
+
+ /* Our previously transmitted packet ran into some congestion
+ * so we should take action by reducing cwnd and ssthresh
+ * and then ACK our peer that we've done so by
+ * sending a CWR.
+ */
+
+ /* First, try to determine if we want to actually lower
+ * our cwnd variables. Only lower them if the ECNE looks more
+ * recent than the last response.
+ */
+ if (TSN_lt(asoc->last_cwr_tsn, lowest_tsn)) {
+ struct sctp_transport *transport;
+
+ /* Find which transport's congestion variables
+ * need to be adjusted.
+ */
+ transport = sctp_assoc_lookup_tsn(asoc, lowest_tsn);
+
+ /* Update the congestion variables. */
+ if (transport)
+ sctp_transport_lower_cwnd(transport,
+ SCTP_LOWER_CWND_ECNE);
+ asoc->last_cwr_tsn = lowest_tsn;
+ }
+
+ /* Always try to quiet the other end.
In case of lost CWR, + * resend last_cwr_tsn. + */ + repl = sctp_make_cwr(asoc, asoc->last_cwr_tsn, chunk); + + /* If we run out of memory, it will look like a lost CWR. We'll + * get back in sync eventually. + */ + return repl; +} + +/* Helper function to do delayed processing of ECN CWR chunk. */ +static void sctp_do_ecn_cwr_work(struct sctp_association *asoc, + __u32 lowest_tsn) +{ + /* Turn off ECNE getting auto-prepended to every outgoing + * packet + */ + asoc->need_ecne = 0; +} + +/* Generate SACK if necessary. We call this at the end of a packet. */ +static int sctp_gen_sack(struct sctp_association *asoc, int force, + sctp_cmd_seq_t *commands) +{ + __u32 ctsn, max_tsn_seen; + struct sctp_chunk *sack; + struct sctp_transport *trans = asoc->peer.last_data_from; + int error = 0; + + if (force || + (!trans && (asoc->param_flags & SPP_SACKDELAY_DISABLE)) || + (trans && (trans->param_flags & SPP_SACKDELAY_DISABLE))) + asoc->peer.sack_needed = 1; + + ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map); + max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map); + + /* From 12.2 Parameters necessary per association (i.e. the TCB): + * + * Ack State : This flag indicates if the next received packet + * : is to be responded to with a SACK. ... + * : When DATA chunks are out of order, SACK's + * : are not delayed (see Section 6). + * + * [This is actually not mentioned in Section 6, but we + * implement it here anyway. --piggy] + */ + if (max_tsn_seen != ctsn) + asoc->peer.sack_needed = 1; + + /* From 6.2 Acknowledgement on Reception of DATA Chunks: + * + * Section 4.2 of [RFC2581] SHOULD be followed. Specifically, + * an acknowledgement SHOULD be generated for at least every + * second packet (not every second DATA chunk) received, and + * SHOULD be generated within 200 ms of the arrival of any + * unacknowledged DATA chunk. ... + */ + if (!asoc->peer.sack_needed) { + asoc->peer.sack_cnt++; + + /* Set the SACK delay timeout based on the + * SACK delay for the last transport + * data was received from, or the default + * for the association. + */ + if (trans) { + /* We will need a SACK for the next packet. */ + if (asoc->peer.sack_cnt >= trans->sackfreq - 1) + asoc->peer.sack_needed = 1; + + asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = + trans->sackdelay; + } else { + /* We will need a SACK for the next packet. */ + if (asoc->peer.sack_cnt >= asoc->sackfreq - 1) + asoc->peer.sack_needed = 1; + + asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = + asoc->sackdelay; + } + + /* Restart the SACK timer. */ + sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, + SCTP_TO(SCTP_EVENT_TIMEOUT_SACK)); + } else { + asoc->a_rwnd = asoc->rwnd; + sack = sctp_make_sack(asoc); + if (!sack) + goto nomem; + + asoc->peer.sack_needed = 0; + asoc->peer.sack_cnt = 0; + + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(sack)); + + /* Stop the SACK timer. */ + sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, + SCTP_TO(SCTP_EVENT_TIMEOUT_SACK)); + } + + return error; +nomem: + error = -ENOMEM; + return error; +} + +/* When the T3-RTX timer expires, it calls this function to create the + * relevant state machine event. + */ +void sctp_generate_t3_rtx_event(unsigned long peer) +{ + int error; + struct sctp_transport *transport = (struct sctp_transport *) peer; + struct sctp_association *asoc = transport->asoc; + struct net *net = sock_net(asoc->base.sk); + + /* Check whether a task is in the sock. 
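
(sctp_gen_sack() above sends a SACK immediately when forced, when data arrived out of order, or once every sackfreq packets; otherwise it re-arms the delayed-SACK timer. A condensed model of just that decision, with illustrative types; SPP_SACKDELAY_DISABLE would map to the force flag here.)

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct sack_state {
	uint32_t ctsn, max_tsn_seen;	/* cumulative vs. highest TSN seen */
	int sack_cnt, sackfreq;
	bool sack_needed;
};

/* Returns true when a SACK should go out now; false means (re)arm the
 * delayed-SACK timer instead.
 */
static bool sack_now(struct sack_state *s, bool force)
{
	if (force || s->max_tsn_seen != s->ctsn || s->sack_needed)
		return true;
	if (++s->sack_cnt >= s->sackfreq - 1)
		s->sack_needed = true;	/* SACK the *next* packet */
	return false;
}

int main(void)
{
	struct sack_state s = { .ctsn = 100, .max_tsn_seen = 100, .sackfreq = 2 };

	for (int pkt = 1; pkt <= 4; pkt++) {
		if (sack_now(&s, false)) {
			printf("packet %d: send SACK\n", pkt);
			s.sack_needed = false;
			s.sack_cnt = 0;
		} else {
			printf("packet %d: delay\n", pkt);
		}
	}
	return 0;	/* prints delay/SACK alternating: every 2nd packet */
}
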
*/ + + bh_lock_sock(asoc->base.sk); + if (sock_owned_by_user(asoc->base.sk)) { + pr_debug("%s: sock is busy\n", __func__); + + /* Try again later. */ + if (!mod_timer(&transport->T3_rtx_timer, jiffies + (HZ/20))) + sctp_transport_hold(transport); + goto out_unlock; + } + + /* Is this transport really dead and just waiting around for + * the timer to let go of the reference? + */ + if (transport->dead) + goto out_unlock; + + /* Run through the state machine. */ + error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT, + SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_T3_RTX), + asoc->state, + asoc->ep, asoc, + transport, GFP_ATOMIC); + + if (error) + asoc->base.sk->sk_err = -error; + +out_unlock: + bh_unlock_sock(asoc->base.sk); + sctp_transport_put(transport); +} + +/* This is a sa interface for producing timeout events. It works + * for timeouts which use the association as their parameter. + */ +static void sctp_generate_timeout_event(struct sctp_association *asoc, + sctp_event_timeout_t timeout_type) +{ + struct net *net = sock_net(asoc->base.sk); + int error = 0; + + bh_lock_sock(asoc->base.sk); + if (sock_owned_by_user(asoc->base.sk)) { + pr_debug("%s: sock is busy: timer %d\n", __func__, + timeout_type); + + /* Try again later. */ + if (!mod_timer(&asoc->timers[timeout_type], jiffies + (HZ/20))) + sctp_association_hold(asoc); + goto out_unlock; + } + + /* Is this association really dead and just waiting around for + * the timer to let go of the reference? + */ + if (asoc->base.dead) + goto out_unlock; + + /* Run through the state machine. */ + error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT, + SCTP_ST_TIMEOUT(timeout_type), + asoc->state, asoc->ep, asoc, + (void *)timeout_type, GFP_ATOMIC); + + if (error) + asoc->base.sk->sk_err = -error; + +out_unlock: + bh_unlock_sock(asoc->base.sk); + sctp_association_put(asoc); +} + +static void sctp_generate_t1_cookie_event(unsigned long data) +{ + struct sctp_association *asoc = (struct sctp_association *) data; + sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_COOKIE); +} + +static void sctp_generate_t1_init_event(unsigned long data) +{ + struct sctp_association *asoc = (struct sctp_association *) data; + sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_INIT); +} + +static void sctp_generate_t2_shutdown_event(unsigned long data) +{ + struct sctp_association *asoc = (struct sctp_association *) data; + sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T2_SHUTDOWN); +} + +static void sctp_generate_t4_rto_event(unsigned long data) +{ + struct sctp_association *asoc = (struct sctp_association *) data; + sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T4_RTO); +} + +static void sctp_generate_t5_shutdown_guard_event(unsigned long data) +{ + struct sctp_association *asoc = (struct sctp_association *)data; + sctp_generate_timeout_event(asoc, + SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD); + +} /* sctp_generate_t5_shutdown_guard_event() */ + +static void sctp_generate_autoclose_event(unsigned long data) +{ + struct sctp_association *asoc = (struct sctp_association *) data; + sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_AUTOCLOSE); +} + +/* Generate a heart beat event. If the sock is busy, reschedule. Make + * sure that the transport is still valid. 
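
(Every timer handler in this file follows the pattern visible above: take the BH socket lock, and if user context owns the socket, do no work and simply re-arm the timer roughly 50 ms out (jiffies + HZ/20). A loose userspace analogy using a trylock; the kernel's sock_owned_by_user() check is not literally a trylock, so treat this only as a sketch of the retry shape.)

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sock_lock = PTHREAD_MUTEX_INITIALIZER;

/* Called from a hypothetical timer thread. Returns the delay in ms before
 * the timer should fire again, or 0 when the event was fully handled.
 */
static int timer_fires(void)
{
	if (pthread_mutex_trylock(&sock_lock) != 0)
		return 50;	/* sock busy: retry in ~HZ/20, as above */

	puts("running state machine event");
	pthread_mutex_unlock(&sock_lock);
	return 0;
}

int main(void)
{
	int delay = timer_fires();

	if (delay)
		printf("sock busy, retry in %d ms\n", delay);
	return 0;
}
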
+ */ +void sctp_generate_heartbeat_event(unsigned long data) +{ + int error = 0; + struct sctp_transport *transport = (struct sctp_transport *) data; + struct sctp_association *asoc = transport->asoc; + struct net *net = sock_net(asoc->base.sk); + + bh_lock_sock(asoc->base.sk); + if (sock_owned_by_user(asoc->base.sk)) { + pr_debug("%s: sock is busy\n", __func__); + + /* Try again later. */ + if (!mod_timer(&transport->hb_timer, jiffies + (HZ/20))) + sctp_transport_hold(transport); + goto out_unlock; + } + + /* Is this structure just waiting around for us to actually + * get destroyed? + */ + if (transport->dead) + goto out_unlock; + + error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT, + SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT), + asoc->state, asoc->ep, asoc, + transport, GFP_ATOMIC); + + if (error) + asoc->base.sk->sk_err = -error; + +out_unlock: + bh_unlock_sock(asoc->base.sk); + sctp_transport_put(transport); +} + +/* Handle the timeout of the ICMP protocol unreachable timer. Trigger + * the correct state machine transition that will close the association. + */ +void sctp_generate_proto_unreach_event(unsigned long data) +{ + struct sctp_transport *transport = (struct sctp_transport *) data; + struct sctp_association *asoc = transport->asoc; + struct net *net = sock_net(asoc->base.sk); + + bh_lock_sock(asoc->base.sk); + if (sock_owned_by_user(asoc->base.sk)) { + pr_debug("%s: sock is busy\n", __func__); + + /* Try again later. */ + if (!mod_timer(&transport->proto_unreach_timer, + jiffies + (HZ/20))) + sctp_association_hold(asoc); + goto out_unlock; + } + + /* Is this structure just waiting around for us to actually + * get destroyed? + */ + if (asoc->base.dead) + goto out_unlock; + + sctp_do_sm(net, SCTP_EVENT_T_OTHER, + SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH), + asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC); + +out_unlock: + bh_unlock_sock(asoc->base.sk); + sctp_association_put(asoc); +} + + +/* Inject a SACK Timeout event into the state machine. */ +static void sctp_generate_sack_event(unsigned long data) +{ + struct sctp_association *asoc = (struct sctp_association *) data; + sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_SACK); +} + +sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = { + NULL, + sctp_generate_t1_cookie_event, + sctp_generate_t1_init_event, + sctp_generate_t2_shutdown_event, + NULL, + sctp_generate_t4_rto_event, + sctp_generate_t5_shutdown_guard_event, + NULL, + sctp_generate_sack_event, + sctp_generate_autoclose_event, +}; + + +/* RFC 2960 8.2 Path Failure Detection + * + * When its peer endpoint is multi-homed, an endpoint should keep a + * error counter for each of the destination transport addresses of the + * peer endpoint. + * + * Each time the T3-rtx timer expires on any address, or when a + * HEARTBEAT sent to an idle address is not acknowledged within a RTO, + * the error counter of that destination address will be incremented. + * When the value in the error counter exceeds the protocol parameter + * 'Path.Max.Retrans' of that destination address, the endpoint should + * mark the destination transport address as inactive, and a + * notification SHOULD be sent to the upper layer. + * + */ +static void sctp_do_8_2_transport_strike(sctp_cmd_seq_t *commands, + struct sctp_association *asoc, + struct sctp_transport *transport, + int is_hb) +{ + /* The check for association's overall error counter exceeding the + * threshold is done in the state function. + */ + /* We are here due to a timer expiration. 
If the timer was
+ * not a HEARTBEAT, then normal error tracking is done.
+ * If the timer was a heartbeat, we only increment error counts
+ * when we already have an outstanding HEARTBEAT that has not
+ * been acknowledged.
+ * Additionally, some transport states inhibit error increments.
+ */
+ if (!is_hb) {
+ asoc->overall_error_count++;
+ if (transport->state != SCTP_INACTIVE)
+ transport->error_count++;
+ } else if (transport->hb_sent) {
+ if (transport->state != SCTP_UNCONFIRMED)
+ asoc->overall_error_count++;
+ if (transport->state != SCTP_INACTIVE)
+ transport->error_count++;
+ }
+
+ /* If the transport error count is greater than the pf_retrans
+ * threshold, and less than pathmaxrxt, and if the current state
+ * is SCTP_ACTIVE, then mark this transport as Partially Failed,
+ * see SCTP Quick Failover Draft, section 5.1
+ */
+ if ((transport->state == SCTP_ACTIVE) &&
+ (asoc->pf_retrans < transport->pathmaxrxt) &&
+ (transport->error_count > asoc->pf_retrans)) {
+
+ sctp_assoc_control_transport(asoc, transport,
+ SCTP_TRANSPORT_PF,
+ 0);
+
+ /* Update the hb timer to resend a heartbeat every rto */
+ sctp_cmd_hb_timer_update(commands, transport);
+ }
+
+ if (transport->state != SCTP_INACTIVE &&
+ (transport->error_count > transport->pathmaxrxt)) {
+ pr_debug("%s: association:%p transport addr:%pISpc failed\n",
+ __func__, asoc, &transport->ipaddr.sa);
+
+ sctp_assoc_control_transport(asoc, transport,
+ SCTP_TRANSPORT_DOWN,
+ SCTP_FAILED_THRESHOLD);
+ }
+
+ /* E2) For the destination address for which the timer
+ * expires, set RTO <- RTO * 2 ("back off the timer"). The
+ * maximum value discussed in rule C7 above (RTO.max) may be
+ * used to provide an upper bound to this doubling operation.
+ *
+ * Special Case: the first HB doesn't trigger exponential backoff.
+ * The first unacknowledged HB triggers it. We do this with a flag
+ * that indicates that we have an outstanding HB.
+ */
+ if (!is_hb || transport->hb_sent) {
+ transport->rto = min((transport->rto * 2), transport->asoc->rto_max);
+ sctp_max_rto(asoc, transport);
+ }
+}
+
+/* Worker routine to handle INIT command failure. */
+static void sctp_cmd_init_failed(sctp_cmd_seq_t *commands,
+ struct sctp_association *asoc,
+ unsigned int error)
+{
+ struct sctp_ulpevent *event;
+
+ event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_CANT_STR_ASSOC,
+ (__u16)error, 0, 0, NULL,
+ GFP_ATOMIC);
+
+ if (event)
+ sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
+ SCTP_ULPEVENT(event));
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
+ SCTP_STATE(SCTP_STATE_CLOSED));
+
+ /* SEND_FAILED sent later when cleaning up the association. */
+ asoc->outqueue.error = error;
+ sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
+}
+
+/* Worker routine to handle SCTP_CMD_ASSOC_FAILED. */
+static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands,
+ struct sctp_association *asoc,
+ sctp_event_t event_type,
+ sctp_subtype_t subtype,
+ struct sctp_chunk *chunk,
+ unsigned int error)
+{
+ struct sctp_ulpevent *event;
+ struct sctp_chunk *abort;
+ /* Cancel any partial delivery in progress.
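
(The E2 rule applied at the end of sctp_do_8_2_transport_strike() above is plain exponential backoff capped at RTO.max. Standalone, in milliseconds, using the RFC 4960 defaults of RTO.Min = 1 s and RTO.Max = 60 s as the illustrative starting point and cap:)

#include <stdint.h>
#include <stdio.h>

static uint32_t rto_backoff(uint32_t rto, uint32_t rto_max)
{
	uint32_t doubled = rto * 2;

	return doubled < rto_max ? doubled : rto_max;	/* min(rto*2, rto_max) */
}

int main(void)
{
	uint32_t rto = 1000, rto_max = 60000;

	for (int strike = 1; strike <= 8; strike++) {
		rto = rto_backoff(rto, rto_max);
		printf("strike %d: RTO %u ms\n", strike, rto);
	}
	return 0;	/* 2000, 4000, ..., then pinned at 60000 */
}
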
*/ + sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC); + + if (event_type == SCTP_EVENT_T_CHUNK && subtype.chunk == SCTP_CID_ABORT) + event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST, + (__u16)error, 0, 0, chunk, + GFP_ATOMIC); + else + event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST, + (__u16)error, 0, 0, NULL, + GFP_ATOMIC); + if (event) + sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, + SCTP_ULPEVENT(event)); + + if (asoc->overall_error_count >= asoc->max_retrans) { + abort = sctp_make_violation_max_retrans(asoc, chunk); + if (abort) + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, + SCTP_CHUNK(abort)); + } + + sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, + SCTP_STATE(SCTP_STATE_CLOSED)); + + /* SEND_FAILED sent later when cleaning up the association. */ + asoc->outqueue.error = error; + sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); +} + +/* Process an init chunk (may be real INIT/INIT-ACK or an embedded INIT + * inside the cookie. In reality, this is only used for INIT-ACK processing + * since all other cases use "temporary" associations and can do all + * their work in statefuns directly. + */ +static int sctp_cmd_process_init(sctp_cmd_seq_t *commands, + struct sctp_association *asoc, + struct sctp_chunk *chunk, + sctp_init_chunk_t *peer_init, + gfp_t gfp) +{ + int error; + + /* We only process the init as a sideeffect in a single + * case. This is when we process the INIT-ACK. If we + * fail during INIT processing (due to malloc problems), + * just return the error and stop processing the stack. + */ + if (!sctp_process_init(asoc, chunk, sctp_source(chunk), peer_init, gfp)) + error = -ENOMEM; + else + error = 0; + + return error; +} + +/* Helper function to break out starting up of heartbeat timers. */ +static void sctp_cmd_hb_timers_start(sctp_cmd_seq_t *cmds, + struct sctp_association *asoc) +{ + struct sctp_transport *t; + + /* Start a heartbeat timer for each transport on the association. + * hold a reference on the transport to make sure none of + * the needed data structures go away. + */ + list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) { + + if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t))) + sctp_transport_hold(t); + } +} + +static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *cmds, + struct sctp_association *asoc) +{ + struct sctp_transport *t; + + /* Stop all heartbeat timers. */ + + list_for_each_entry(t, &asoc->peer.transport_addr_list, + transports) { + if (del_timer(&t->hb_timer)) + sctp_transport_put(t); + } +} + +/* Helper function to stop any pending T3-RTX timers */ +static void sctp_cmd_t3_rtx_timers_stop(sctp_cmd_seq_t *cmds, + struct sctp_association *asoc) +{ + struct sctp_transport *t; + + list_for_each_entry(t, &asoc->peer.transport_addr_list, + transports) { + if (del_timer(&t->T3_rtx_timer)) + sctp_transport_put(t); + } +} + + +/* Helper function to update the heartbeat timer. */ +static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds, + struct sctp_transport *t) +{ + /* Update the heartbeat timer. */ + if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t))) + sctp_transport_hold(t); +} + +/* Helper function to handle the reception of an HEARTBEAT ACK. 
*/
+static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
+ struct sctp_association *asoc,
+ struct sctp_transport *t,
+ struct sctp_chunk *chunk)
+{
+ sctp_sender_hb_info_t *hbinfo;
+ int was_unconfirmed = 0;
+
+ /* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the
+ * HEARTBEAT should clear the error counter of the destination
+ * transport address to which the HEARTBEAT was sent.
+ */
+ t->error_count = 0;
+
+ /*
+ * Although RFC4960 specifies that the overall error count must
+ * be cleared when a HEARTBEAT ACK is received, we make an
+ * exception while in SHUTDOWN PENDING. If the peer keeps its
+ * window shut forever, we may never be able to transmit our
+ * outstanding data and rely on the retransmission limit being
+ * reached to shut down the association.
+ */
+ if (t->asoc->state != SCTP_STATE_SHUTDOWN_PENDING)
+ t->asoc->overall_error_count = 0;
+
+ /* Clear the hb_sent flag to signal that we had a good
+ * acknowledgement.
+ */
+ t->hb_sent = 0;
+
+ /* Mark the destination transport address as active if it is not so
+ * marked.
+ */
+ if ((t->state == SCTP_INACTIVE) || (t->state == SCTP_UNCONFIRMED)) {
+ was_unconfirmed = 1;
+ sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
+ SCTP_HEARTBEAT_SUCCESS);
+ }
+
+ if (t->state == SCTP_PF)
+ sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
+ SCTP_HEARTBEAT_SUCCESS);
+
+ /* HB-ACK was received for the proper HB. Consider this
+ * forward progress.
+ */
+ if (t->dst)
+ dst_confirm(t->dst);
+
+ /* The receiver of the HEARTBEAT ACK should also perform an
+ * RTT measurement for that destination transport address
+ * using the time value carried in the HEARTBEAT ACK chunk.
+ * If the transport's rto_pending variable has been cleared,
+ * it was most likely due to a retransmit. However, we want
+ * to re-enable it to properly update the rto.
+ */
+ if (t->rto_pending == 0)
+ t->rto_pending = 1;
+
+ hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data;
+ sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at));
+
+ /* Update the heartbeat timer. */
+ if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
+ sctp_transport_hold(t);
+
+ if (was_unconfirmed && asoc->peer.transport_count == 1)
+ sctp_transport_immediate_rtx(t);
+}
+
+
+/* Helper function to process the PROCESS_SACK command. */
+static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds,
+ struct sctp_association *asoc,
+ struct sctp_chunk *chunk)
+{
+ int err = 0;
+
+ if (sctp_outq_sack(&asoc->outqueue, chunk)) {
+ struct net *net = sock_net(asoc->base.sk);
+
+ /* There are no more TSNs awaiting SACK. */
+ err = sctp_do_sm(net, SCTP_EVENT_T_OTHER,
+ SCTP_ST_OTHER(SCTP_EVENT_NO_PENDING_TSN),
+ asoc->state, asoc->ep, asoc, NULL,
+ GFP_ATOMIC);
+ }
+
+ return err;
+}
+
+/* Helper function to set the timeout value for T2-SHUTDOWN timer and to set
+ * the transport for a shutdown chunk.
+ */
+static void sctp_cmd_setup_t2(sctp_cmd_seq_t *cmds,
+ struct sctp_association *asoc,
+ struct sctp_chunk *chunk)
+{
+ struct sctp_transport *t;
+
+ if (chunk->transport)
+ t = chunk->transport;
+ else {
+ t = sctp_assoc_choose_alter_transport(asoc,
+ asoc->shutdown_last_sent_to);
+ chunk->transport = t;
+ }
+ asoc->shutdown_last_sent_to = t;
+ asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = t->rto;
+}
+
+/* Helper function to change the state of an association.
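For reference, the RTT sample taken from the echoed sent_at timestamp feeds the standard RFC 2960 Section 6.3.1 estimator. A userspace sketch of that update rule, in floating point for clarity (the kernel's sctp_transport_update_rto() computes the equivalent in integer jiffies arithmetic):

#include <math.h>
#include <stdio.h>

#define RTO_ALPHA	0.125	/* RFC 2960 suggests 1/8 */
#define RTO_BETA	0.25	/* RFC 2960 suggests 1/4 */

struct rtt_est {
	double srtt, rttvar, rto;
	int has_sample;
};

static void update_rto(struct rtt_est *e, double rtt)
{
	if (!e->has_sample) {		/* C2: first measurement */
		e->srtt = rtt;
		e->rttvar = rtt / 2;
		e->has_sample = 1;
	} else {			/* C3: subsequent measurements */
		e->rttvar = (1 - RTO_BETA) * e->rttvar +
			    RTO_BETA * fabs(e->srtt - rtt);
		e->srtt = (1 - RTO_ALPHA) * e->srtt + RTO_ALPHA * rtt;
	}
	e->rto = e->srtt + 4 * e->rttvar;	/* RTO = SRTT + 4 * RTTVAR */
}

int main(void)
{
	struct rtt_est e = { 0 };
	double samples[] = { 100, 120, 90, 300 };	/* ms */
	int i;

	for (i = 0; i < 4; i++) {
		update_rto(&e, samples[i]);
		printf("rtt=%.0fms -> rto=%.1fms\n", samples[i], e.rto);
	}
	return 0;
}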
*/ +static void sctp_cmd_new_state(sctp_cmd_seq_t *cmds, + struct sctp_association *asoc, + sctp_state_t state) +{ + struct sock *sk = asoc->base.sk; + + asoc->state = state; + + pr_debug("%s: asoc:%p[%s]\n", __func__, asoc, sctp_state_tbl[state]); + + if (sctp_style(sk, TCP)) { + /* Change the sk->sk_state of a TCP-style socket that has + * successfully completed a connect() call. + */ + if (sctp_state(asoc, ESTABLISHED) && sctp_sstate(sk, CLOSED)) + sk->sk_state = SCTP_SS_ESTABLISHED; + + /* Set the RCV_SHUTDOWN flag when a SHUTDOWN is received. */ + if (sctp_state(asoc, SHUTDOWN_RECEIVED) && + sctp_sstate(sk, ESTABLISHED)) + sk->sk_shutdown |= RCV_SHUTDOWN; + } + + if (sctp_state(asoc, COOKIE_WAIT)) { + /* Reset init timeouts since they may have been + * increased due to timer expirations. + */ + asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = + asoc->rto_initial; + asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = + asoc->rto_initial; + } + + if (sctp_state(asoc, ESTABLISHED) || + sctp_state(asoc, CLOSED) || + sctp_state(asoc, SHUTDOWN_RECEIVED)) { + /* Wake up any processes waiting in the asoc's wait queue in + * sctp_wait_for_connect() or sctp_wait_for_sndbuf(). + */ + if (waitqueue_active(&asoc->wait)) + wake_up_interruptible(&asoc->wait); + + /* Wake up any processes waiting in the sk's sleep queue of + * a TCP-style or UDP-style peeled-off socket in + * sctp_wait_for_accept() or sctp_wait_for_packet(). + * For a UDP-style socket, the waiters are woken up by the + * notifications. + */ + if (!sctp_style(sk, UDP)) + sk->sk_state_change(sk); + } +} + +/* Helper function to delete an association. */ +static void sctp_cmd_delete_tcb(sctp_cmd_seq_t *cmds, + struct sctp_association *asoc) +{ + struct sock *sk = asoc->base.sk; + + /* If it is a non-temporary association belonging to a TCP-style + * listening socket that is not closed, do not free it so that accept() + * can pick it up later. + */ + if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING) && + (!asoc->temp) && (sk->sk_shutdown != SHUTDOWN_MASK)) + return; + + sctp_unhash_established(asoc); + sctp_association_free(asoc); +} + +/* + * ADDIP Section 4.1 ASCONF Chunk Procedures + * A4) Start a T-4 RTO timer, using the RTO value of the selected + * destination address (we use active path instead of primary path just + * because primary path may be inactive. + */ +static void sctp_cmd_setup_t4(sctp_cmd_seq_t *cmds, + struct sctp_association *asoc, + struct sctp_chunk *chunk) +{ + struct sctp_transport *t; + + t = sctp_assoc_choose_alter_transport(asoc, chunk->transport); + asoc->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = t->rto; + chunk->transport = t; +} + +/* Process an incoming Operation Error Chunk. 
*/
+static void sctp_cmd_process_operr(sctp_cmd_seq_t *cmds,
+ struct sctp_association *asoc,
+ struct sctp_chunk *chunk)
+{
+ struct sctp_errhdr *err_hdr;
+ struct sctp_ulpevent *ev;
+
+ while (chunk->chunk_end > chunk->skb->data) {
+ err_hdr = (struct sctp_errhdr *)(chunk->skb->data);
+
+ ev = sctp_ulpevent_make_remote_error(asoc, chunk, 0,
+ GFP_ATOMIC);
+ if (!ev)
+ return;
+
+ sctp_ulpq_tail_event(&asoc->ulpq, ev);
+
+ switch (err_hdr->cause) {
+ case SCTP_ERROR_UNKNOWN_CHUNK:
+ {
+ sctp_chunkhdr_t *unk_chunk_hdr;
+
+ unk_chunk_hdr = (sctp_chunkhdr_t *)err_hdr->variable;
+ switch (unk_chunk_hdr->type) {
+ /* ADDIP 4.1 A9) If the peer responds to an ASCONF with
+ * an ERROR chunk reporting that it did not recognize
+ * the ASCONF chunk type, the sender of the ASCONF MUST
+ * NOT send any further ASCONF chunks and MUST stop its
+ * T-4 timer.
+ */
+ case SCTP_CID_ASCONF:
+ if (asoc->peer.asconf_capable == 0)
+ break;
+
+ asoc->peer.asconf_capable = 0;
+ sctp_add_cmd_sf(cmds, SCTP_CMD_TIMER_STOP,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ }
+}
+
+/* Process variable FWDTSN chunk information. */
+static void sctp_cmd_process_fwdtsn(struct sctp_ulpq *ulpq,
+ struct sctp_chunk *chunk)
+{
+ struct sctp_fwdtsn_skip *skip;
+ /* Walk through all the skipped SSNs */
+ sctp_walk_fwdtsn(skip, chunk) {
+ sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn));
+ }
+}
+
+/* Helper function to remove the association's non-primary peer
+ * transports.
+ */
+static void sctp_cmd_del_non_primary(struct sctp_association *asoc)
+{
+ struct sctp_transport *t;
+ struct list_head *pos;
+ struct list_head *temp;
+
+ list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
+ t = list_entry(pos, struct sctp_transport, transports);
+ if (!sctp_cmp_addr_exact(&t->ipaddr,
+ &asoc->peer.primary_addr)) {
+ sctp_assoc_del_peer(asoc, &t->ipaddr);
+ }
+ }
+}
+
+/* Helper function to set sk_err on a 1-1 style socket.
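sctp_cmd_process_operr() above walks a chain of error causes laid out as type-length-value records, each padded out to a 4-byte boundary. A self-contained sketch of that style of walk over a raw buffer (the struct and cause code are simplified stand-ins; real causes are big-endian on the wire):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified error-cause header: a 16-bit cause code and a 16-bit
 * length that covers this header plus the cause-specific data.
 * Host byte order here, unlike the wire format.
 */
struct err_cause {
	uint16_t cause;
	uint16_t length;
};

static void walk_causes(const uint8_t *buf, size_t len)
{
	size_t off = 0;

	while (off + sizeof(struct err_cause) <= len) {
		struct err_cause hdr;

		memcpy(&hdr, buf + off, sizeof(hdr));
		if (hdr.length < sizeof(hdr) || off + hdr.length > len)
			break;			/* malformed: stop walking */
		printf("cause=0x%04x len=%u\n", hdr.cause, hdr.length);
		off += (hdr.length + 3) & ~3u;	/* pad to 4-byte boundary */
	}
}

int main(void)
{
	/* one 8-byte cause followed by nothing else */
	uint8_t buf[] = { 0x06, 0x00, 0x08, 0x00, 0xc1, 0x00, 0x00, 0x04 };

	walk_causes(buf, sizeof(buf));
	return 0;
}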
*/
+static void sctp_cmd_set_sk_err(struct sctp_association *asoc, int error)
+{
+ struct sock *sk = asoc->base.sk;
+
+ if (!sctp_style(sk, UDP))
+ sk->sk_err = error;
+}
+
+/* Helper function to generate an association change event */
+static void sctp_cmd_assoc_change(sctp_cmd_seq_t *commands,
+ struct sctp_association *asoc,
+ u8 state)
+{
+ struct sctp_ulpevent *ev;
+
+ ev = sctp_ulpevent_make_assoc_change(asoc, 0, state, 0,
+ asoc->c.sinit_num_ostreams,
+ asoc->c.sinit_max_instreams,
+ NULL, GFP_ATOMIC);
+ if (ev)
+ sctp_ulpq_tail_event(&asoc->ulpq, ev);
+}
+
+/* Helper function to generate an adaptation indication event */
+static void sctp_cmd_adaptation_ind(sctp_cmd_seq_t *commands,
+ struct sctp_association *asoc)
+{
+ struct sctp_ulpevent *ev;
+
+ ev = sctp_ulpevent_make_adaptation_indication(asoc, GFP_ATOMIC);
+
+ if (ev)
+ sctp_ulpq_tail_event(&asoc->ulpq, ev);
+}
+
+
+static void sctp_cmd_t1_timer_update(struct sctp_association *asoc,
+ sctp_event_timeout_t timer,
+ char *name)
+{
+ struct sctp_transport *t;
+
+ t = asoc->init_last_sent_to;
+ asoc->init_err_counter++;
+
+ if (t->init_sent_count > (asoc->init_cycle + 1)) {
+ asoc->timeouts[timer] *= 2;
+ if (asoc->timeouts[timer] > asoc->max_init_timeo) {
+ asoc->timeouts[timer] = asoc->max_init_timeo;
+ }
+ asoc->init_cycle++;
+
+ pr_debug("%s: T1[%s] timeout adjustment init_err_counter:%d"
+ " cycle:%d timeout:%ld\n", __func__, name,
+ asoc->init_err_counter, asoc->init_cycle,
+ asoc->timeouts[timer]);
+ }
+
+}
+
+/* Send the whole message, chunk by chunk, to the outqueue.
+ * This way the whole message is queued up and bundling is
+ * encouraged for small fragments.
+ */
+static int sctp_cmd_send_msg(struct sctp_association *asoc,
+ struct sctp_datamsg *msg)
+{
+ struct sctp_chunk *chunk;
+ int error = 0;
+
+ list_for_each_entry(chunk, &msg->chunks, frag_list) {
+ error = sctp_outq_tail(&asoc->outqueue, chunk);
+ if (error)
+ break;
+ }
+
+ return error;
+}
+
+
+/* Send the next ASCONF packet currently stored in the association.
+ * This happens after the ASCONF_ACK was successfully processed.
+ */
+static void sctp_cmd_send_asconf(struct sctp_association *asoc)
+{
+ struct net *net = sock_net(asoc->base.sk);
+
+ /* Send the next asconf chunk from the addip chunk
+ * queue.
+ */
+ if (!list_empty(&asoc->addip_chunk_list)) {
+ struct list_head *entry = asoc->addip_chunk_list.next;
+ struct sctp_chunk *asconf = list_entry(entry,
+ struct sctp_chunk, list);
+ list_del_init(entry);
+
+ /* Hold the chunk until an ASCONF_ACK is received. */
+ sctp_chunk_hold(asconf);
+ if (sctp_primitive_ASCONF(net, asoc, asconf))
+ sctp_chunk_free(asconf);
+ else
+ asoc->addip_last_asconf = asconf;
+ }
+}
+
+
+/* These three macros allow us to pull the debugging code out of the
+ * main flow of sctp_do_sm() to keep attention focused on the real
+ * functionality there.
+ */
+#define debug_pre_sfn() \
+ pr_debug("%s[pre-fn]: ep:%p, %s, %s, asoc:%p[%s], %s\n", __func__, \
+ ep, sctp_evttype_tbl[event_type], (*debug_fn)(subtype), \
+ asoc, sctp_state_tbl[state], state_fn->name)
+
+#define debug_post_sfn() \
+ pr_debug("%s[post-fn]: asoc:%p, status:%s\n", __func__, asoc, \
+ sctp_status_tbl[status])
+
+#define debug_post_sfx() \
+ pr_debug("%s[post-sfx]: error:%d, asoc:%p[%s]\n", __func__, error, \
+ asoc, sctp_state_tbl[(asoc && sctp_id2assoc(ep->base.sk, \
+ sctp_assoc2id(asoc))) ? asoc->state : SCTP_STATE_CLOSED])
+
+/*
+ * This is the master state machine processing function.
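The ASCONF discipline just shown (drain a FIFO one chunk at a time, keeping a reference on the in-flight chunk until its ACK arrives) can be modeled in a few lines of userspace C. All names here are invented; the kernel uses list_head queues and sctp_chunk_hold()/sctp_chunk_free() refcounting:

#include <stdio.h>
#include <stdlib.h>

struct chunk { int id; int refcnt; struct chunk *next; };

static struct chunk *head;	/* pending queue, like addip_chunk_list */
static struct chunk *in_flight;	/* awaiting ASCONF_ACK */

static void put(struct chunk *c) { if (--c->refcnt == 0) free(c); }

/* Dequeue and "send" the next ASCONF, holding it until acked. */
static void send_next(void)
{
	struct chunk *c = head;

	if (!c || in_flight)
		return;
	head = c->next;
	c->refcnt++;			/* hold until the ASCONF_ACK */
	in_flight = c;
	printf("sent asconf %d\n", c->id);
}

static void on_ack(void)
{
	struct chunk *c = in_flight;

	if (!c)
		return;
	in_flight = NULL;
	c->refcnt--;			/* drop the in-flight hold */
	put(c);				/* drop the queue's reference */
	send_next();			/* i.e. SCTP_CMD_SEND_NEXT_ASCONF */
}

int main(void)
{
	int i;

	for (i = 3; i >= 1; i--) {	/* queue chunks 1, 2, 3 */
		struct chunk *c = calloc(1, sizeof(*c));

		c->id = i;
		c->refcnt = 1;
		c->next = head;
		head = c;
	}
	send_next();
	on_ack();			/* acks 1, sends 2 */
	on_ack();			/* acks 2, sends 3 */
	on_ack();			/* acks 3, queue now empty */
	return 0;
}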
+ * + * If you want to understand all of lksctp, this is a + * good place to start. + */ +int sctp_do_sm(struct net *net, sctp_event_t event_type, sctp_subtype_t subtype, + sctp_state_t state, + struct sctp_endpoint *ep, + struct sctp_association *asoc, + void *event_arg, + gfp_t gfp) +{ + sctp_cmd_seq_t commands; + const sctp_sm_table_entry_t *state_fn; + sctp_disposition_t status; + int error = 0; + typedef const char *(printfn_t)(sctp_subtype_t); + static printfn_t *table[] = { + NULL, sctp_cname, sctp_tname, sctp_oname, sctp_pname, + }; + printfn_t *debug_fn __attribute__ ((unused)) = table[event_type]; + + /* Look up the state function, run it, and then process the + * side effects. These three steps are the heart of lksctp. + */ + state_fn = sctp_sm_lookup_event(net, event_type, state, subtype); + + sctp_init_cmd_seq(&commands); + + debug_pre_sfn(); + status = state_fn->fn(net, ep, asoc, subtype, event_arg, &commands); + debug_post_sfn(); + + error = sctp_side_effects(event_type, subtype, state, + ep, asoc, event_arg, status, + &commands, gfp); + debug_post_sfx(); + + return error; +} + +/***************************************************************** + * This the master state function side effect processing function. + *****************************************************************/ +static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype, + sctp_state_t state, + struct sctp_endpoint *ep, + struct sctp_association *asoc, + void *event_arg, + sctp_disposition_t status, + sctp_cmd_seq_t *commands, + gfp_t gfp) +{ + int error; + + /* FIXME - Most of the dispositions left today would be categorized + * as "exceptional" dispositions. For those dispositions, it + * may not be proper to run through any of the commands at all. + * For example, the command interpreter might be run only with + * disposition SCTP_DISPOSITION_CONSUME. + */ + if (0 != (error = sctp_cmd_interpreter(event_type, subtype, state, + ep, asoc, + event_arg, status, + commands, gfp))) + goto bail; + + switch (status) { + case SCTP_DISPOSITION_DISCARD: + pr_debug("%s: ignored sctp protocol event - state:%d, " + "event_type:%d, event_id:%d\n", __func__, state, + event_type, subtype.chunk); + break; + + case SCTP_DISPOSITION_NOMEM: + /* We ran out of memory, so we need to discard this + * packet. + */ + /* BUG--we should now recover some memory, probably by + * reneging... + */ + error = -ENOMEM; + break; + + case SCTP_DISPOSITION_DELETE_TCB: + /* This should now be a command. */ + break; + + case SCTP_DISPOSITION_CONSUME: + case SCTP_DISPOSITION_ABORT: + /* + * We should no longer have much work to do here as the + * real work has been done as explicit commands above. 
+ */ + break; + + case SCTP_DISPOSITION_VIOLATION: + net_err_ratelimited("protocol violation state %d chunkid %d\n", + state, subtype.chunk); + break; + + case SCTP_DISPOSITION_NOT_IMPL: + pr_warn("unimplemented feature in state %d, event_type %d, event_id %d\n", + state, event_type, subtype.chunk); + break; + + case SCTP_DISPOSITION_BUG: + pr_err("bug in state %d, event_type %d, event_id %d\n", + state, event_type, subtype.chunk); + BUG(); + break; + + default: + pr_err("impossible disposition %d in state %d, event_type %d, event_id %d\n", + status, state, event_type, subtype.chunk); + BUG(); + break; + } + +bail: + return error; +} + +/******************************************************************** + * 2nd Level Abstractions + ********************************************************************/ + +/* This is the side-effect interpreter. */ +static int sctp_cmd_interpreter(sctp_event_t event_type, + sctp_subtype_t subtype, + sctp_state_t state, + struct sctp_endpoint *ep, + struct sctp_association *asoc, + void *event_arg, + sctp_disposition_t status, + sctp_cmd_seq_t *commands, + gfp_t gfp) +{ + int error = 0; + int force; + sctp_cmd_t *cmd; + struct sctp_chunk *new_obj; + struct sctp_chunk *chunk = NULL; + struct sctp_packet *packet; + struct timer_list *timer; + unsigned long timeout; + struct sctp_transport *t; + struct sctp_sackhdr sackh; + int local_cork = 0; + + if (SCTP_EVENT_T_TIMEOUT != event_type) + chunk = event_arg; + + /* Note: This whole file is a huge candidate for rework. + * For example, each command could either have its own handler, so + * the loop would look like: + * while (cmds) + * cmd->handle(x, y, z) + * --jgrimm + */ + while (NULL != (cmd = sctp_next_cmd(commands))) { + switch (cmd->verb) { + case SCTP_CMD_NOP: + /* Do nothing. */ + break; + + case SCTP_CMD_NEW_ASOC: + /* Register a new association. */ + if (local_cork) { + sctp_outq_uncork(&asoc->outqueue); + local_cork = 0; + } + + /* Register with the endpoint. */ + asoc = cmd->obj.asoc; + BUG_ON(asoc->peer.primary_path == NULL); + sctp_endpoint_add_asoc(ep, asoc); + sctp_hash_established(asoc); + break; + + case SCTP_CMD_UPDATE_ASSOC: + sctp_assoc_update(asoc, cmd->obj.asoc); + break; + + case SCTP_CMD_PURGE_OUTQUEUE: + sctp_outq_teardown(&asoc->outqueue); + break; + + case SCTP_CMD_DELETE_TCB: + if (local_cork) { + sctp_outq_uncork(&asoc->outqueue); + local_cork = 0; + } + /* Delete the current association. */ + sctp_cmd_delete_tcb(commands, asoc); + asoc = NULL; + break; + + case SCTP_CMD_NEW_STATE: + /* Enter a new state. */ + sctp_cmd_new_state(commands, asoc, cmd->obj.state); + break; + + case SCTP_CMD_REPORT_TSN: + /* Record the arrival of a TSN. */ + error = sctp_tsnmap_mark(&asoc->peer.tsn_map, + cmd->obj.u32, NULL); + break; + + case SCTP_CMD_REPORT_FWDTSN: + /* Move the Cumulattive TSN Ack ahead. */ + sctp_tsnmap_skip(&asoc->peer.tsn_map, cmd->obj.u32); + + /* purge the fragmentation queue */ + sctp_ulpq_reasm_flushtsn(&asoc->ulpq, cmd->obj.u32); + + /* Abort any in progress partial delivery. */ + sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC); + break; + + case SCTP_CMD_PROCESS_FWDTSN: + sctp_cmd_process_fwdtsn(&asoc->ulpq, cmd->obj.chunk); + break; + + case SCTP_CMD_GEN_SACK: + /* Generate a Selective ACK. + * The argument tells us whether to just count + * the packet and MAYBE generate a SACK, or + * force a SACK out. + */ + force = cmd->obj.i32; + error = sctp_gen_sack(asoc, force, commands); + break; + + case SCTP_CMD_PROCESS_SACK: + /* Process an inbound SACK. 
*/
+ error = sctp_cmd_process_sack(commands, asoc,
+ cmd->obj.chunk);
+ break;
+
+ case SCTP_CMD_GEN_INIT_ACK:
+ /* Generate an INIT ACK chunk. */
+ new_obj = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC,
+ 0);
+ if (!new_obj)
+ goto nomem;
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
+ SCTP_CHUNK(new_obj));
+ break;
+
+ case SCTP_CMD_PEER_INIT:
+ /* Process a unified INIT from the peer.
+ * Note: Only used during INIT-ACK processing. If
+ * there is an error just return to the outer
+ * layer which will bail.
+ */
+ error = sctp_cmd_process_init(commands, asoc, chunk,
+ cmd->obj.init, gfp);
+ break;
+
+ case SCTP_CMD_GEN_COOKIE_ECHO:
+ /* Generate a COOKIE ECHO chunk. */
+ new_obj = sctp_make_cookie_echo(asoc, chunk);
+ if (!new_obj) {
+ if (cmd->obj.chunk)
+ sctp_chunk_free(cmd->obj.chunk);
+ goto nomem;
+ }
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
+ SCTP_CHUNK(new_obj));
+
+ /* If there is an ERROR chunk to be sent along with
+ * the COOKIE_ECHO, send it, too.
+ */
+ if (cmd->obj.chunk)
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
+ SCTP_CHUNK(cmd->obj.chunk));
+
+ if (new_obj->transport) {
+ new_obj->transport->init_sent_count++;
+ asoc->init_last_sent_to = new_obj->transport;
+ }
+
+ /* FIXME - Eventually come up with a cleaner way to
+ * enable COOKIE-ECHO + DATA bundling during
+ * multihoming stale cookie scenarios, the following
+ * command plays with asoc->peer.retran_path to
+ * avoid the problem of sending the COOKIE-ECHO and
+ * DATA in different paths, which could result
+ * in the association being ABORTed if the DATA chunk
+ * is processed first by the server. Checking the
+ * init error counter simply causes this command
+ * to be executed only during failed attempts of
+ * association establishment.
+ */
+ if ((asoc->peer.retran_path !=
+ asoc->peer.primary_path) &&
+ (asoc->init_err_counter > 0)) {
+ sctp_add_cmd_sf(commands,
+ SCTP_CMD_FORCE_PRIM_RETRAN,
+ SCTP_NULL());
+ }
+
+ break;
+
+ case SCTP_CMD_GEN_SHUTDOWN:
+ /* Generate SHUTDOWN when in SHUTDOWN_SENT state.
+ * Reset error counts.
+ */
+ asoc->overall_error_count = 0;
+
+ /* Generate a SHUTDOWN chunk. */
+ new_obj = sctp_make_shutdown(asoc, chunk);
+ if (!new_obj)
+ goto nomem;
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
+ SCTP_CHUNK(new_obj));
+ break;
+
+ case SCTP_CMD_CHUNK_ULP:
+ /* Send a chunk to the sockets layer. */
+ pr_debug("%s: sm_sideff: chunk_up:%p, ulpq:%p\n",
+ __func__, cmd->obj.chunk, &asoc->ulpq);
+
+ sctp_ulpq_tail_data(&asoc->ulpq, cmd->obj.chunk,
+ GFP_ATOMIC);
+ break;
+
+ case SCTP_CMD_EVENT_ULP:
+ /* Send a notification to the sockets layer. */
+ pr_debug("%s: sm_sideff: event_up:%p, ulpq:%p\n",
+ __func__, cmd->obj.ulpevent, &asoc->ulpq);
+
+ sctp_ulpq_tail_event(&asoc->ulpq, cmd->obj.ulpevent);
+ break;
+
+ case SCTP_CMD_REPLY:
+ /* If a caller has not already corked, do cork. */
+ if (!asoc->outqueue.cork) {
+ sctp_outq_cork(&asoc->outqueue);
+ local_cork = 1;
+ }
+ /* Send a chunk to our peer. */
+ error = sctp_outq_tail(&asoc->outqueue, cmd->obj.chunk);
+ break;
+
+ case SCTP_CMD_SEND_PKT:
+ /* Send a full packet to our peer. */
+ packet = cmd->obj.packet;
+ sctp_packet_transmit(packet);
+ sctp_ootb_pkt_free(packet);
+ break;
+
+ case SCTP_CMD_T1_RETRAN:
+ /* Mark a transport for retransmission. */
+ sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
+ SCTP_RTXR_T1_RTX);
+ break;
+
+ case SCTP_CMD_RETRAN:
+ /* Mark a transport for retransmission.
*/ + sctp_retransmit(&asoc->outqueue, cmd->obj.transport, + SCTP_RTXR_T3_RTX); + break; + + case SCTP_CMD_ECN_CE: + /* Do delayed CE processing. */ + sctp_do_ecn_ce_work(asoc, cmd->obj.u32); + break; + + case SCTP_CMD_ECN_ECNE: + /* Do delayed ECNE processing. */ + new_obj = sctp_do_ecn_ecne_work(asoc, cmd->obj.u32, + chunk); + if (new_obj) + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, + SCTP_CHUNK(new_obj)); + break; + + case SCTP_CMD_ECN_CWR: + /* Do delayed CWR processing. */ + sctp_do_ecn_cwr_work(asoc, cmd->obj.u32); + break; + + case SCTP_CMD_SETUP_T2: + sctp_cmd_setup_t2(commands, asoc, cmd->obj.chunk); + break; + + case SCTP_CMD_TIMER_START_ONCE: + timer = &asoc->timers[cmd->obj.to]; + + if (timer_pending(timer)) + break; + /* fall through */ + + case SCTP_CMD_TIMER_START: + timer = &asoc->timers[cmd->obj.to]; + timeout = asoc->timeouts[cmd->obj.to]; + BUG_ON(!timeout); + + timer->expires = jiffies + timeout; + sctp_association_hold(asoc); + add_timer(timer); + break; + + case SCTP_CMD_TIMER_RESTART: + timer = &asoc->timers[cmd->obj.to]; + timeout = asoc->timeouts[cmd->obj.to]; + if (!mod_timer(timer, jiffies + timeout)) + sctp_association_hold(asoc); + break; + + case SCTP_CMD_TIMER_STOP: + timer = &asoc->timers[cmd->obj.to]; + if (del_timer(timer)) + sctp_association_put(asoc); + break; + + case SCTP_CMD_INIT_CHOOSE_TRANSPORT: + chunk = cmd->obj.chunk; + t = sctp_assoc_choose_alter_transport(asoc, + asoc->init_last_sent_to); + asoc->init_last_sent_to = t; + chunk->transport = t; + t->init_sent_count++; + /* Set the new transport as primary */ + sctp_assoc_set_primary(asoc, t); + break; + + case SCTP_CMD_INIT_RESTART: + /* Do the needed accounting and updates + * associated with restarting an initialization + * timer. Only multiply the timeout by two if + * all transports have been tried at the current + * timeout. + */ + sctp_cmd_t1_timer_update(asoc, + SCTP_EVENT_TIMEOUT_T1_INIT, + "INIT"); + + sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, + SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); + break; + + case SCTP_CMD_COOKIEECHO_RESTART: + /* Do the needed accounting and updates + * associated with restarting an initialization + * timer. Only multiply the timeout by two if + * all transports have been tried at the current + * timeout. + */ + sctp_cmd_t1_timer_update(asoc, + SCTP_EVENT_TIMEOUT_T1_COOKIE, + "COOKIE"); + + /* If we've sent any data bundled with + * COOKIE-ECHO we need to resend. + */ + list_for_each_entry(t, &asoc->peer.transport_addr_list, + transports) { + sctp_retransmit_mark(&asoc->outqueue, t, + SCTP_RTXR_T1_RTX); + } + + sctp_add_cmd_sf(commands, + SCTP_CMD_TIMER_RESTART, + SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE)); + break; + + case SCTP_CMD_INIT_FAILED: + sctp_cmd_init_failed(commands, asoc, cmd->obj.err); + break; + + case SCTP_CMD_ASSOC_FAILED: + sctp_cmd_assoc_failed(commands, asoc, event_type, + subtype, chunk, cmd->obj.err); + break; + + case SCTP_CMD_INIT_COUNTER_INC: + asoc->init_err_counter++; + break; + + case SCTP_CMD_INIT_COUNTER_RESET: + asoc->init_err_counter = 0; + asoc->init_cycle = 0; + list_for_each_entry(t, &asoc->peer.transport_addr_list, + transports) { + t->init_sent_count = 0; + } + break; + + case SCTP_CMD_REPORT_DUP: + sctp_tsnmap_mark_dup(&asoc->peer.tsn_map, + cmd->obj.u32); + break; + + case SCTP_CMD_REPORT_BAD_TAG: + pr_debug("%s: vtag mismatch!\n", __func__); + break; + + case SCTP_CMD_STRIKE: + /* Mark one strike against a transport. 
*/ + sctp_do_8_2_transport_strike(commands, asoc, + cmd->obj.transport, 0); + break; + + case SCTP_CMD_TRANSPORT_IDLE: + t = cmd->obj.transport; + sctp_transport_lower_cwnd(t, SCTP_LOWER_CWND_INACTIVE); + break; + + case SCTP_CMD_TRANSPORT_HB_SENT: + t = cmd->obj.transport; + sctp_do_8_2_transport_strike(commands, asoc, + t, 1); + t->hb_sent = 1; + break; + + case SCTP_CMD_TRANSPORT_ON: + t = cmd->obj.transport; + sctp_cmd_transport_on(commands, asoc, t, chunk); + break; + + case SCTP_CMD_HB_TIMERS_START: + sctp_cmd_hb_timers_start(commands, asoc); + break; + + case SCTP_CMD_HB_TIMER_UPDATE: + t = cmd->obj.transport; + sctp_cmd_hb_timer_update(commands, t); + break; + + case SCTP_CMD_HB_TIMERS_STOP: + sctp_cmd_hb_timers_stop(commands, asoc); + break; + + case SCTP_CMD_REPORT_ERROR: + error = cmd->obj.error; + break; + + case SCTP_CMD_PROCESS_CTSN: + /* Dummy up a SACK for processing. */ + sackh.cum_tsn_ack = cmd->obj.be32; + sackh.a_rwnd = asoc->peer.rwnd + + asoc->outqueue.outstanding_bytes; + sackh.num_gap_ack_blocks = 0; + sackh.num_dup_tsns = 0; + chunk->subh.sack_hdr = &sackh; + sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK, + SCTP_CHUNK(chunk)); + break; + + case SCTP_CMD_DISCARD_PACKET: + /* We need to discard the whole packet. + * Uncork the queue since there might be + * responses pending + */ + chunk->pdiscard = 1; + if (asoc) { + sctp_outq_uncork(&asoc->outqueue); + local_cork = 0; + } + break; + + case SCTP_CMD_RTO_PENDING: + t = cmd->obj.transport; + t->rto_pending = 1; + break; + + case SCTP_CMD_PART_DELIVER: + sctp_ulpq_partial_delivery(&asoc->ulpq, GFP_ATOMIC); + break; + + case SCTP_CMD_RENEGE: + sctp_ulpq_renege(&asoc->ulpq, cmd->obj.chunk, + GFP_ATOMIC); + break; + + case SCTP_CMD_SETUP_T4: + sctp_cmd_setup_t4(commands, asoc, cmd->obj.chunk); + break; + + case SCTP_CMD_PROCESS_OPERR: + sctp_cmd_process_operr(commands, asoc, chunk); + break; + case SCTP_CMD_CLEAR_INIT_TAG: + asoc->peer.i.init_tag = 0; + break; + case SCTP_CMD_DEL_NON_PRIMARY: + sctp_cmd_del_non_primary(asoc); + break; + case SCTP_CMD_T3_RTX_TIMERS_STOP: + sctp_cmd_t3_rtx_timers_stop(commands, asoc); + break; + case SCTP_CMD_FORCE_PRIM_RETRAN: + t = asoc->peer.retran_path; + asoc->peer.retran_path = asoc->peer.primary_path; + error = sctp_outq_uncork(&asoc->outqueue); + local_cork = 0; + asoc->peer.retran_path = t; + break; + case SCTP_CMD_SET_SK_ERR: + sctp_cmd_set_sk_err(asoc, cmd->obj.error); + break; + case SCTP_CMD_ASSOC_CHANGE: + sctp_cmd_assoc_change(commands, asoc, + cmd->obj.u8); + break; + case SCTP_CMD_ADAPTATION_IND: + sctp_cmd_adaptation_ind(commands, asoc); + break; + + case SCTP_CMD_ASSOC_SHKEY: + error = sctp_auth_asoc_init_active_key(asoc, + GFP_ATOMIC); + break; + case SCTP_CMD_UPDATE_INITTAG: + asoc->peer.i.init_tag = cmd->obj.u32; + break; + case SCTP_CMD_SEND_MSG: + if (!asoc->outqueue.cork) { + sctp_outq_cork(&asoc->outqueue); + local_cork = 1; + } + error = sctp_cmd_send_msg(asoc, cmd->obj.msg); + break; + case SCTP_CMD_SEND_NEXT_ASCONF: + sctp_cmd_send_asconf(asoc); + break; + case SCTP_CMD_PURGE_ASCONF_QUEUE: + sctp_asconf_queue_teardown(asoc); + break; + + case SCTP_CMD_SET_ASOC: + asoc = cmd->obj.asoc; + break; + + default: + pr_warn("Impossible command: %u\n", + cmd->verb); + break; + } + + if (error) + break; + } + +out: + /* If this is in response to a received chunk, wait until + * we are done with the packet to open the queue so that we don't + * send multiple packets in response to a single request. 
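The interpreter above is one large switch; the inline note in it suggests a rework where every verb gets its own handler. A sketch of that table-driven alternative (the verb set and handler signature are invented for illustration):

#include <stdio.h>

enum verb { CMD_NOP, CMD_NEW_STATE, CMD_REPLY, CMD_MAX };

struct cmd { enum verb verb; int arg; };

typedef int (*cmd_handler)(struct cmd *cmd);

static int do_nop(struct cmd *cmd)       { (void)cmd; return 0; }
static int do_new_state(struct cmd *cmd) { printf("state -> %d\n", cmd->arg); return 0; }
static int do_reply(struct cmd *cmd)     { printf("queue reply %d\n", cmd->arg); return 0; }

static const cmd_handler handlers[CMD_MAX] = {
	[CMD_NOP]	= do_nop,
	[CMD_NEW_STATE]	= do_new_state,
	[CMD_REPLY]	= do_reply,
};

/* "while (cmds) cmd->handle(x, y, z)", as the comment proposes */
static int run(struct cmd *cmds, int n)
{
	int i, err;

	for (i = 0; i < n; i++) {
		err = handlers[cmds[i].verb](&cmds[i]);
		if (err)
			return err;	/* stop on first error, like the switch */
	}
	return 0;
}

int main(void)
{
	struct cmd seq[] = { {CMD_NEW_STATE, 4}, {CMD_REPLY, 7}, {CMD_NOP, 0} };

	return run(seq, 3);
}

A designated-initializer table like this keeps dispatch O(1) and makes unhandled verbs visible as empty slots.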
+ */ + if (asoc && SCTP_EVENT_T_CHUNK == event_type && chunk) { + if (chunk->end_of_packet || chunk->singleton) + error = sctp_outq_uncork(&asoc->outqueue); + } else if (local_cork) + error = sctp_outq_uncork(&asoc->outqueue); + return error; +nomem: + error = -ENOMEM; + goto out; +} + diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c new file mode 100644 index 000000000..3ee27b770 --- /dev/null +++ b/net/sctp/sm_statefuns.c @@ -0,0 +1,6301 @@ +/* SCTP kernel implementation + * (C) Copyright IBM Corp. 2001, 2004 + * Copyright (c) 1999-2000 Cisco, Inc. + * Copyright (c) 1999-2001 Motorola, Inc. + * Copyright (c) 2001-2002 Intel Corp. + * Copyright (c) 2002 Nokia Corp. + * + * This is part of the SCTP Linux Kernel Implementation. + * + * These are the state functions for the state machine. + * + * This SCTP implementation is free software; + * you can redistribute it and/or modify it under the terms of + * the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This SCTP implementation is distributed in the hope that it + * will be useful, but WITHOUT ANY WARRANTY; without even the implied + * ************************ + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with GNU CC; see the file COPYING. If not, see + * <http://www.gnu.org/licenses/>. + * + * Please send any bug reports or fixes you make to the + * email address(es): + * lksctp developers <linux-sctp@vger.kernel.org> + * + * Written or modified by: + * La Monte H.P. Yarroll <piggy@acm.org> + * Karl Knutson <karl@athena.chicago.il.us> + * Mathew Kotowsky <kotowsky@sctp.org> + * Sridhar Samudrala <samudrala@us.ibm.com> + * Jon Grimm <jgrimm@us.ibm.com> + * Hui Huang <hui.huang@nokia.com> + * Dajiang Zhang <dajiang.zhang@nokia.com> + * Daisy Chang <daisyc@us.ibm.com> + * Ardelle Fan <ardelle.fan@intel.com> + * Ryan Layer <rmlayer@us.ibm.com> + * Kevin Gao <kevin.gao@intel.com> + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/net.h> +#include <linux/inet.h> +#include <linux/slab.h> +#include <net/sock.h> +#include <net/inet_ecn.h> +#include <linux/skbuff.h> +#include <net/sctp/sctp.h> +#include <net/sctp/sm.h> +#include <net/sctp/structs.h> + +static struct sctp_packet *sctp_abort_pkt_new(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + struct sctp_chunk *chunk, + const void *payload, + size_t paylen); +static int sctp_eat_data(const struct sctp_association *asoc, + struct sctp_chunk *chunk, + sctp_cmd_seq_t *commands); +static struct sctp_packet *sctp_ootb_pkt_new(struct net *net, + const struct sctp_association *asoc, + const struct sctp_chunk *chunk); +static void sctp_send_stale_cookie_err(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const struct sctp_chunk *chunk, + sctp_cmd_seq_t *commands, + struct sctp_chunk *err_chunk); +static sctp_disposition_t sctp_sf_do_5_2_6_stale(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands); +static sctp_disposition_t sctp_sf_shut_8_4_5(struct net *net, + const struct sctp_endpoint *ep, + const struct 
sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands); +static sctp_disposition_t sctp_sf_tabort_8_4_8(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands); +static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk); + +static sctp_disposition_t sctp_stop_t1_and_abort(struct net *net, + sctp_cmd_seq_t *commands, + __be16 error, int sk_err, + const struct sctp_association *asoc, + struct sctp_transport *transport); + +static sctp_disposition_t sctp_sf_abort_violation( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + void *arg, + sctp_cmd_seq_t *commands, + const __u8 *payload, + const size_t paylen); + +static sctp_disposition_t sctp_sf_violation_chunklen( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands); + +static sctp_disposition_t sctp_sf_violation_paramlen( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, void *ext, + sctp_cmd_seq_t *commands); + +static sctp_disposition_t sctp_sf_violation_ctsn( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands); + +static sctp_disposition_t sctp_sf_violation_chunk( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands); + +static sctp_ierror_t sctp_sf_authenticate(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + struct sctp_chunk *chunk); + +static sctp_disposition_t __sctp_sf_do_9_1_abort(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands); + +/* Small helper function that checks if the chunk length + * is of the appropriate length. The 'required_length' argument + * is set to be the size of a specific chunk we are testing. + * Return Values: 1 = Valid length + * 0 = Invalid length + * + */ +static inline int +sctp_chunk_length_valid(struct sctp_chunk *chunk, + __u16 required_length) +{ + __u16 chunk_length = ntohs(chunk->chunk_hdr->length); + + /* Previously already marked? */ + if (unlikely(chunk->pdiscard)) + return 0; + if (unlikely(chunk_length < required_length)) + return 0; + + return 1; +} + +/********************************************************** + * These are the state functions for handling chunk events. + **********************************************************/ + +/* + * Process the final SHUTDOWN COMPLETE. + * + * Section: 4 (C) (diagram), 9.2 + * Upon reception of the SHUTDOWN COMPLETE chunk the endpoint will verify + * that it is in SHUTDOWN-ACK-SENT state, if it is not the chunk should be + * discarded. If the endpoint is in the SHUTDOWN-ACK-SENT state the endpoint + * should stop the T2-shutdown timer and remove all knowledge of the + * association (and thus the association enters the CLOSED state). + * + * Verification Tag: 8.5.1(C), sctpimpguide 2.41. + * C) Rules for packet carrying SHUTDOWN COMPLETE: + * ... 
+ * - The receiver of a SHUTDOWN COMPLETE shall accept the packet + * if the Verification Tag field of the packet matches its own tag and + * the T bit is not set + * OR + * it is set to its peer's tag and the T bit is set in the Chunk + * Flags. + * Otherwise, the receiver MUST silently discard the packet + * and take no further action. An endpoint MUST ignore the + * SHUTDOWN COMPLETE if it is not in the SHUTDOWN-ACK-SENT state. + * + * Inputs + * (endpoint, asoc, chunk) + * + * Outputs + * (asoc, reply_msg, msg_up, timers, counters) + * + * The return value is the disposition of the chunk. + */ +sctp_disposition_t sctp_sf_do_4_C(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + struct sctp_chunk *chunk = arg; + struct sctp_ulpevent *ev; + + if (!sctp_vtag_verify_either(chunk, asoc)) + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); + + /* RFC 2960 6.10 Bundling + * + * An endpoint MUST NOT bundle INIT, INIT ACK or + * SHUTDOWN COMPLETE with any other chunks. + */ + if (!chunk->singleton) + return sctp_sf_violation_chunk(net, ep, asoc, type, arg, commands); + + /* Make sure that the SHUTDOWN_COMPLETE chunk has a valid length. */ + if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) + return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, + commands); + + /* RFC 2960 10.2 SCTP-to-ULP + * + * H) SHUTDOWN COMPLETE notification + * + * When SCTP completes the shutdown procedures (section 9.2) this + * notification is passed to the upper layer. + */ + ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_SHUTDOWN_COMP, + 0, 0, 0, NULL, GFP_ATOMIC); + if (ev) + sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, + SCTP_ULPEVENT(ev)); + + /* Upon reception of the SHUTDOWN COMPLETE chunk the endpoint + * will verify that it is in SHUTDOWN-ACK-SENT state, if it is + * not the chunk should be discarded. If the endpoint is in + * the SHUTDOWN-ACK-SENT state the endpoint should stop the + * T2-shutdown timer and remove all knowledge of the + * association (and thus the association enters the CLOSED + * state). + */ + sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, + SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN)); + + sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, + SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD)); + + sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, + SCTP_STATE(SCTP_STATE_CLOSED)); + + SCTP_INC_STATS(net, SCTP_MIB_SHUTDOWNS); + SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB); + + sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); + + return SCTP_DISPOSITION_DELETE_TCB; +} + +/* + * Respond to a normal INIT chunk. + * We are the side that is being asked for an association. + * + * Section: 5.1 Normal Establishment of an Association, B + * B) "Z" shall respond immediately with an INIT ACK chunk. The + * destination IP address of the INIT ACK MUST be set to the source + * IP address of the INIT to which this INIT ACK is responding. In + * the response, besides filling in other parameters, "Z" must set the + * Verification Tag field to Tag_A, and also provide its own + * Verification Tag (Tag_Z) in the Initiate Tag field. + * + * Verification Tag: Must be 0. + * + * Inputs + * (endpoint, asoc, chunk) + * + * Outputs + * (asoc, reply_msg, msg_up, timers, counters) + * + * The return value is the disposition of the chunk. 
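The 8.5.1(C) tag rules quoted above reduce to a two-way check, which is what the sctp_vtag_verify_either() call at the top of sctp_sf_do_4_C performs. A standalone sketch with simplified types (the flag constant and names are illustrative, not the kernel's):

#include <stdint.h>

#define CHUNK_FLAG_T	0x01	/* the T bit in the chunk flags */

/* Accept the packet if the vtag is our own tag and T is clear, or
 * the peer's tag and T is set; otherwise it must be discarded.
 */
static int vtag_verify_either(uint32_t pkt_vtag, uint8_t chunk_flags,
			      uint32_t my_vtag, uint32_t peer_vtag)
{
	if (!(chunk_flags & CHUNK_FLAG_T))
		return pkt_vtag == my_vtag;
	return pkt_vtag == peer_vtag;
}

int main(void)
{
	/* own tag 0x1111, peer tag 0x2222: a reflected tag needs T set */
	return !vtag_verify_either(0x2222, CHUNK_FLAG_T, 0x1111, 0x2222);
}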
+ */ +sctp_disposition_t sctp_sf_do_5_1B_init(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + struct sctp_chunk *chunk = arg; + struct sctp_chunk *repl; + struct sctp_association *new_asoc; + struct sctp_chunk *err_chunk; + struct sctp_packet *packet; + sctp_unrecognized_param_t *unk_param; + int len; + + /* 6.10 Bundling + * An endpoint MUST NOT bundle INIT, INIT ACK or + * SHUTDOWN COMPLETE with any other chunks. + * + * IG Section 2.11.2 + * Furthermore, we require that the receiver of an INIT chunk MUST + * enforce these rules by silently discarding an arriving packet + * with an INIT chunk that is bundled with other chunks. + */ + if (!chunk->singleton) + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); + + /* If the packet is an OOTB packet which is temporarily on the + * control endpoint, respond with an ABORT. + */ + if (ep == sctp_sk(net->sctp.ctl_sock)->ep) { + SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES); + return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands); + } + + /* 3.1 A packet containing an INIT chunk MUST have a zero Verification + * Tag. + */ + if (chunk->sctp_hdr->vtag != 0) + return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands); + + /* Make sure that the INIT chunk has a valid length. + * Normally, this would cause an ABORT with a Protocol Violation + * error, but since we don't have an association, we'll + * just discard the packet. + */ + if (!sctp_chunk_length_valid(chunk, sizeof(sctp_init_chunk_t))) + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); + + /* If the INIT is coming toward a closing socket, we'll send back + * and ABORT. Essentially, this catches the race of INIT being + * backloged to the socket at the same time as the user isses close(). + * Since the socket and all its associations are going away, we + * can treat this OOTB + */ + if (sctp_sstate(ep->base.sk, CLOSING)) + return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands); + + /* Verify the INIT chunk before processing it. */ + err_chunk = NULL; + if (!sctp_verify_init(net, ep, asoc, chunk->chunk_hdr->type, + (sctp_init_chunk_t *)chunk->chunk_hdr, chunk, + &err_chunk)) { + /* This chunk contains fatal error. It is to be discarded. + * Send an ABORT, with causes if there is any. + */ + if (err_chunk) { + packet = sctp_abort_pkt_new(net, ep, asoc, arg, + (__u8 *)(err_chunk->chunk_hdr) + + sizeof(sctp_chunkhdr_t), + ntohs(err_chunk->chunk_hdr->length) - + sizeof(sctp_chunkhdr_t)); + + sctp_chunk_free(err_chunk); + + if (packet) { + sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, + SCTP_PACKET(packet)); + SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS); + return SCTP_DISPOSITION_CONSUME; + } else { + return SCTP_DISPOSITION_NOMEM; + } + } else { + return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, + commands); + } + } + + /* Grab the INIT header. */ + chunk->subh.init_hdr = (sctp_inithdr_t *)chunk->skb->data; + + /* Tag the variable length parameters. */ + chunk->param_hdr.v = skb_pull(chunk->skb, sizeof(sctp_inithdr_t)); + + new_asoc = sctp_make_temp_asoc(ep, chunk, GFP_ATOMIC); + if (!new_asoc) + goto nomem; + + if (sctp_assoc_set_bind_addr_from_ep(new_asoc, + sctp_scope(sctp_source(chunk)), + GFP_ATOMIC) < 0) + goto nomem_init; + + /* The call, sctp_process_init(), can fail on memory allocation. 
*/
+ if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk),
+ (sctp_init_chunk_t *)chunk->chunk_hdr,
+ GFP_ATOMIC))
+ goto nomem_init;
+
+ /* B) "Z" shall respond immediately with an INIT ACK chunk. */
+
+ /* If there are errors that need to be reported for unknown parameters,
+ * make sure to reserve enough room in the INIT ACK for them.
+ */
+ len = 0;
+ if (err_chunk)
+ len = ntohs(err_chunk->chunk_hdr->length) -
+ sizeof(sctp_chunkhdr_t);
+
+ repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len);
+ if (!repl)
+ goto nomem_init;
+
+ /* If there are errors that need to be reported for unknown parameters,
+ * include them in the outgoing INIT ACK as "Unrecognized parameter"
+ * parameter.
+ */
+ if (err_chunk) {
+ /* Get the "Unrecognized parameter" parameter(s) out of the
+ * ERROR chunk generated by sctp_verify_init(). Since the
+ * error cause code for "unknown parameter" and the
+ * "Unrecognized parameter" type is the same, we can
+ * construct the parameters in INIT ACK by copying the
+ * ERROR causes over.
+ */
+ unk_param = (sctp_unrecognized_param_t *)
+ ((__u8 *)(err_chunk->chunk_hdr) +
+ sizeof(sctp_chunkhdr_t));
+ /* Replace the cause code with the "Unrecognized parameter"
+ * parameter type.
+ */
+ sctp_addto_chunk(repl, len, unk_param);
+ sctp_chunk_free(err_chunk);
+ }
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
+
+ /*
+ * Note: After sending out INIT ACK with the State Cookie parameter,
+ * "Z" MUST NOT allocate any resources, nor keep any states for the
+ * new association. Otherwise, "Z" will be vulnerable to resource
+ * attacks.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
+
+ return SCTP_DISPOSITION_DELETE_TCB;
+
+nomem_init:
+ sctp_association_free(new_asoc);
+nomem:
+ if (err_chunk)
+ sctp_chunk_free(err_chunk);
+ return SCTP_DISPOSITION_NOMEM;
+}
+
+/*
+ * Respond to a normal INIT ACK chunk.
+ * We are the side that is initiating the association.
+ *
+ * Section: 5.1 Normal Establishment of an Association, C
+ * C) Upon reception of the INIT ACK from "Z", "A" shall stop the T1-init
+ * timer and leave COOKIE-WAIT state. "A" shall then send the State
+ * Cookie received in the INIT ACK chunk in a COOKIE ECHO chunk, start
+ * the T1-cookie timer, and enter the COOKIE-ECHOED state.
+ *
+ * Note: The COOKIE ECHO chunk can be bundled with any pending outbound
+ * DATA chunks, but it MUST be the first chunk in the packet and
+ * until the COOKIE ACK is returned the sender MUST NOT send any
+ * other packets to the peer.
+ *
+ * Verification Tag: 3.3.3
+ * If the value of the Initiate Tag in a received INIT ACK chunk is
+ * found to be 0, the receiver MUST treat it as an error and close the
+ * association by transmitting an ABORT.
+ *
+ * Inputs
+ * (endpoint, asoc, chunk)
+ *
+ * Outputs
+ * (asoc, reply_msg, msg_up, timers, counters)
+ *
+ * The return value is the disposition of the chunk.
+ */
+sctp_disposition_t sctp_sf_do_5_1C_ack(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const sctp_subtype_t type,
+ void *arg,
+ sctp_cmd_seq_t *commands)
+{
+ struct sctp_chunk *chunk = arg;
+ sctp_init_chunk_t *initchunk;
+ struct sctp_chunk *err_chunk;
+ struct sctp_packet *packet;
+
+ if (!sctp_vtag_verify(chunk, asoc))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ /* 6.10 Bundling
+ * An endpoint MUST NOT bundle INIT, INIT ACK or
+ * SHUTDOWN COMPLETE with any other chunks.
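The stateless trick described in the note above works because everything needed to rebuild the TCB is serialized into the State Cookie together with a MAC, so "Z" commits no resources until the cookie comes back. A minimal userspace sketch of minting such a cookie (the layout and the mac() routine are invented placeholders; the kernel computes a real HMAC over the cookie body, and this toy checksum is not cryptographically sound):

#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <time.h>

/* Invented, simplified cookie: enough peer state to rebuild the TCB,
 * a creation time for the staleness check, and a MAC over the rest.
 */
struct cookie {
	uint32_t peer_vtag;
	uint32_t my_vtag;
	uint64_t created;	/* seconds, checked against the lifespan */
	uint8_t mac[8];
};

/* Toy keyed checksum standing in for HMAC; NOT cryptographically sound. */
static void mac(const void *data, size_t len, uint64_t key, uint8_t out[8])
{
	uint64_t h = key;
	const uint8_t *p = data;

	while (len--)
		h = (h ^ *p++) * 1099511628211ull;	/* FNV-1a style mix */
	memcpy(out, &h, sizeof(h));
}

static void mint_cookie(struct cookie *c, uint32_t peer_vtag,
			uint32_t my_vtag, uint64_t secret)
{
	c->peer_vtag = peer_vtag;
	c->my_vtag = my_vtag;
	c->created = (uint64_t)time(NULL);
	mac(c, offsetof(struct cookie, mac), secret, c->mac);
}

int main(void)
{
	struct cookie c;

	mint_cookie(&c, 0x2222, 0x1111, 0xfeedfacecafebeefull);
	return 0;
}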
+ */
+ if (!chunk->singleton)
+ return sctp_sf_violation_chunk(net, ep, asoc, type, arg, commands);
+
+ /* Make sure that the INIT-ACK chunk has a valid length */
+ if (!sctp_chunk_length_valid(chunk, sizeof(sctp_initack_chunk_t)))
+ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+ commands);
+ /* Grab the INIT header. */
+ chunk->subh.init_hdr = (sctp_inithdr_t *) chunk->skb->data;
+
+ /* Verify the INIT chunk before processing it. */
+ err_chunk = NULL;
+ if (!sctp_verify_init(net, ep, asoc, chunk->chunk_hdr->type,
+ (sctp_init_chunk_t *)chunk->chunk_hdr, chunk,
+ &err_chunk)) {
+
+ sctp_error_t error = SCTP_ERROR_NO_RESOURCE;
+
+ /* This chunk contains a fatal error. It is to be discarded.
+ * Send an ABORT, with causes. If there are no causes,
+ * then there wasn't enough memory. Just terminate
+ * the association.
+ */
+ if (err_chunk) {
+ packet = sctp_abort_pkt_new(net, ep, asoc, arg,
+ (__u8 *)(err_chunk->chunk_hdr) +
+ sizeof(sctp_chunkhdr_t),
+ ntohs(err_chunk->chunk_hdr->length) -
+ sizeof(sctp_chunkhdr_t));
+
+ sctp_chunk_free(err_chunk);
+
+ if (packet) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
+ SCTP_PACKET(packet));
+ SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
+ error = SCTP_ERROR_INV_PARAM;
+ }
+ }
+
+ /* SCTP-AUTH, Section 6.3:
+ * It should be noted that if the receiver wants to tear
+ * down an association in an authenticated way only, the
+ * handling of malformed packets should not result in
+ * tearing down the association.
+ *
+ * This means that if we only want to abort associations
+ * in an authenticated way (i.e. AUTH+ABORT), then we
+ * can't destroy this association just because the packet
+ * was malformed.
+ */
+ if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+ return sctp_stop_t1_and_abort(net, commands, error, ECONNREFUSED,
+ asoc, chunk->transport);
+ }
+
+ /* Tag the variable length parameters. Note that we never
+ * convert the parameters in an INIT chunk.
+ */
+ chunk->param_hdr.v = skb_pull(chunk->skb, sizeof(sctp_inithdr_t));
+
+ initchunk = (sctp_init_chunk_t *) chunk->chunk_hdr;
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_PEER_INIT,
+ SCTP_PEER_INIT(initchunk));
+
+ /* Reset init error count upon receipt of INIT-ACK. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_INIT_COUNTER_RESET, SCTP_NULL());
+
+ /* 5.1 C) "A" shall stop the T1-init timer and leave
+ * COOKIE-WAIT state. "A" shall then ... start the T1-cookie
+ * timer, and enter the COOKIE-ECHOED state.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
+ sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
+ SCTP_STATE(SCTP_STATE_COOKIE_ECHOED));
+
+ /* SCTP-AUTH: generate the association shared keys so that
+ * we can potentially sign the COOKIE-ECHO.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_SHKEY, SCTP_NULL());
+
+ /* 5.1 C) "A" shall then send the State Cookie received in the
+ * INIT ACK chunk in a COOKIE ECHO chunk, ...
+ */
+ /* If there are any errors to report, send the ERROR chunk generated
+ * for unknown parameters as well.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_GEN_COOKIE_ECHO,
+ SCTP_CHUNK(err_chunk));
+
+ return SCTP_DISPOSITION_CONSUME;
+}
+
+/*
+ * Respond to a normal COOKIE ECHO chunk.
+ * We are the side that is being asked for an association.
+ * + * Section: 5.1 Normal Establishment of an Association, D + * D) Upon reception of the COOKIE ECHO chunk, Endpoint "Z" will reply + * with a COOKIE ACK chunk after building a TCB and moving to + * the ESTABLISHED state. A COOKIE ACK chunk may be bundled with + * any pending DATA chunks (and/or SACK chunks), but the COOKIE ACK + * chunk MUST be the first chunk in the packet. + * + * IMPLEMENTATION NOTE: An implementation may choose to send the + * Communication Up notification to the SCTP user upon reception + * of a valid COOKIE ECHO chunk. + * + * Verification Tag: 8.5.1 Exceptions in Verification Tag Rules + * D) Rules for packet carrying a COOKIE ECHO + * + * - When sending a COOKIE ECHO, the endpoint MUST use the value of the + * Initial Tag received in the INIT ACK. + * + * - The receiver of a COOKIE ECHO follows the procedures in Section 5. + * + * Inputs + * (endpoint, asoc, chunk) + * + * Outputs + * (asoc, reply_msg, msg_up, timers, counters) + * + * The return value is the disposition of the chunk. + */ +sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, void *arg, + sctp_cmd_seq_t *commands) +{ + struct sctp_chunk *chunk = arg; + struct sctp_association *new_asoc; + sctp_init_chunk_t *peer_init; + struct sctp_chunk *repl; + struct sctp_ulpevent *ev, *ai_ev = NULL; + int error = 0; + struct sctp_chunk *err_chk_p; + struct sock *sk; + + /* If the packet is an OOTB packet which is temporarily on the + * control endpoint, respond with an ABORT. + */ + if (ep == sctp_sk(net->sctp.ctl_sock)->ep) { + SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES); + return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands); + } + + /* Make sure that the COOKIE_ECHO chunk has a valid length. + * In this case, we check that we have enough for at least a + * chunk header. More detailed verification is done + * in sctp_unpack_cookie(). + */ + if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); + + /* If the endpoint is not listening or if the number of associations + * on the TCP-style socket exceed the max backlog, respond with an + * ABORT. + */ + sk = ep->base.sk; + if (!sctp_sstate(sk, LISTENING) || + (sctp_style(sk, TCP) && sk_acceptq_is_full(sk))) + return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands); + + /* "Decode" the chunk. We have no optional parameters so we + * are in good shape. + */ + chunk->subh.cookie_hdr = + (struct sctp_signed_cookie *)chunk->skb->data; + if (!pskb_pull(chunk->skb, ntohs(chunk->chunk_hdr->length) - + sizeof(sctp_chunkhdr_t))) + goto nomem; + + /* 5.1 D) Upon reception of the COOKIE ECHO chunk, Endpoint + * "Z" will reply with a COOKIE ACK chunk after building a TCB + * and moving to the ESTABLISHED state. + */ + new_asoc = sctp_unpack_cookie(ep, asoc, chunk, GFP_ATOMIC, &error, + &err_chk_p); + + /* FIXME: + * If the re-build failed, what is the proper error path + * from here? + * + * [We should abort the association. --piggy] + */ + if (!new_asoc) { + /* FIXME: Several errors are possible. A bad cookie should + * be silently discarded, but think about logging it too. 
+ */
+ switch (error) {
+ case -SCTP_IERROR_NOMEM:
+ goto nomem;
+
+ case -SCTP_IERROR_STALE_COOKIE:
+ sctp_send_stale_cookie_err(net, ep, asoc, chunk, commands,
+ err_chk_p);
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ case -SCTP_IERROR_BAD_SIG:
+ default:
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ }
+ }
+
+
+ /* Delay state machine commands until later.
+ *
+ * Re-building the bind address for the association is done in
+ * sctp_unpack_cookie() already.
+ */
+ /* This is a brand-new association, so these are not yet side
+ * effects--it is safe to run them here.
+ */
+ peer_init = &chunk->subh.cookie_hdr->c.peer_init[0];
+
+ if (!sctp_process_init(new_asoc, chunk,
+ &chunk->subh.cookie_hdr->c.peer_addr,
+ peer_init, GFP_ATOMIC))
+ goto nomem_init;
+
+ /* SCTP-AUTH: Now that we've populated the required fields in
+ * sctp_process_init, set up the association shared keys as
+ * necessary so that we can potentially authenticate the ACK
+ */
+ error = sctp_auth_asoc_init_active_key(new_asoc, GFP_ATOMIC);
+ if (error)
+ goto nomem_init;
+
+ /* SCTP-AUTH: auth_chunk pointer is only set when the cookie-echo
+ * is supposed to be authenticated and we have to do delayed
+ * authentication. We've just recreated the association using
+ * the information in the cookie and now it's much easier to
+ * do the authentication.
+ */
+ if (chunk->auth_chunk) {
+ struct sctp_chunk auth;
+ sctp_ierror_t ret;
+
+ /* Make sure that we and the peer are AUTH capable */
+ if (!net->sctp.auth_enable || !new_asoc->peer.auth_capable) {
+ sctp_association_free(new_asoc);
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ }
+
+ /* set up our fake chunk so that we can process it */
+ auth.skb = chunk->auth_chunk;
+ auth.asoc = chunk->asoc;
+ auth.sctp_hdr = chunk->sctp_hdr;
+ auth.chunk_hdr = (sctp_chunkhdr_t *)skb_push(chunk->auth_chunk,
+ sizeof(sctp_chunkhdr_t));
+ skb_pull(chunk->auth_chunk, sizeof(sctp_chunkhdr_t));
+ auth.transport = chunk->transport;
+
+ ret = sctp_sf_authenticate(net, ep, new_asoc, type, &auth);
+ if (ret != SCTP_IERROR_NO_ERROR) {
+ sctp_association_free(new_asoc);
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ }
+ }
+
+ repl = sctp_make_cookie_ack(new_asoc, chunk);
+ if (!repl)
+ goto nomem_init;
+
+ /* RFC 2960 5.1 Normal Establishment of an Association
+ *
+ * D) IMPLEMENTATION NOTE: An implementation may choose to
+ * send the Communication Up notification to the SCTP user
+ * upon reception of a valid COOKIE ECHO chunk.
+ */
+ ev = sctp_ulpevent_make_assoc_change(new_asoc, 0, SCTP_COMM_UP, 0,
+ new_asoc->c.sinit_num_ostreams,
+ new_asoc->c.sinit_max_instreams,
+ NULL, GFP_ATOMIC);
+ if (!ev)
+ goto nomem_ev;
+
+ /* Sockets API Draft Section 5.3.1.6
+ * When a peer sends an Adaptation Layer Indication parameter, SCTP
+ * delivers this notification to inform the application of the
+ * peer's requested adaptation layer.
+ */
+ if (new_asoc->peer.adaptation_ind) {
+ ai_ev = sctp_ulpevent_make_adaptation_indication(new_asoc,
+ GFP_ATOMIC);
+ if (!ai_ev)
+ goto nomem_aiev;
+ }
+
+ /* Add all the state machine commands now since we've created
+ * everything. This way we don't introduce memory corruptions
+ * during side-effect processing and correctly count established
+ * associations.
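The -SCTP_IERROR_BAD_SIG and -SCTP_IERROR_STALE_COOKIE cases above are the two ways a returned cookie can fail. Continuing the toy cookie sketch from the INIT ACK discussion (same invented struct and mac(); the real sctp_unpack_cookie() uses the endpoint's HMAC and an expiration time carried in the cookie):

#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <time.h>

struct cookie {
	uint32_t peer_vtag;
	uint32_t my_vtag;
	uint64_t created;
	uint8_t mac[8];
};

enum cookie_err { COOKIE_OK, COOKIE_BAD_SIG, COOKIE_STALE };

/* Same toy keyed checksum as in the minting sketch; NOT cryptographic. */
static void mac(const void *data, size_t len, uint64_t key, uint8_t out[8])
{
	uint64_t h = key;
	const uint8_t *p = data;

	while (len--)
		h = (h ^ *p++) * 1099511628211ull;
	memcpy(out, &h, sizeof(h));
}

static enum cookie_err check_cookie(const struct cookie *c,
				    uint64_t secret, uint64_t lifespan)
{
	uint8_t expect[8];

	mac(c, offsetof(struct cookie, mac), secret, expect);
	if (memcmp(expect, c->mac, sizeof(expect)))
		return COOKIE_BAD_SIG;	/* forged or corrupted: discard */
	if ((uint64_t)time(NULL) > c->created + lifespan)
		return COOKIE_STALE;	/* answered with a stale-cookie ERROR */
	return COOKIE_OK;
}

int main(void)
{
	struct cookie c = { .peer_vtag = 0x2222, .my_vtag = 0x1111,
			    .created = (uint64_t)time(NULL) };

	mac(&c, offsetof(struct cookie, mac), 42, c.mac);
	return check_cookie(&c, 42, 60) != COOKIE_OK;
}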
+ */ + sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc)); + sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, + SCTP_STATE(SCTP_STATE_ESTABLISHED)); + SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB); + SCTP_INC_STATS(net, SCTP_MIB_PASSIVEESTABS); + sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL()); + + if (new_asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) + sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START, + SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); + + /* This will send the COOKIE ACK */ + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); + + /* Queue the ASSOC_CHANGE event */ + sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); + + /* Send up the Adaptation Layer Indication event */ + if (ai_ev) + sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, + SCTP_ULPEVENT(ai_ev)); + + return SCTP_DISPOSITION_CONSUME; + +nomem_aiev: + sctp_ulpevent_free(ev); +nomem_ev: + sctp_chunk_free(repl); +nomem_init: + sctp_association_free(new_asoc); +nomem: + return SCTP_DISPOSITION_NOMEM; +} + +/* + * Respond to a normal COOKIE ACK chunk. + * We are the side that is being asked for an association. + * + * RFC 2960 5.1 Normal Establishment of an Association + * + * E) Upon reception of the COOKIE ACK, endpoint "A" will move from the + * COOKIE-ECHOED state to the ESTABLISHED state, stopping the T1-cookie + * timer. It may also notify its ULP about the successful + * establishment of the association with a Communication Up + * notification (see Section 10). + * + * Verification Tag: + * Inputs + * (endpoint, asoc, chunk) + * + * Outputs + * (asoc, reply_msg, msg_up, timers, counters) + * + * The return value is the disposition of the chunk. + */ +sctp_disposition_t sctp_sf_do_5_1E_ca(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, void *arg, + sctp_cmd_seq_t *commands) +{ + struct sctp_chunk *chunk = arg; + struct sctp_ulpevent *ev; + + if (!sctp_vtag_verify(chunk, asoc)) + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); + + /* Verify that the chunk length for the COOKIE-ACK is OK. + * If we don't do this, any bundled chunks may be junked. + */ + if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) + return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, + commands); + + /* Reset init error count upon receipt of COOKIE-ACK, + * to avoid problems with the managemement of this + * counter in stale cookie situations when a transition back + * from the COOKIE-ECHOED state to the COOKIE-WAIT + * state is performed. + */ + sctp_add_cmd_sf(commands, SCTP_CMD_INIT_COUNTER_RESET, SCTP_NULL()); + + /* RFC 2960 5.1 Normal Establishment of an Association + * + * E) Upon reception of the COOKIE ACK, endpoint "A" will move + * from the COOKIE-ECHOED state to the ESTABLISHED state, + * stopping the T1-cookie timer. + */ + sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, + SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE)); + sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, + SCTP_STATE(SCTP_STATE_ESTABLISHED)); + SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB); + SCTP_INC_STATS(net, SCTP_MIB_ACTIVEESTABS); + sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL()); + if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) + sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START, + SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); + + /* It may also notify its ULP about the successful + * establishment of the association with a Communication Up + * notification (see Section 10). 
+
+/*
+ * Respond to a normal COOKIE ACK chunk.
+ * We are the side that is being asked for an association.
+ *
+ * RFC 2960 5.1 Normal Establishment of an Association
+ *
+ * E) Upon reception of the COOKIE ACK, endpoint "A" will move from the
+ * COOKIE-ECHOED state to the ESTABLISHED state, stopping the T1-cookie
+ * timer. It may also notify its ULP about the successful
+ * establishment of the association with a Communication Up
+ * notification (see Section 10).
+ *
+ * Verification Tag:
+ * Inputs
+ * (endpoint, asoc, chunk)
+ *
+ * Outputs
+ * (asoc, reply_msg, msg_up, timers, counters)
+ *
+ * The return value is the disposition of the chunk.
+ */
+sctp_disposition_t sctp_sf_do_5_1E_ca(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const sctp_subtype_t type, void *arg,
+ sctp_cmd_seq_t *commands)
+{
+ struct sctp_chunk *chunk = arg;
+ struct sctp_ulpevent *ev;
+
+ if (!sctp_vtag_verify(chunk, asoc))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ /* Verify that the chunk length for the COOKIE-ACK is OK.
+ * If we don't do this, any bundled chunks may be junked.
+ */
+ if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
+ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+ commands);
+
+ /* Reset init error count upon receipt of COOKIE-ACK,
+ * to avoid problems with the management of this
+ * counter in stale cookie situations when a transition back
+ * from the COOKIE-ECHOED state to the COOKIE-WAIT
+ * state is performed.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_INIT_COUNTER_RESET, SCTP_NULL());
+
+ /* RFC 2960 5.1 Normal Establishment of an Association
+ *
+ * E) Upon reception of the COOKIE ACK, endpoint "A" will move
+ * from the COOKIE-ECHOED state to the ESTABLISHED state,
+ * stopping the T1-cookie timer.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
+ sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
+ SCTP_STATE(SCTP_STATE_ESTABLISHED));
+ SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
+ SCTP_INC_STATS(net, SCTP_MIB_ACTIVEESTABS);
+ sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
+ if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE])
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
+
+ /* It may also notify its ULP about the successful
+ * establishment of the association with a Communication Up
+ * notification (see Section 10).
+ */
+ ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_UP,
+ 0, asoc->c.sinit_num_ostreams,
+ asoc->c.sinit_max_instreams,
+ NULL, GFP_ATOMIC);
+
+ if (!ev)
+ goto nomem;
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
+
+ /* Sockets API Draft Section 5.3.1.6
+ * When a peer sends an Adaptation Layer Indication parameter, SCTP
+ * delivers this notification to inform the application of the
+ * peer's requested adaptation layer.
+ */
+ if (asoc->peer.adaptation_ind) {
+ ev = sctp_ulpevent_make_adaptation_indication(asoc, GFP_ATOMIC);
+ if (!ev)
+ goto nomem;
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
+ SCTP_ULPEVENT(ev));
+ }
+
+ return SCTP_DISPOSITION_CONSUME;
+nomem:
+ return SCTP_DISPOSITION_NOMEM;
+}
+
+/* Generate and send out a heartbeat packet. */
+static sctp_disposition_t sctp_sf_heartbeat(const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const sctp_subtype_t type,
+ void *arg,
+ sctp_cmd_seq_t *commands)
+{
+ struct sctp_transport *transport = (struct sctp_transport *) arg;
+ struct sctp_chunk *reply;
+
+ /* Send a heartbeat to our peer. */
+ reply = sctp_make_heartbeat(asoc, transport);
+ if (!reply)
+ return SCTP_DISPOSITION_NOMEM;
+
+ /* Set rto_pending indicating that an RTT measurement
+ * is started with this heartbeat chunk.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_RTO_PENDING,
+ SCTP_TRANSPORT(transport));
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
+ return SCTP_DISPOSITION_CONSUME;
+}
+
+/* Generate a HEARTBEAT packet on the given transport. */
+sctp_disposition_t sctp_sf_sendbeat_8_3(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const sctp_subtype_t type,
+ void *arg,
+ sctp_cmd_seq_t *commands)
+{
+ struct sctp_transport *transport = (struct sctp_transport *) arg;
+
+ if (asoc->overall_error_count >= asoc->max_retrans) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ETIMEDOUT));
+ /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+ SCTP_PERR(SCTP_ERROR_NO_ERROR));
+ SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+ SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
+ return SCTP_DISPOSITION_DELETE_TCB;
+ }
+
+ /* Section 3.3.5.
+ * The Sender-specific Heartbeat Info field should normally include
+ * information about the sender's current time when this HEARTBEAT
+ * chunk is sent and the destination transport address to which this
+ * HEARTBEAT is sent (see Section 8.3).
+ */
+
+ if (transport->param_flags & SPP_HB_ENABLE) {
+ if (SCTP_DISPOSITION_NOMEM ==
+ sctp_sf_heartbeat(ep, asoc, type, arg,
+ commands))
+ return SCTP_DISPOSITION_NOMEM;
+
+ /* Set transport error counter and association error counter
+ * when sending heartbeat.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_HB_SENT,
+ SCTP_TRANSPORT(transport));
+ }
+ sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_IDLE,
+ SCTP_TRANSPORT(transport));
+ sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMER_UPDATE,
+ SCTP_TRANSPORT(transport));
+
+ return SCTP_DISPOSITION_CONSUME;
+}
+
+/*
+ * Process a heartbeat request.
+ *
+ * Section: 8.3 Path Heartbeat
+ * The receiver of the HEARTBEAT should immediately respond with a
+ * HEARTBEAT ACK that contains the Heartbeat Information field copied
+ * from the received HEARTBEAT chunk.
+ * + * Verification Tag: 8.5 Verification Tag [Normal verification] + * When receiving an SCTP packet, the endpoint MUST ensure that the + * value in the Verification Tag field of the received SCTP packet + * matches its own Tag. If the received Verification Tag value does not + * match the receiver's own tag value, the receiver shall silently + * discard the packet and shall not process it any further except for + * those cases listed in Section 8.5.1 below. + * + * Inputs + * (endpoint, asoc, chunk) + * + * Outputs + * (asoc, reply_msg, msg_up, timers, counters) + * + * The return value is the disposition of the chunk. + */ +sctp_disposition_t sctp_sf_beat_8_3(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + sctp_paramhdr_t *param_hdr; + struct sctp_chunk *chunk = arg; + struct sctp_chunk *reply; + size_t paylen = 0; + + if (!sctp_vtag_verify(chunk, asoc)) + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); + + /* Make sure that the HEARTBEAT chunk has a valid length. */ + if (!sctp_chunk_length_valid(chunk, sizeof(sctp_heartbeat_chunk_t))) + return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, + commands); + + /* 8.3 The receiver of the HEARTBEAT should immediately + * respond with a HEARTBEAT ACK that contains the Heartbeat + * Information field copied from the received HEARTBEAT chunk. + */ + chunk->subh.hb_hdr = (sctp_heartbeathdr_t *) chunk->skb->data; + param_hdr = (sctp_paramhdr_t *) chunk->subh.hb_hdr; + paylen = ntohs(chunk->chunk_hdr->length) - sizeof(sctp_chunkhdr_t); + + if (ntohs(param_hdr->length) > paylen) + return sctp_sf_violation_paramlen(net, ep, asoc, type, arg, + param_hdr, commands); + + if (!pskb_pull(chunk->skb, paylen)) + goto nomem; + + reply = sctp_make_heartbeat_ack(asoc, chunk, param_hdr, paylen); + if (!reply) + goto nomem; + + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply)); + return SCTP_DISPOSITION_CONSUME; + +nomem: + return SCTP_DISPOSITION_NOMEM; +} + +/* + * Process the returning HEARTBEAT ACK. + * + * Section: 8.3 Path Heartbeat + * Upon the receipt of the HEARTBEAT ACK, the sender of the HEARTBEAT + * should clear the error counter of the destination transport + * address to which the HEARTBEAT was sent, and mark the destination + * transport address as active if it is not so marked. The endpoint may + * optionally report to the upper layer when an inactive destination + * address is marked as active due to the reception of the latest + * HEARTBEAT ACK. The receiver of the HEARTBEAT ACK must also + * clear the association overall error count as well (as defined + * in section 8.1). + * + * The receiver of the HEARTBEAT ACK should also perform an RTT + * measurement for that destination transport address using the time + * value carried in the HEARTBEAT ACK chunk. + * + * Verification Tag: 8.5 Verification Tag [Normal verification] + * + * Inputs + * (endpoint, asoc, chunk) + * + * Outputs + * (asoc, reply_msg, msg_up, timers, counters) + * + * The return value is the disposition of the chunk. 
+ */
+sctp_disposition_t sctp_sf_backbeat_8_3(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const sctp_subtype_t type,
+ void *arg,
+ sctp_cmd_seq_t *commands)
+{
+ struct sctp_chunk *chunk = arg;
+ union sctp_addr from_addr;
+ struct sctp_transport *link;
+ sctp_sender_hb_info_t *hbinfo;
+ unsigned long max_interval;
+
+ if (!sctp_vtag_verify(chunk, asoc))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ /* Make sure that the HEARTBEAT-ACK chunk has a valid length. */
+ if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t) +
+ sizeof(sctp_sender_hb_info_t)))
+ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+ commands);
+
+ hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data;
+ /* Make sure that the length of the parameter is what we expect */
+ if (ntohs(hbinfo->param_hdr.length) !=
+ sizeof(sctp_sender_hb_info_t)) {
+ return SCTP_DISPOSITION_DISCARD;
+ }
+
+ from_addr = hbinfo->daddr;
+ link = sctp_assoc_lookup_paddr(asoc, &from_addr);
+
+ /* This should never happen, but let's log it if so. */
+ if (unlikely(!link)) {
+ if (from_addr.sa.sa_family == AF_INET6) {
+ net_warn_ratelimited("%s association %p could not find address %pI6\n",
+ __func__,
+ asoc,
+ &from_addr.v6.sin6_addr);
+ } else {
+ net_warn_ratelimited("%s association %p could not find address %pI4\n",
+ __func__,
+ asoc,
+ &from_addr.v4.sin_addr.s_addr);
+ }
+ return SCTP_DISPOSITION_DISCARD;
+ }
+
+ /* Validate the 64-bit random nonce. */
+ if (hbinfo->hb_nonce != link->hb_nonce)
+ return SCTP_DISPOSITION_DISCARD;
+
+ max_interval = link->hbinterval + link->rto;
+
+ /* Check if the timestamp looks valid. */
+ if (time_after(hbinfo->sent_at, jiffies) ||
+ time_after(jiffies, hbinfo->sent_at + max_interval)) {
+ pr_debug("%s: HEARTBEAT ACK with invalid timestamp received "
+ "for transport:%p\n", __func__, link);
+
+ return SCTP_DISPOSITION_DISCARD;
+ }
+
+ /* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of
+ * the HEARTBEAT should clear the error counter of the
+ * destination transport address to which the HEARTBEAT was
+ * sent and mark the destination transport address as active if
+ * it is not so marked.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_ON, SCTP_TRANSPORT(link));
+
+ return SCTP_DISPOSITION_CONSUME;
+}
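[Editor's note: the timestamp sanity check in sctp_sf_backbeat_8_3() above relies on the kernel's wraparound-safe time_after() comparison. A self-contained user-space sketch of the same signed-difference idiom, illustrative only and not part of this patch:]

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* true if a is after b, even across a 32-bit counter wraparound */
static bool time_after_u32(uint32_t a, uint32_t b)
{
	return (int32_t)(b - a) < 0;
}

int main(void)
{
	uint32_t now = 0x00000010, sent_at = 0xfffffff0, max_interval = 0x40;

	/* sent_at is in the past and within the window, so the ACK is valid */
	assert(time_after_u32(now, sent_at));
	assert(!time_after_u32(now, sent_at + max_interval));
	return 0;
}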
+
+/* Helper function to send out an abort for the restart
+ * condition.
+ */
+static int sctp_sf_send_restart_abort(struct net *net, union sctp_addr *ssa,
+ struct sctp_chunk *init,
+ sctp_cmd_seq_t *commands)
+{
+ int len;
+ struct sctp_packet *pkt;
+ union sctp_addr_param *addrparm;
+ struct sctp_errhdr *errhdr;
+ struct sctp_endpoint *ep;
+ char buffer[sizeof(struct sctp_errhdr)+sizeof(union sctp_addr_param)];
+ struct sctp_af *af = sctp_get_af_specific(ssa->v4.sin_family);
+
+ /* Build the error on the stack. We are way too malloc crazy
+ * throughout the code today.
+ */
+ errhdr = (struct sctp_errhdr *)buffer;
+ addrparm = (union sctp_addr_param *)errhdr->variable;
+
+ /* Copy into a parm format. */
+ len = af->to_addr_param(ssa, addrparm);
+ len += sizeof(sctp_errhdr_t);
+
+ errhdr->cause = SCTP_ERROR_RESTART;
+ errhdr->length = htons(len);
+
+ /* Assign to the control socket. */
+ ep = sctp_sk(net->sctp.ctl_sock)->ep;
+
+ /* Association is NULL since this may be a restart attack and we
+ * want to send back the attacker's vtag.
+ */
+ pkt = sctp_abort_pkt_new(net, ep, NULL, init, errhdr, len);
+
+ if (!pkt)
+ goto out;
+ sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, SCTP_PACKET(pkt));
+
+ SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
+
+ /* Discard the rest of the inbound packet. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
+
+out:
+ /* Even if there is no memory, treat as a failure so
+ * the packet will get dropped.
+ */
+ return 0;
+}
+
+static bool list_has_sctp_addr(const struct list_head *list,
+ union sctp_addr *ipaddr)
+{
+ struct sctp_transport *addr;
+
+ list_for_each_entry(addr, list, transports) {
+ if (sctp_cmp_addr_exact(ipaddr, &addr->ipaddr))
+ return true;
+ }
+
+ return false;
+}
+
+/* A restart is occurring, check to make sure no new addresses
+ * are being added as we may be under a takeover attack.
+ */
+static int sctp_sf_check_restart_addrs(const struct sctp_association *new_asoc,
+ const struct sctp_association *asoc,
+ struct sctp_chunk *init,
+ sctp_cmd_seq_t *commands)
+{
+ struct net *net = sock_net(new_asoc->base.sk);
+ struct sctp_transport *new_addr;
+ int ret = 1;
+
+ /* Implementor's Guide - Section 5.2.2
+ * ...
+ * Before responding the endpoint MUST check to see if the
+ * unexpected INIT adds new addresses to the association. If new
+ * addresses are added to the association, the endpoint MUST respond
+ * with an ABORT.
+ */
+
+ /* Search through all current addresses and make sure
+ * we aren't adding any new ones.
+ */
+ list_for_each_entry(new_addr, &new_asoc->peer.transport_addr_list,
+ transports) {
+ if (!list_has_sctp_addr(&asoc->peer.transport_addr_list,
+ &new_addr->ipaddr)) {
+ sctp_sf_send_restart_abort(net, &new_addr->ipaddr, init,
+ commands);
+ ret = 0;
+ break;
+ }
+ }
+
+ /* Return success if all addresses were found. */
+ return ret;
+}
+
+/* Populate the verification/tie tags based on overlapping INIT
+ * scenario.
+ *
+ * Note: Do not use in CLOSED or SHUTDOWN-ACK-SENT state.
+ */
+static void sctp_tietags_populate(struct sctp_association *new_asoc,
+ const struct sctp_association *asoc)
+{
+ switch (asoc->state) {
+
+ /* 5.2.1 INIT received in COOKIE-WAIT or COOKIE-ECHOED State */
+
+ case SCTP_STATE_COOKIE_WAIT:
+ new_asoc->c.my_vtag = asoc->c.my_vtag;
+ new_asoc->c.my_ttag = asoc->c.my_vtag;
+ new_asoc->c.peer_ttag = 0;
+ break;
+
+ case SCTP_STATE_COOKIE_ECHOED:
+ new_asoc->c.my_vtag = asoc->c.my_vtag;
+ new_asoc->c.my_ttag = asoc->c.my_vtag;
+ new_asoc->c.peer_ttag = asoc->c.peer_vtag;
+ break;
+
+ /* 5.2.2 Unexpected INIT in States Other than CLOSED, COOKIE-ECHOED,
+ * COOKIE-WAIT and SHUTDOWN-ACK-SENT
+ */
+ default:
+ new_asoc->c.my_ttag = asoc->c.my_vtag;
+ new_asoc->c.peer_ttag = asoc->c.peer_vtag;
+ break;
+ }
+
+ /* Other parameters for the endpoint SHOULD be copied from the
+ * existing parameters of the association (e.g. number of
+ * outbound streams) into the INIT ACK and cookie.
+ */
+ new_asoc->rwnd = asoc->rwnd;
+ new_asoc->c.sinit_num_ostreams = asoc->c.sinit_num_ostreams;
+ new_asoc->c.sinit_max_instreams = asoc->c.sinit_max_instreams;
+ new_asoc->c.initial_tsn = asoc->c.initial_tsn;
+}
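[Editor's note: sctp_sf_check_restart_addrs() above rejects a restart whose INIT adds peer addresses, since that may be a takeover attempt. A user-space sketch of the same subset test over plain address arrays, illustrative only; is_subset is a hypothetical stand-in for the transport-list walk:]

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool is_subset(const uint32_t *cand, size_t n_cand,
		      const uint32_t *old, size_t n_old)
{
	for (size_t i = 0; i < n_cand; i++) {
		bool found = false;
		for (size_t j = 0; j < n_old; j++)
			if (cand[i] == old[j])
				found = true;
		if (!found)
			return false;	/* new address added: abort the restart */
	}
	return true;
}

int main(void)
{
	uint32_t old[] = { 0x0a000001, 0x0a000002 };
	uint32_t same[] = { 0x0a000001 };
	uint32_t added[] = { 0x0a000001, 0x0a000003 };

	assert(is_subset(same, 1, old, 2));
	assert(!is_subset(added, 2, old, 2));
	return 0;
}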
+
+/*
+ * Compare vtag/tietag values to determine unexpected COOKIE-ECHO
+ * handling action.
+ *
+ * RFC 2960 5.2.4 Handle a COOKIE ECHO when a TCB exists.
+ *
+ * Returns value representing action to be taken. These action values
+ * correspond to Action/Description values in RFC 2960, Table 2.
+ */
+static char sctp_tietags_compare(struct sctp_association *new_asoc,
+ const struct sctp_association *asoc)
+{
+ /* In this case, the peer may have restarted. */
+ if ((asoc->c.my_vtag != new_asoc->c.my_vtag) &&
+ (asoc->c.peer_vtag != new_asoc->c.peer_vtag) &&
+ (asoc->c.my_vtag == new_asoc->c.my_ttag) &&
+ (asoc->c.peer_vtag == new_asoc->c.peer_ttag))
+ return 'A';
+
+ /* Collision case B. */
+ if ((asoc->c.my_vtag == new_asoc->c.my_vtag) &&
+ ((asoc->c.peer_vtag != new_asoc->c.peer_vtag) ||
+ (0 == asoc->c.peer_vtag))) {
+ return 'B';
+ }
+
+ /* Collision case D. */
+ if ((asoc->c.my_vtag == new_asoc->c.my_vtag) &&
+ (asoc->c.peer_vtag == new_asoc->c.peer_vtag))
+ return 'D';
+
+ /* Collision case C. */
+ if ((asoc->c.my_vtag != new_asoc->c.my_vtag) &&
+ (asoc->c.peer_vtag == new_asoc->c.peer_vtag) &&
+ (0 == new_asoc->c.my_ttag) &&
+ (0 == new_asoc->c.peer_ttag))
+ return 'C';
+
+ /* No match to any of the special cases; discard this packet. */
+ return 'E';
+}
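[Editor's note: a user-space mirror of sctp_tietags_compare() above, with one test vector per interesting action from RFC 2960, Table 2. Illustrative only; the struct, field, and function names are stand-ins, but the check order (A, B, D, C, else E) matches the function above:]

#include <assert.h>
#include <stdint.h>

struct tags { uint32_t my_vtag, peer_vtag, my_ttag, peer_ttag; };

static char tietags_action(const struct tags *cur, const struct tags *cand)
{
	if (cur->my_vtag != cand->my_vtag && cur->peer_vtag != cand->peer_vtag &&
	    cur->my_vtag == cand->my_ttag && cur->peer_vtag == cand->peer_ttag)
		return 'A';	/* peer restart */
	if (cur->my_vtag == cand->my_vtag &&
	    (cur->peer_vtag != cand->peer_vtag || cur->peer_vtag == 0))
		return 'B';	/* collision: our INIT ACK was lost */
	if (cur->my_vtag == cand->my_vtag && cur->peer_vtag == cand->peer_vtag)
		return 'D';	/* both tags match */
	if (cur->my_vtag != cand->my_vtag && cur->peer_vtag == cand->peer_vtag &&
	    cand->my_ttag == 0 && cand->peer_ttag == 0)
		return 'C';	/* our cookie arrived late */
	return 'E';		/* no match: discard */
}

int main(void)
{
	struct tags cur = { 1, 2, 0, 0 };
	struct tags restart = { 3, 4, 1, 2 };	/* new tags; old pair in tie-tags */
	struct tags collide = { 1, 5, 0, 0 };	/* our tag kept, peer tag differs */
	struct tags match = { 1, 2, 0, 0 };

	assert(tietags_action(&cur, &restart) == 'A');
	assert(tietags_action(&cur, &collide) == 'B');
	assert(tietags_action(&cur, &match) == 'D');
	return 0;
}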
+
+/* Common helper routine for both duplicate and simultaneous INIT
+ * chunk handling.
+ */
+static sctp_disposition_t sctp_sf_do_unexpected_init(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const sctp_subtype_t type,
+ void *arg, sctp_cmd_seq_t *commands)
+{
+ sctp_disposition_t retval;
+ struct sctp_chunk *chunk = arg;
+ struct sctp_chunk *repl;
+ struct sctp_association *new_asoc;
+ struct sctp_chunk *err_chunk;
+ struct sctp_packet *packet;
+ sctp_unrecognized_param_t *unk_param;
+ int len;
+
+ /* 6.10 Bundling
+ * An endpoint MUST NOT bundle INIT, INIT ACK or
+ * SHUTDOWN COMPLETE with any other chunks.
+ *
+ * IG Section 2.11.2
+ * Furthermore, we require that the receiver of an INIT chunk MUST
+ * enforce these rules by silently discarding an arriving packet
+ * with an INIT chunk that is bundled with other chunks.
+ */
+ if (!chunk->singleton)
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ /* 3.1 A packet containing an INIT chunk MUST have a zero Verification
+ * Tag.
+ */
+ if (chunk->sctp_hdr->vtag != 0)
+ return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
+
+ /* Make sure that the INIT chunk has a valid length.
+ * In this case, we generate a protocol violation since we have
+ * an association established.
+ */
+ if (!sctp_chunk_length_valid(chunk, sizeof(sctp_init_chunk_t)))
+ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+ commands);
+ /* Grab the INIT header. */
+ chunk->subh.init_hdr = (sctp_inithdr_t *) chunk->skb->data;
+
+ /* Tag the variable length parameters. */
+ chunk->param_hdr.v = skb_pull(chunk->skb, sizeof(sctp_inithdr_t));
+
+ /* Verify the INIT chunk before processing it. */
+ err_chunk = NULL;
+ if (!sctp_verify_init(net, ep, asoc, chunk->chunk_hdr->type,
+ (sctp_init_chunk_t *)chunk->chunk_hdr, chunk,
+ &err_chunk)) {
+ /* This chunk contains a fatal error. It is to be discarded.
+ * Send an ABORT, with causes if there are any.
+ */
+ if (err_chunk) {
+ packet = sctp_abort_pkt_new(net, ep, asoc, arg,
+ (__u8 *)(err_chunk->chunk_hdr) +
+ sizeof(sctp_chunkhdr_t),
+ ntohs(err_chunk->chunk_hdr->length) -
+ sizeof(sctp_chunkhdr_t));
+
+ if (packet) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
+ SCTP_PACKET(packet));
+ SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
+ retval = SCTP_DISPOSITION_CONSUME;
+ } else {
+ retval = SCTP_DISPOSITION_NOMEM;
+ }
+ goto cleanup;
+ } else {
+ return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg,
+ commands);
+ }
+ }
+
+ /*
+ * Other parameters for the endpoint SHOULD be copied from the
+ * existing parameters of the association (e.g. number of
+ * outbound streams) into the INIT ACK and cookie.
+ * FIXME: We are copying parameters from the endpoint not the
+ * association.
+ */
+ new_asoc = sctp_make_temp_asoc(ep, chunk, GFP_ATOMIC);
+ if (!new_asoc)
+ goto nomem;
+
+ if (sctp_assoc_set_bind_addr_from_ep(new_asoc,
+ sctp_scope(sctp_source(chunk)), GFP_ATOMIC) < 0)
+ goto nomem;
+
+ /* In the outbound INIT ACK the endpoint MUST copy its current
+ * Verification Tag and Peer's Verification Tag into a reserved
+ * place (local tie-tag and peer tie-tag) within the state cookie.
+ */
+ if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk),
+ (sctp_init_chunk_t *)chunk->chunk_hdr,
+ GFP_ATOMIC))
+ goto nomem;
+
+ /* Make sure no new addresses are being added during the
+ * restart. Do not do this check for COOKIE-WAIT state,
+ * since there are no peer addresses to check against.
+ * Upon return an ABORT will have been sent if needed.
+ */
+ if (!sctp_state(asoc, COOKIE_WAIT)) {
+ if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk,
+ commands)) {
+ retval = SCTP_DISPOSITION_CONSUME;
+ goto nomem_retval;
+ }
+ }
+
+ sctp_tietags_populate(new_asoc, asoc);
+
+ /* B) "Z" shall respond immediately with an INIT ACK chunk. */
+
+ /* If there are errors that need to be reported for unknown parameters,
+ * make sure to reserve enough room in the INIT ACK for them.
+ */
+ len = 0;
+ if (err_chunk) {
+ len = ntohs(err_chunk->chunk_hdr->length) -
+ sizeof(sctp_chunkhdr_t);
+ }
+
+ repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len);
+ if (!repl)
+ goto nomem;
+
+ /* If there are errors that need to be reported for unknown parameters,
+ * include them in the outgoing INIT ACK as "Unrecognized parameter"
+ * parameters.
+ */
+ if (err_chunk) {
+ /* Get the "Unrecognized parameter" parameter(s) out of the
+ * ERROR chunk generated by sctp_verify_init(). Since the
+ * error cause code for "unknown parameter" and the
+ * "Unrecognized parameter" type is the same, we can
+ * construct the parameters in INIT ACK by copying the
+ * ERROR causes over.
+ */
+ unk_param = (sctp_unrecognized_param_t *)
+ ((__u8 *)(err_chunk->chunk_hdr) +
+ sizeof(sctp_chunkhdr_t));
+ /* Replace the cause code with the "Unrecognized parameter"
+ * parameter type.
+ */
+ sctp_addto_chunk(repl, len, unk_param);
+ }
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
+
+ /*
+ * Note: After sending out INIT ACK with the State Cookie parameter,
+ * "Z" MUST NOT allocate any resources for this new association.
+ * Otherwise, "Z" will be vulnerable to resource attacks.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
+ retval = SCTP_DISPOSITION_CONSUME;
+
+ return retval;
+
+nomem:
+ retval = SCTP_DISPOSITION_NOMEM;
+nomem_retval:
+ if (new_asoc)
+ sctp_association_free(new_asoc);
+cleanup:
+ if (err_chunk)
+ sctp_chunk_free(err_chunk);
+ return retval;
+}
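[Editor's note: the copy-over in sctp_sf_do_unexpected_init() above works because an ERROR cause and an INIT ACK parameter share the same 4-byte type/length header, and RFC 2960 assigns the value 8 both to the "Unrecognized Parameters" cause code and to the "Unrecognized Parameter" parameter type. A small illustrative sketch, not part of this patch:]

#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	/* "Unrecognized Parameters" ERROR cause: type 8, length 8,
	 * carrying one offending parameter TLV (type 0xc000, length 4).
	 * All fields are big endian on the wire.
	 */
	uint8_t err_cause[8] = { 0x00, 0x08, 0x00, 0x08,
				 0xc0, 0x00, 0x00, 0x04 };
	uint8_t initack_param[8];

	/* A byte-for-byte copy yields a valid INIT ACK "Unrecognized
	 * Parameter" parameter, because its type value is also 8.
	 */
	memcpy(initack_param, err_cause, sizeof(err_cause));
	assert(memcmp(initack_param, err_cause, sizeof(err_cause)) == 0);
	return 0;
}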
+
+/*
+ * Handle simultaneous INIT.
+ * This means we started an INIT and then we got an INIT request from
+ * our peer.
+ *
+ * Section: 5.2.1 INIT received in COOKIE-WAIT or COOKIE-ECHOED State (Item B)
+ * This usually indicates an initialization collision, i.e., each
+ * endpoint is attempting, at about the same time, to establish an
+ * association with the other endpoint.
+ *
+ * Upon receipt of an INIT in the COOKIE-WAIT or COOKIE-ECHOED state, an
+ * endpoint MUST respond with an INIT ACK using the same parameters it
+ * sent in its original INIT chunk (including its Verification Tag,
+ * unchanged). These original parameters are combined with those from the
+ * newly received INIT chunk. The endpoint shall also generate a State
+ * Cookie with the INIT ACK. The endpoint uses the parameters sent in its
+ * INIT to calculate the State Cookie.
+ *
+ * After that, the endpoint MUST NOT change its state, the T1-init
+ * timer shall be left running and the corresponding TCB MUST NOT be
+ * destroyed. The normal procedures for handling State Cookies when
+ * a TCB exists will resolve the duplicate INITs to a single association.
+ *
+ * For an endpoint that is in the COOKIE-ECHOED state it MUST populate
+ * its Tie-Tags with the Tag information of itself and its peer (see
+ * section 5.2.2 for a description of the Tie-Tags).
+ *
+ * Verification Tag: Not explicit, but an INIT cannot have a valid
+ * verification tag, so we skip the check.
+ *
+ * Inputs
+ * (endpoint, asoc, chunk)
+ *
+ * Outputs
+ * (asoc, reply_msg, msg_up, timers, counters)
+ *
+ * The return value is the disposition of the chunk.
+ */
+sctp_disposition_t sctp_sf_do_5_2_1_siminit(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const sctp_subtype_t type,
+ void *arg,
+ sctp_cmd_seq_t *commands)
+{
+ /* Call helper to do the real work for both simultaneous and
+ * duplicate INIT chunk handling.
+ */
+ return sctp_sf_do_unexpected_init(net, ep, asoc, type, arg, commands);
+}
+
+/*
+ * Handle duplicated INIT messages. These are usually delayed
+ * retransmissions.
+ *
+ * Section: 5.2.2 Unexpected INIT in States Other than CLOSED,
+ * COOKIE-ECHOED and COOKIE-WAIT
+ *
+ * Unless otherwise stated, upon reception of an unexpected INIT for
+ * this association, the endpoint shall generate an INIT ACK with a
+ * State Cookie. In the outbound INIT ACK the endpoint MUST copy its
+ * current Verification Tag and peer's Verification Tag into a reserved
+ * place within the state cookie. We shall refer to these locations as
+ * the Peer's-Tie-Tag and the Local-Tie-Tag. The outbound SCTP packet
+ * containing this INIT ACK MUST carry a Verification Tag value equal to
+ * the Initiation Tag found in the unexpected INIT. And the INIT ACK
+ * MUST contain a new Initiation Tag (randomly generated; see Section
+ * 5.3.1). Other parameters for the endpoint SHOULD be copied from the
+ * existing parameters of the association (e.g. number of outbound
+ * streams) into the INIT ACK and cookie.
+ *
+ * After sending out the INIT ACK, the endpoint shall take no further
+ * actions, i.e., the existing association, including its current state,
+ * and the corresponding TCB MUST NOT be changed.
+ *
+ * Note: Only when a TCB exists and the association is not in a COOKIE-
+ * WAIT state are the Tie-Tags populated. For a normal association INIT
+ * (i.e. the endpoint is in a COOKIE-WAIT state), the Tie-Tags MUST be
+ * set to 0 (indicating that no previous TCB existed). The INIT ACK and
+ * State Cookie are populated as specified in section 5.2.1.
+ *
+ * Verification Tag: Not specified, but an INIT has no way of knowing
+ * what the verification tag could be, so we ignore it.
+ *
+ * Inputs
+ * (endpoint, asoc, chunk)
+ *
+ * Outputs
+ * (asoc, reply_msg, msg_up, timers, counters)
+ *
+ * The return value is the disposition of the chunk.
+ */
+sctp_disposition_t sctp_sf_do_5_2_2_dupinit(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const sctp_subtype_t type,
+ void *arg,
+ sctp_cmd_seq_t *commands)
+{
+ /* Call helper to do the real work for both simultaneous and
+ * duplicate INIT chunk handling.
+ */
+ return sctp_sf_do_unexpected_init(net, ep, asoc, type, arg, commands);
+}
+
+
+/*
+ * Unexpected INIT-ACK handler.
+ *
+ * Section 5.2.3
+ * If an INIT ACK is received by an endpoint in any state other than the
+ * COOKIE-WAIT state, the endpoint should discard the INIT ACK chunk.
+ * An unexpected INIT ACK usually indicates the processing of an old or
+ * duplicated INIT chunk.
+*/
+sctp_disposition_t sctp_sf_do_5_2_3_initack(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const sctp_subtype_t type,
+ void *arg, sctp_cmd_seq_t *commands)
+{
+ /* Per the above section, we'll discard the chunk if we have an
+ * endpoint. If this is an OOTB INIT-ACK, treat it as such.
+ */
+ if (ep == sctp_sk(net->sctp.ctl_sock)->ep)
+ return sctp_sf_ootb(net, ep, asoc, type, arg, commands);
+ else
+ return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
+}
+
+/* Unexpected COOKIE-ECHO handler for peer restart (Table 2, action 'A')
+ *
+ * Section 5.2.4
+ * A) In this case, the peer may have restarted.
+ */
+static sctp_disposition_t sctp_sf_do_dupcook_a(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ struct sctp_chunk *chunk,
+ sctp_cmd_seq_t *commands,
+ struct sctp_association *new_asoc)
+{
+ sctp_init_chunk_t *peer_init;
+ struct sctp_ulpevent *ev;
+ struct sctp_chunk *repl;
+ struct sctp_chunk *err;
+ sctp_disposition_t disposition;
+
+ /* new_asoc is a brand-new association, so these are not yet
+ * side effects--it is safe to run them here.
+ */
+ peer_init = &chunk->subh.cookie_hdr->c.peer_init[0];
+
+ if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk), peer_init,
+ GFP_ATOMIC))
+ goto nomem;
+
+ /* Make sure no new addresses are being added during the
+ * restart. Though this is a pretty complicated attack
+ * since you'd have to get inside the cookie.
+ */
+ if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk, commands)) {
+ return SCTP_DISPOSITION_CONSUME;
+ }
+
+ /* If the endpoint is in the SHUTDOWN-ACK-SENT state and recognizes
+ * the peer has restarted (Action A), it MUST NOT set up a new
+ * association but instead resend the SHUTDOWN ACK and send an ERROR
+ * chunk with a "Cookie Received while Shutting Down" error cause to
+ * its peer.
+ */ + if (sctp_state(asoc, SHUTDOWN_ACK_SENT)) { + disposition = sctp_sf_do_9_2_reshutack(net, ep, asoc, + SCTP_ST_CHUNK(chunk->chunk_hdr->type), + chunk, commands); + if (SCTP_DISPOSITION_NOMEM == disposition) + goto nomem; + + err = sctp_make_op_error(asoc, chunk, + SCTP_ERROR_COOKIE_IN_SHUTDOWN, + NULL, 0, 0); + if (err) + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, + SCTP_CHUNK(err)); + + return SCTP_DISPOSITION_CONSUME; + } + + /* For now, stop pending T3-rtx and SACK timers, fail any unsent/unacked + * data. Consider the optional choice of resending of this data. + */ + sctp_add_cmd_sf(commands, SCTP_CMD_T3_RTX_TIMERS_STOP, SCTP_NULL()); + sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, + SCTP_TO(SCTP_EVENT_TIMEOUT_SACK)); + sctp_add_cmd_sf(commands, SCTP_CMD_PURGE_OUTQUEUE, SCTP_NULL()); + + /* Stop pending T4-rto timer, teardown ASCONF queue, ASCONF-ACK queue + * and ASCONF-ACK cache. + */ + sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, + SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO)); + sctp_add_cmd_sf(commands, SCTP_CMD_PURGE_ASCONF_QUEUE, SCTP_NULL()); + + repl = sctp_make_cookie_ack(new_asoc, chunk); + if (!repl) + goto nomem; + + /* Report association restart to upper layer. */ + ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_RESTART, 0, + new_asoc->c.sinit_num_ostreams, + new_asoc->c.sinit_max_instreams, + NULL, GFP_ATOMIC); + if (!ev) + goto nomem_ev; + + /* Update the content of current association. */ + sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc)); + sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); + if (sctp_state(asoc, SHUTDOWN_PENDING) && + (sctp_sstate(asoc->base.sk, CLOSING) || + sock_flag(asoc->base.sk, SOCK_DEAD))) { + /* if were currently in SHUTDOWN_PENDING, but the socket + * has been closed by user, don't transition to ESTABLISHED. + * Instead trigger SHUTDOWN bundled with COOKIE_ACK. + */ + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); + return sctp_sf_do_9_2_start_shutdown(net, ep, asoc, + SCTP_ST_CHUNK(0), NULL, + commands); + } else { + sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, + SCTP_STATE(SCTP_STATE_ESTABLISHED)); + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); + } + return SCTP_DISPOSITION_CONSUME; + +nomem_ev: + sctp_chunk_free(repl); +nomem: + return SCTP_DISPOSITION_NOMEM; +} + +/* Unexpected COOKIE-ECHO handler for setup collision (Table 2, action 'B') + * + * Section 5.2.4 + * B) In this case, both sides may be attempting to start an association + * at about the same time but the peer endpoint started its INIT + * after responding to the local endpoint's INIT + */ +/* This case represents an initialization collision. */ +static sctp_disposition_t sctp_sf_do_dupcook_b(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + struct sctp_chunk *chunk, + sctp_cmd_seq_t *commands, + struct sctp_association *new_asoc) +{ + sctp_init_chunk_t *peer_init; + struct sctp_chunk *repl; + + /* new_asoc is a brand-new association, so these are not yet + * side effects--it is safe to run them here. + */ + peer_init = &chunk->subh.cookie_hdr->c.peer_init[0]; + if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk), peer_init, + GFP_ATOMIC)) + goto nomem; + + /* Update the content of current association. 
*/ + sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc)); + sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, + SCTP_STATE(SCTP_STATE_ESTABLISHED)); + SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB); + sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL()); + + repl = sctp_make_cookie_ack(new_asoc, chunk); + if (!repl) + goto nomem; + + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); + + /* RFC 2960 5.1 Normal Establishment of an Association + * + * D) IMPLEMENTATION NOTE: An implementation may choose to + * send the Communication Up notification to the SCTP user + * upon reception of a valid COOKIE ECHO chunk. + * + * Sadly, this needs to be implemented as a side-effect, because + * we are not guaranteed to have set the association id of the real + * association and so these notifications need to be delayed until + * the association id is allocated. + */ + + sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_CHANGE, SCTP_U8(SCTP_COMM_UP)); + + /* Sockets API Draft Section 5.3.1.6 + * When a peer sends a Adaptation Layer Indication parameter , SCTP + * delivers this notification to inform the application that of the + * peers requested adaptation layer. + * + * This also needs to be done as a side effect for the same reason as + * above. + */ + if (asoc->peer.adaptation_ind) + sctp_add_cmd_sf(commands, SCTP_CMD_ADAPTATION_IND, SCTP_NULL()); + + return SCTP_DISPOSITION_CONSUME; + +nomem: + return SCTP_DISPOSITION_NOMEM; +} + +/* Unexpected COOKIE-ECHO handler for setup collision (Table 2, action 'C') + * + * Section 5.2.4 + * C) In this case, the local endpoint's cookie has arrived late. + * Before it arrived, the local endpoint sent an INIT and received an + * INIT-ACK and finally sent a COOKIE ECHO with the peer's same tag + * but a new tag of its own. + */ +/* This case represents an initialization collision. */ +static sctp_disposition_t sctp_sf_do_dupcook_c(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + struct sctp_chunk *chunk, + sctp_cmd_seq_t *commands, + struct sctp_association *new_asoc) +{ + /* The cookie should be silently discarded. + * The endpoint SHOULD NOT change states and should leave + * any timers running. + */ + return SCTP_DISPOSITION_DISCARD; +} + +/* Unexpected COOKIE-ECHO handler lost chunk (Table 2, action 'D') + * + * Section 5.2.4 + * + * D) When both local and remote tags match the endpoint should always + * enter the ESTABLISHED state, if it has not already done so. + */ +/* This case represents an initialization collision. */ +static sctp_disposition_t sctp_sf_do_dupcook_d(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + struct sctp_chunk *chunk, + sctp_cmd_seq_t *commands, + struct sctp_association *new_asoc) +{ + struct sctp_ulpevent *ev = NULL, *ai_ev = NULL; + struct sctp_chunk *repl; + + /* Clarification from Implementor's Guide: + * D) When both local and remote tags match the endpoint should + * enter the ESTABLISHED state, if it is in the COOKIE-ECHOED state. + * It should stop any cookie timer that may be running and send + * a COOKIE ACK. + */ + + /* Don't accidentally move back into established state. 
*/ + if (asoc->state < SCTP_STATE_ESTABLISHED) { + sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, + SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE)); + sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, + SCTP_STATE(SCTP_STATE_ESTABLISHED)); + SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB); + sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, + SCTP_NULL()); + + /* RFC 2960 5.1 Normal Establishment of an Association + * + * D) IMPLEMENTATION NOTE: An implementation may choose + * to send the Communication Up notification to the + * SCTP user upon reception of a valid COOKIE + * ECHO chunk. + */ + ev = sctp_ulpevent_make_assoc_change(asoc, 0, + SCTP_COMM_UP, 0, + asoc->c.sinit_num_ostreams, + asoc->c.sinit_max_instreams, + NULL, GFP_ATOMIC); + if (!ev) + goto nomem; + + /* Sockets API Draft Section 5.3.1.6 + * When a peer sends a Adaptation Layer Indication parameter, + * SCTP delivers this notification to inform the application + * that of the peers requested adaptation layer. + */ + if (asoc->peer.adaptation_ind) { + ai_ev = sctp_ulpevent_make_adaptation_indication(asoc, + GFP_ATOMIC); + if (!ai_ev) + goto nomem; + + } + } + + repl = sctp_make_cookie_ack(new_asoc, chunk); + if (!repl) + goto nomem; + + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); + + if (ev) + sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, + SCTP_ULPEVENT(ev)); + if (ai_ev) + sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, + SCTP_ULPEVENT(ai_ev)); + + return SCTP_DISPOSITION_CONSUME; + +nomem: + if (ai_ev) + sctp_ulpevent_free(ai_ev); + if (ev) + sctp_ulpevent_free(ev); + return SCTP_DISPOSITION_NOMEM; +} + +/* + * Handle a duplicate COOKIE-ECHO. This usually means a cookie-carrying + * chunk was retransmitted and then delayed in the network. + * + * Section: 5.2.4 Handle a COOKIE ECHO when a TCB exists + * + * Verification Tag: None. Do cookie validation. + * + * Inputs + * (endpoint, asoc, chunk) + * + * Outputs + * (asoc, reply_msg, msg_up, timers, counters) + * + * The return value is the disposition of the chunk. + */ +sctp_disposition_t sctp_sf_do_5_2_4_dupcook(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + sctp_disposition_t retval; + struct sctp_chunk *chunk = arg; + struct sctp_association *new_asoc; + int error = 0; + char action; + struct sctp_chunk *err_chk_p; + + /* Make sure that the chunk has a valid length from the protocol + * perspective. In this case check to make sure we have at least + * enough for the chunk header. Cookie length verification is + * done later. + */ + if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) + return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, + commands); + + /* "Decode" the chunk. We have no optional parameters so we + * are in good shape. + */ + chunk->subh.cookie_hdr = (struct sctp_signed_cookie *)chunk->skb->data; + if (!pskb_pull(chunk->skb, ntohs(chunk->chunk_hdr->length) - + sizeof(sctp_chunkhdr_t))) + goto nomem; + + /* In RFC 2960 5.2.4 3, if both Verification Tags in the State Cookie + * of a duplicate COOKIE ECHO match the Verification Tags of the + * current association, consider the State Cookie valid even if + * the lifespan is exceeded. + */ + new_asoc = sctp_unpack_cookie(ep, asoc, chunk, GFP_ATOMIC, &error, + &err_chk_p); + + /* FIXME: + * If the re-build failed, what is the proper error path + * from here? + * + * [We should abort the association. --piggy] + */ + if (!new_asoc) { + /* FIXME: Several errors are possible. 
A bad cookie should
+ * be silently discarded, but think about logging it too.
+ */
+ switch (error) {
+ case -SCTP_IERROR_NOMEM:
+ goto nomem;
+
+ case -SCTP_IERROR_STALE_COOKIE:
+ sctp_send_stale_cookie_err(net, ep, asoc, chunk, commands,
+ err_chk_p);
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ case -SCTP_IERROR_BAD_SIG:
+ default:
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ }
+ }
+
+ /* Compare the tie_tag in cookie with the verification tag of
+ * current association.
+ */
+ action = sctp_tietags_compare(new_asoc, asoc);
+
+ switch (action) {
+ case 'A': /* Association restart. */
+ retval = sctp_sf_do_dupcook_a(net, ep, asoc, chunk, commands,
+ new_asoc);
+ break;
+
+ case 'B': /* Collision case B. */
+ retval = sctp_sf_do_dupcook_b(net, ep, asoc, chunk, commands,
+ new_asoc);
+ break;
+
+ case 'C': /* Collision case C. */
+ retval = sctp_sf_do_dupcook_c(net, ep, asoc, chunk, commands,
+ new_asoc);
+ break;
+
+ case 'D': /* Collision case D. */
+ retval = sctp_sf_do_dupcook_d(net, ep, asoc, chunk, commands,
+ new_asoc);
+ break;
+
+ default: /* Discard packet for all others. */
+ retval = sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ break;
+ }
+
+ /* Delete the temporary new association. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC, SCTP_ASOC(new_asoc));
+ sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
+
+ /* Restore association pointer to provide SCTP command interpreter
+ * with a valid context in case it needs to manipulate
+ * the queues */
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC,
+ SCTP_ASOC((struct sctp_association *)asoc));
+
+ return retval;
+
+nomem:
+ return SCTP_DISPOSITION_NOMEM;
+}
+
+/*
+ * Process an ABORT. (SHUTDOWN-PENDING state)
+ *
+ * See sctp_sf_do_9_1_abort().
+ */
+sctp_disposition_t sctp_sf_shutdown_pending_abort(
+ struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const sctp_subtype_t type,
+ void *arg,
+ sctp_cmd_seq_t *commands)
+{
+ struct sctp_chunk *chunk = arg;
+
+ if (!sctp_vtag_verify_either(chunk, asoc))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ /* Make sure that the ABORT chunk has a valid length.
+ * Since this is an ABORT chunk, we have to discard it
+ * because of the following text:
+ * RFC 2960, Section 3.3.7
+ * If an endpoint receives an ABORT with a format error or for an
+ * association that doesn't exist, it MUST silently discard it.
+ * Because the length is "invalid", we can't really discard just
+ * as we do not know its true length. So, to be safe, discard the
+ * packet.
+ */
+ if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t)))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ /* ADD-IP: Special case for ABORT chunks
+ * F4) One special consideration is that ABORT Chunks arriving
+ * destined to the IP address being deleted MUST be
+ * ignored (see Section 5.3.1 for further details).
+ */
+ if (SCTP_ADDR_DEL ==
+ sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
+ return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
+
+ return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands);
+}
+
+/*
+ * Process an ABORT. (SHUTDOWN-SENT state)
+ *
+ * See sctp_sf_do_9_1_abort().
+ */ +sctp_disposition_t sctp_sf_shutdown_sent_abort(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + struct sctp_chunk *chunk = arg; + + if (!sctp_vtag_verify_either(chunk, asoc)) + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); + + /* Make sure that the ABORT chunk has a valid length. + * Since this is an ABORT chunk, we have to discard it + * because of the following text: + * RFC 2960, Section 3.3.7 + * If an endpoint receives an ABORT with a format error or for an + * association that doesn't exist, it MUST silently discard it. + * Because the length is "invalid", we can't really discard just + * as we do not know its true length. So, to be safe, discard the + * packet. + */ + if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t))) + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); + + /* ADD-IP: Special case for ABORT chunks + * F4) One special consideration is that ABORT Chunks arriving + * destined to the IP address being deleted MUST be + * ignored (see Section 5.3.1 for further details). + */ + if (SCTP_ADDR_DEL == + sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest)) + return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands); + + /* Stop the T2-shutdown timer. */ + sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, + SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN)); + + /* Stop the T5-shutdown guard timer. */ + sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, + SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD)); + + return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands); +} + +/* + * Process an ABORT. (SHUTDOWN-ACK-SENT state) + * + * See sctp_sf_do_9_1_abort(). + */ +sctp_disposition_t sctp_sf_shutdown_ack_sent_abort( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + /* The same T2 timer, so we should be able to use + * common function with the SHUTDOWN-SENT state. + */ + return sctp_sf_shutdown_sent_abort(net, ep, asoc, type, arg, commands); +} + +/* + * Handle an Error received in COOKIE_ECHOED state. + * + * Only handle the error type of stale COOKIE Error, the other errors will + * be ignored. + * + * Inputs + * (endpoint, asoc, chunk) + * + * Outputs + * (asoc, reply_msg, msg_up, timers, counters) + * + * The return value is the disposition of the chunk. + */ +sctp_disposition_t sctp_sf_cookie_echoed_err(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + struct sctp_chunk *chunk = arg; + sctp_errhdr_t *err; + + if (!sctp_vtag_verify(chunk, asoc)) + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); + + /* Make sure that the ERROR chunk has a valid length. + * The parameter walking depends on this as well. + */ + if (!sctp_chunk_length_valid(chunk, sizeof(sctp_operr_chunk_t))) + return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, + commands); + + /* Process the error here */ + /* FUTURE FIXME: When PR-SCTP related and other optional + * parms are emitted, this will have to change to handle multiple + * errors. 
+ */ + sctp_walk_errors(err, chunk->chunk_hdr) { + if (SCTP_ERROR_STALE_COOKIE == err->cause) + return sctp_sf_do_5_2_6_stale(net, ep, asoc, type, + arg, commands); + } + + /* It is possible to have malformed error causes, and that + * will cause us to end the walk early. However, since + * we are discarding the packet, there should be no adverse + * affects. + */ + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); +} + +/* + * Handle a Stale COOKIE Error + * + * Section: 5.2.6 Handle Stale COOKIE Error + * If the association is in the COOKIE-ECHOED state, the endpoint may elect + * one of the following three alternatives. + * ... + * 3) Send a new INIT chunk to the endpoint, adding a Cookie + * Preservative parameter requesting an extension to the lifetime of + * the State Cookie. When calculating the time extension, an + * implementation SHOULD use the RTT information measured based on the + * previous COOKIE ECHO / ERROR exchange, and should add no more + * than 1 second beyond the measured RTT, due to long State Cookie + * lifetimes making the endpoint more subject to a replay attack. + * + * Verification Tag: Not explicit, but safe to ignore. + * + * Inputs + * (endpoint, asoc, chunk) + * + * Outputs + * (asoc, reply_msg, msg_up, timers, counters) + * + * The return value is the disposition of the chunk. + */ +static sctp_disposition_t sctp_sf_do_5_2_6_stale(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + struct sctp_chunk *chunk = arg; + time_t stale; + sctp_cookie_preserve_param_t bht; + sctp_errhdr_t *err; + struct sctp_chunk *reply; + struct sctp_bind_addr *bp; + int attempts = asoc->init_err_counter + 1; + + if (attempts > asoc->max_init_attempts) { + sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, + SCTP_ERROR(ETIMEDOUT)); + sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, + SCTP_PERR(SCTP_ERROR_STALE_COOKIE)); + return SCTP_DISPOSITION_DELETE_TCB; + } + + err = (sctp_errhdr_t *)(chunk->skb->data); + + /* When calculating the time extension, an implementation + * SHOULD use the RTT information measured based on the + * previous COOKIE ECHO / ERROR exchange, and should add no + * more than 1 second beyond the measured RTT, due to long + * State Cookie lifetimes making the endpoint more subject to + * a replay attack. + * Measure of Staleness's unit is usec. (1/1000000 sec) + * Suggested Cookie Life-span Increment's unit is msec. + * (1/1000 sec) + * In general, if you use the suggested cookie life, the value + * found in the field of measure of staleness should be doubled + * to give ample time to retransmit the new cookie and thus + * yield a higher probability of success on the reattempt. + */ + stale = ntohl(*(__be32 *)((u8 *)err + sizeof(sctp_errhdr_t))); + stale = (stale * 2) / 1000; + + bht.param_hdr.type = SCTP_PARAM_COOKIE_PRESERVATIVE; + bht.param_hdr.length = htons(sizeof(bht)); + bht.lifespan_increment = htonl(stale); + + /* Build that new INIT chunk. 
*/ + bp = (struct sctp_bind_addr *) &asoc->base.bind_addr; + reply = sctp_make_init(asoc, bp, GFP_ATOMIC, sizeof(bht)); + if (!reply) + goto nomem; + + sctp_addto_chunk(reply, sizeof(bht), &bht); + + /* Clear peer's init_tag cached in assoc as we are sending a new INIT */ + sctp_add_cmd_sf(commands, SCTP_CMD_CLEAR_INIT_TAG, SCTP_NULL()); + + /* Stop pending T3-rtx and heartbeat timers */ + sctp_add_cmd_sf(commands, SCTP_CMD_T3_RTX_TIMERS_STOP, SCTP_NULL()); + sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_STOP, SCTP_NULL()); + + /* Delete non-primary peer ip addresses since we are transitioning + * back to the COOKIE-WAIT state + */ + sctp_add_cmd_sf(commands, SCTP_CMD_DEL_NON_PRIMARY, SCTP_NULL()); + + /* If we've sent any data bundled with COOKIE-ECHO we will need to + * resend + */ + sctp_add_cmd_sf(commands, SCTP_CMD_T1_RETRAN, + SCTP_TRANSPORT(asoc->peer.primary_path)); + + /* Cast away the const modifier, as we want to just + * rerun it through as a sideffect. + */ + sctp_add_cmd_sf(commands, SCTP_CMD_INIT_COUNTER_INC, SCTP_NULL()); + + sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, + SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE)); + sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, + SCTP_STATE(SCTP_STATE_COOKIE_WAIT)); + sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START, + SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); + + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply)); + + return SCTP_DISPOSITION_CONSUME; + +nomem: + return SCTP_DISPOSITION_NOMEM; +} + +/* + * Process an ABORT. + * + * Section: 9.1 + * After checking the Verification Tag, the receiving endpoint shall + * remove the association from its record, and shall report the + * termination to its upper layer. + * + * Verification Tag: 8.5.1 Exceptions in Verification Tag Rules + * B) Rules for packet carrying ABORT: + * + * - The endpoint shall always fill in the Verification Tag field of the + * outbound packet with the destination endpoint's tag value if it + * is known. + * + * - If the ABORT is sent in response to an OOTB packet, the endpoint + * MUST follow the procedure described in Section 8.4. + * + * - The receiver MUST accept the packet if the Verification Tag + * matches either its own tag, OR the tag of its peer. Otherwise, the + * receiver MUST silently discard the packet and take no further + * action. + * + * Inputs + * (endpoint, asoc, chunk) + * + * Outputs + * (asoc, reply_msg, msg_up, timers, counters) + * + * The return value is the disposition of the chunk. + */ +sctp_disposition_t sctp_sf_do_9_1_abort(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + struct sctp_chunk *chunk = arg; + + if (!sctp_vtag_verify_either(chunk, asoc)) + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); + + /* Make sure that the ABORT chunk has a valid length. + * Since this is an ABORT chunk, we have to discard it + * because of the following text: + * RFC 2960, Section 3.3.7 + * If an endpoint receives an ABORT with a format error or for an + * association that doesn't exist, it MUST silently discard it. + * Because the length is "invalid", we can't really discard just + * as we do not know its true length. So, to be safe, discard the + * packet. 
+ */ + if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t))) + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); + + /* ADD-IP: Special case for ABORT chunks + * F4) One special consideration is that ABORT Chunks arriving + * destined to the IP address being deleted MUST be + * ignored (see Section 5.3.1 for further details). + */ + if (SCTP_ADDR_DEL == + sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest)) + return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands); + + return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands); +} + +static sctp_disposition_t __sctp_sf_do_9_1_abort(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + struct sctp_chunk *chunk = arg; + unsigned int len; + __be16 error = SCTP_ERROR_NO_ERROR; + + /* See if we have an error cause code in the chunk. */ + len = ntohs(chunk->chunk_hdr->length); + if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr)) { + + sctp_errhdr_t *err; + sctp_walk_errors(err, chunk->chunk_hdr); + if ((void *)err != (void *)chunk->chunk_end) + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); + + error = ((sctp_errhdr_t *)chunk->skb->data)->cause; + } + + sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNRESET)); + /* ASSOC_FAILED will DELETE_TCB. */ + sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_PERR(error)); + SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS); + SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB); + + return SCTP_DISPOSITION_ABORT; +} + +/* + * Process an ABORT. (COOKIE-WAIT state) + * + * See sctp_sf_do_9_1_abort() above. + */ +sctp_disposition_t sctp_sf_cookie_wait_abort(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + struct sctp_chunk *chunk = arg; + unsigned int len; + __be16 error = SCTP_ERROR_NO_ERROR; + + if (!sctp_vtag_verify_either(chunk, asoc)) + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); + + /* Make sure that the ABORT chunk has a valid length. + * Since this is an ABORT chunk, we have to discard it + * because of the following text: + * RFC 2960, Section 3.3.7 + * If an endpoint receives an ABORT with a format error or for an + * association that doesn't exist, it MUST silently discard it. + * Because the length is "invalid", we can't really discard just + * as we do not know its true length. So, to be safe, discard the + * packet. + */ + if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t))) + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); + + /* See if we have an error cause code in the chunk. */ + len = ntohs(chunk->chunk_hdr->length); + if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr)) + error = ((sctp_errhdr_t *)chunk->skb->data)->cause; + + return sctp_stop_t1_and_abort(net, commands, error, ECONNREFUSED, asoc, + chunk->transport); +} + +/* + * Process an incoming ICMP as an ABORT. (COOKIE-WAIT state) + */ +sctp_disposition_t sctp_sf_cookie_wait_icmp_abort(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + return sctp_stop_t1_and_abort(net, commands, SCTP_ERROR_NO_ERROR, + ENOPROTOOPT, asoc, + (struct sctp_transport *)arg); +} + +/* + * Process an ABORT. 
(COOKIE-ECHOED state)
+ */
+sctp_disposition_t sctp_sf_cookie_echoed_abort(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const sctp_subtype_t type,
+ void *arg,
+ sctp_cmd_seq_t *commands)
+{
+ /* There is a single T1 timer, so we should be able to use
+ * a common function with the COOKIE-WAIT state.
+ */
+ return sctp_sf_cookie_wait_abort(net, ep, asoc, type, arg, commands);
+}
+
+/*
+ * Stop T1 timer and abort association with "INIT failed".
+ *
+ * This is common code called by several sctp_sf_*_abort() functions above.
+ */
+static sctp_disposition_t sctp_stop_t1_and_abort(struct net *net,
+ sctp_cmd_seq_t *commands,
+ __be16 error, int sk_err,
+ const struct sctp_association *asoc,
+ struct sctp_transport *transport)
+{
+ pr_debug("%s: ABORT received (INIT)\n", __func__);
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
+ SCTP_STATE(SCTP_STATE_CLOSED));
+ SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+ sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
+ SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(sk_err));
+ /* CMD_INIT_FAILED will DELETE_TCB. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
+ SCTP_PERR(error));
+
+ return SCTP_DISPOSITION_ABORT;
+}
+
+/*
+ * sctp_sf_do_9_2_shut
+ *
+ * Section: 9.2
+ * Upon the reception of the SHUTDOWN, the peer endpoint shall
+ * - enter the SHUTDOWN-RECEIVED state,
+ *
+ * - stop accepting new data from its SCTP user
+ *
+ * - verify, by checking the Cumulative TSN Ack field of the chunk,
+ * that all its outstanding DATA chunks have been received by the
+ * SHUTDOWN sender.
+ *
+ * Once an endpoint has reached the SHUTDOWN-RECEIVED state it MUST NOT
+ * send a SHUTDOWN in response to a ULP request, and should discard
+ * subsequent SHUTDOWN chunks.
+ *
+ * If there are still outstanding DATA chunks left, the SHUTDOWN
+ * receiver shall continue to follow normal data transmission
+ * procedures defined in Section 6 until all outstanding DATA chunks
+ * are acknowledged; however, the SHUTDOWN receiver MUST NOT accept
+ * new data from its SCTP user.
+ *
+ * Verification Tag: 8.5 Verification Tag [Normal verification]
+ *
+ * Inputs
+ * (endpoint, asoc, chunk)
+ *
+ * Outputs
+ * (asoc, reply_msg, msg_up, timers, counters)
+ *
+ * The return value is the disposition of the chunk.
+ */
+sctp_disposition_t sctp_sf_do_9_2_shutdown(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const sctp_subtype_t type,
+ void *arg,
+ sctp_cmd_seq_t *commands)
+{
+ struct sctp_chunk *chunk = arg;
+ sctp_shutdownhdr_t *sdh;
+ sctp_disposition_t disposition;
+ struct sctp_ulpevent *ev;
+ __u32 ctsn;
+
+ if (!sctp_vtag_verify(chunk, asoc))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ /* Make sure that the SHUTDOWN chunk has a valid length. */
+ if (!sctp_chunk_length_valid(chunk,
+ sizeof(struct sctp_shutdown_chunk_t)))
+ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+ commands);
+
+ /* Convert the elaborate header. */
+ sdh = (sctp_shutdownhdr_t *)chunk->skb->data;
+ skb_pull(chunk->skb, sizeof(sctp_shutdownhdr_t));
+ chunk->subh.shutdown_hdr = sdh;
+ ctsn = ntohl(sdh->cum_tsn_ack);
+
+ if (TSN_lt(ctsn, asoc->ctsn_ack_point)) {
+ pr_debug("%s: ctsn:%x, ctsn_ack_point:%x\n", __func__, ctsn,
+ asoc->ctsn_ack_point);
+
+ return SCTP_DISPOSITION_DISCARD;
+ }
+
+ /* If the Cumulative TSN Ack is beyond the max TSN currently
+ * sent, terminate the association and respond to the
+ * sender with an ABORT.
+ */
+ if (!TSN_lt(ctsn, asoc->next_tsn))
+ return sctp_sf_violation_ctsn(net, ep, asoc, type, arg, commands);
+
+ /* API 5.3.1.5 SCTP_SHUTDOWN_EVENT
+ * When a peer sends a SHUTDOWN, SCTP delivers this notification to
+ * inform the application that it should cease sending data.
+ */
+ ev = sctp_ulpevent_make_shutdown_event(asoc, 0, GFP_ATOMIC);
+ if (!ev) {
+ disposition = SCTP_DISPOSITION_NOMEM;
+ goto out;
+ }
+ sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
+
+ /* Upon the reception of the SHUTDOWN, the peer endpoint shall
+ * - enter the SHUTDOWN-RECEIVED state,
+ * - stop accepting new data from its SCTP user
+ *
+ * [This is implicit in the new state.]
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
+ SCTP_STATE(SCTP_STATE_SHUTDOWN_RECEIVED));
+ disposition = SCTP_DISPOSITION_CONSUME;
+
+ if (sctp_outq_is_empty(&asoc->outqueue)) {
+ disposition = sctp_sf_do_9_2_shutdown_ack(net, ep, asoc, type,
+ arg, commands);
+ }
+
+ if (SCTP_DISPOSITION_NOMEM == disposition)
+ goto out;
+
+ /* - verify, by checking the Cumulative TSN Ack field of the
+ * chunk, that all its outstanding DATA chunks have been
+ * received by the SHUTDOWN sender.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_CTSN,
+ SCTP_BE32(chunk->subh.shutdown_hdr->cum_tsn_ack));
+
+out:
+ return disposition;
+}
+
+/*
+ * sctp_sf_do_9_2_shut_ctsn
+ *
+ * Once an endpoint has reached the SHUTDOWN-RECEIVED state,
+ * it MUST NOT send a SHUTDOWN in response to a ULP request.
+ * The Cumulative TSN Ack of the received SHUTDOWN chunk
+ * MUST be processed.
+ */
+sctp_disposition_t sctp_sf_do_9_2_shut_ctsn(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const sctp_subtype_t type,
+ void *arg,
+ sctp_cmd_seq_t *commands)
+{
+ struct sctp_chunk *chunk = arg;
+ sctp_shutdownhdr_t *sdh;
+ __u32 ctsn;
+
+ if (!sctp_vtag_verify(chunk, asoc))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
+ /* Make sure that the SHUTDOWN chunk has a valid length. */
+ if (!sctp_chunk_length_valid(chunk,
+ sizeof(struct sctp_shutdown_chunk_t)))
+ return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+ commands);
+
+ sdh = (sctp_shutdownhdr_t *)chunk->skb->data;
+ ctsn = ntohl(sdh->cum_tsn_ack);
+
+ if (TSN_lt(ctsn, asoc->ctsn_ack_point)) {
+ pr_debug("%s: ctsn:%x, ctsn_ack_point:%x\n", __func__, ctsn,
+ asoc->ctsn_ack_point);
+
+ return SCTP_DISPOSITION_DISCARD;
+ }
+
+ /* If the Cumulative TSN Ack is beyond the max TSN currently
+ * sent, terminate the association and respond to the
+ * sender with an ABORT.
+ */
+ if (!TSN_lt(ctsn, asoc->next_tsn))
+ return sctp_sf_violation_ctsn(net, ep, asoc, type, arg, commands);
+
+ /* verify, by checking the Cumulative TSN Ack field of the
+ * chunk, that all its outstanding DATA chunks have been
+ * received by the SHUTDOWN sender.
+ */
+ sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_CTSN,
+ SCTP_BE32(sdh->cum_tsn_ack));
+
+ return SCTP_DISPOSITION_CONSUME;
+}
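[Editor's note: the Cumulative TSN Ack window checks above rely on the kernel's TSN_lt()/TSN_lte() serial-number comparisons, which stay correct when the 32-bit TSN wraps. A user-space sketch of the idiom and of the SHUTDOWN acceptance window, illustrative only, not part of this patch:]

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* serial-number "less than": true if a precedes b modulo 2^32 */
static bool tsn_lt(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

int main(void)
{
	uint32_t ctsn_ack_point = 0xfffffffe, next_tsn = 0x00000003;
	uint32_t ctsn = 0x00000001;	/* wrapped, but still in the window */

	/* accept: not older than our ack point, not beyond what we sent */
	assert(!tsn_lt(ctsn, ctsn_ack_point));
	assert(tsn_lt(ctsn, next_tsn));
	return 0;
}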
+ */ +sctp_disposition_t sctp_sf_do_9_2_reshutack(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + struct sctp_chunk *chunk = (struct sctp_chunk *) arg; + struct sctp_chunk *reply; + + /* Make sure that the chunk has a valid length */ + if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) + return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, + commands); + + /* Since we are not going to really process this INIT, there + * is no point in verifying chunk boundaries. Just generate + * the SHUTDOWN ACK. + */ + reply = sctp_make_shutdown_ack(asoc, chunk); + if (NULL == reply) + goto nomem; + + /* Set the transport for the SHUTDOWN ACK chunk and the timeout for + * the T2-SHUTDOWN timer. + */ + sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T2, SCTP_CHUNK(reply)); + + /* and restart the T2-shutdown timer. */ + sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, + SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN)); + + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply)); + + return SCTP_DISPOSITION_CONSUME; +nomem: + return SCTP_DISPOSITION_NOMEM; +} + +/* + * sctp_sf_do_ecn_cwr + * + * Section: Appendix A: Explicit Congestion Notification + * + * CWR: + * + * RFC 2481 details a specific bit for a sender to send in the header of + * its next outbound TCP segment to indicate to its peer that it has + * reduced its congestion window. This is termed the CWR bit. For + * SCTP the same indication is made by including the CWR chunk. + * This chunk contains one data element, i.e. the TSN number that + * was sent in the ECNE chunk. This element represents the lowest + * TSN number in the datagram that was originally marked with the + * CE bit. + * + * Verification Tag: 8.5 Verification Tag [Normal verification] + * Inputs + * (endpoint, asoc, chunk) + * + * Outputs + * (asoc, reply_msg, msg_up, timers, counters) + * + * The return value is the disposition of the chunk. + */ +sctp_disposition_t sctp_sf_do_ecn_cwr(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + sctp_cwrhdr_t *cwr; + struct sctp_chunk *chunk = arg; + u32 lowest_tsn; + + if (!sctp_vtag_verify(chunk, asoc)) + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); + + if (!sctp_chunk_length_valid(chunk, sizeof(sctp_ecne_chunk_t))) + return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, + commands); + + cwr = (sctp_cwrhdr_t *) chunk->skb->data; + skb_pull(chunk->skb, sizeof(sctp_cwrhdr_t)); + + lowest_tsn = ntohl(cwr->lowest_tsn); + + /* Does this CWR ack the last sent congestion notification? */ + if (TSN_lte(asoc->last_ecne_tsn, lowest_tsn)) { + /* Stop sending ECNE. */ + sctp_add_cmd_sf(commands, + SCTP_CMD_ECN_CWR, + SCTP_U32(lowest_tsn)); + } + return SCTP_DISPOSITION_CONSUME; +} + +/* + * sctp_sf_do_ecne + * + * Section: Appendix A: Explicit Congestion Notification + * + * ECN-Echo + * + * RFC 2481 details a specific bit for a receiver to send back in its + * TCP acknowledgements to notify the sender of the Congestion + * Experienced (CE) bit having arrived from the network. For SCTP this + * same indication is made by including the ECNE chunk. This chunk + * contains one data element, i.e. the lowest TSN associated with the IP + * datagram marked with the CE bit.....
+ * + * Verification Tag: 8.5 Verification Tag [Normal verification] + * Inputs + * (endpoint, asoc, chunk) + * + * Outputs + * (asoc, reply_msg, msg_up, timers, counters) + * + * The return value is the disposition of the chunk. + */ +sctp_disposition_t sctp_sf_do_ecne(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + sctp_ecnehdr_t *ecne; + struct sctp_chunk *chunk = arg; + + if (!sctp_vtag_verify(chunk, asoc)) + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); + + if (!sctp_chunk_length_valid(chunk, sizeof(sctp_ecne_chunk_t))) + return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, + commands); + + ecne = (sctp_ecnehdr_t *) chunk->skb->data; + skb_pull(chunk->skb, sizeof(sctp_ecnehdr_t)); + + /* If this is a newer ECNE than the last CWR packet we sent out */ + sctp_add_cmd_sf(commands, SCTP_CMD_ECN_ECNE, + SCTP_U32(ntohl(ecne->lowest_tsn))); + + return SCTP_DISPOSITION_CONSUME; +} + +/* + * Section: 6.2 Acknowledgement on Reception of DATA Chunks + * + * The SCTP endpoint MUST always acknowledge the reception of each valid + * DATA chunk. + * + * The guidelines on delayed acknowledgement algorithm specified in + * Section 4.2 of [RFC2581] SHOULD be followed. Specifically, an + * acknowledgement SHOULD be generated for at least every second packet + * (not every second DATA chunk) received, and SHOULD be generated within + * 200 ms of the arrival of any unacknowledged DATA chunk. In some + * situations it may be beneficial for an SCTP transmitter to be more + * conservative than the algorithms detailed in this document allow. + * However, an SCTP transmitter MUST NOT be more aggressive than the + * following algorithms allow. + * + * A SCTP receiver MUST NOT generate more than one SACK for every + * incoming packet, other than to update the offered window as the + * receiving application consumes new data. + * + * Verification Tag: 8.5 Verification Tag [Normal verification] + * + * Inputs + * (endpoint, asoc, chunk) + * + * Outputs + * (asoc, reply_msg, msg_up, timers, counters) + * + * The return value is the disposition of the chunk. 
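 + * + * Editor's note (worked example, not part of the original source): under + * these rules a receiver seeing four back-to-back packets P1..P4, each + * carrying DATA, may delay and SACK after P2 and again after P4 (every + * second packet), must SACK any still-unacknowledged DATA within 200 ms, + * and must never generate more than one SACK for a single incoming + * packet.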
+ */ +sctp_disposition_t sctp_sf_eat_data_6_2(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + struct sctp_chunk *chunk = arg; + sctp_arg_t force = SCTP_NOFORCE(); + int error; + + if (!sctp_vtag_verify(chunk, asoc)) { + sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, + SCTP_NULL()); + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); + } + + if (!sctp_chunk_length_valid(chunk, sizeof(sctp_data_chunk_t))) + return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, + commands); + + error = sctp_eat_data(asoc, chunk, commands); + switch (error) { + case SCTP_IERROR_NO_ERROR: + break; + case SCTP_IERROR_HIGH_TSN: + case SCTP_IERROR_BAD_STREAM: + SCTP_INC_STATS(net, SCTP_MIB_IN_DATA_CHUNK_DISCARDS); + goto discard_noforce; + case SCTP_IERROR_DUP_TSN: + case SCTP_IERROR_IGNORE_TSN: + SCTP_INC_STATS(net, SCTP_MIB_IN_DATA_CHUNK_DISCARDS); + goto discard_force; + case SCTP_IERROR_NO_DATA: + goto consume; + case SCTP_IERROR_PROTO_VIOLATION: + return sctp_sf_abort_violation(net, ep, asoc, chunk, commands, + (u8 *)chunk->subh.data_hdr, sizeof(sctp_datahdr_t)); + default: + BUG(); + } + + if (chunk->chunk_hdr->flags & SCTP_DATA_SACK_IMM) + force = SCTP_FORCE(); + + if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) { + sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, + SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); + } + + /* If this is the last chunk in a packet, we need to count it + * toward sack generation. Note that we need to SACK every + * OTHER packet containing data chunks, EVEN IF WE DISCARD + * THEM. We elect to NOT generate SACKs if the chunk fails + * the verification tag test. + * + * RFC 2960 6.2 Acknowledgement on Reception of DATA Chunks + * + * The SCTP endpoint MUST always acknowledge the reception of + * each valid DATA chunk. + * + * The guidelines on delayed acknowledgement algorithm + * specified in Section 4.2 of [RFC2581] SHOULD be followed. + * Specifically, an acknowledgement SHOULD be generated for at + * least every second packet (not every second DATA chunk) + * received, and SHOULD be generated within 200 ms of the + * arrival of any unacknowledged DATA chunk. In some + * situations it may be beneficial for an SCTP transmitter to + * be more conservative than the algorithms detailed in this + * document allow. However, an SCTP transmitter MUST NOT be + * more aggressive than the following algorithms allow. + */ + if (chunk->end_of_packet) + sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, force); + + return SCTP_DISPOSITION_CONSUME; + +discard_force: + /* RFC 2960 6.2 Acknowledgement on Reception of DATA Chunks + * + * When a packet arrives with duplicate DATA chunk(s) and with + * no new DATA chunk(s), the endpoint MUST immediately send a + * SACK with no delay. If a packet arrives with duplicate + * DATA chunk(s) bundled with new DATA chunks, the endpoint + * MAY immediately send a SACK. Normally receipt of duplicate + * DATA chunks will occur when the original SACK chunk was lost + * and the peer's RTO has expired. The duplicate TSN number(s) + * SHOULD be reported in the SACK as duplicate. + */ + /* In our case, we split the MAY SACK advice up whether or not + * the last chunk is a duplicate.
+ */ + if (chunk->end_of_packet) + sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE()); + return SCTP_DISPOSITION_DISCARD; + +discard_noforce: + if (chunk->end_of_packet) + sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, force); + + return SCTP_DISPOSITION_DISCARD; +consume: + return SCTP_DISPOSITION_CONSUME; + +} + +/* + * sctp_sf_eat_data_fast_4_4 + * + * Section: 4 (4) + * (4) In SHUTDOWN-SENT state the endpoint MUST acknowledge any received + * DATA chunks without delay. + * + * Verification Tag: 8.5 Verification Tag [Normal verification] + * Inputs + * (endpoint, asoc, chunk) + * + * Outputs + * (asoc, reply_msg, msg_up, timers, counters) + * + * The return value is the disposition of the chunk. + */ +sctp_disposition_t sctp_sf_eat_data_fast_4_4(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + struct sctp_chunk *chunk = arg; + int error; + + if (!sctp_vtag_verify(chunk, asoc)) { + sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, + SCTP_NULL()); + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); + } + + if (!sctp_chunk_length_valid(chunk, sizeof(sctp_data_chunk_t))) + return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, + commands); + + error = sctp_eat_data(asoc, chunk, commands); + switch (error) { + case SCTP_IERROR_NO_ERROR: + case SCTP_IERROR_HIGH_TSN: + case SCTP_IERROR_DUP_TSN: + case SCTP_IERROR_IGNORE_TSN: + case SCTP_IERROR_BAD_STREAM: + break; + case SCTP_IERROR_NO_DATA: + goto consume; + case SCTP_IERROR_PROTO_VIOLATION: + return sctp_sf_abort_violation(net, ep, asoc, chunk, commands, + (u8 *)chunk->subh.data_hdr, sizeof(sctp_datahdr_t)); + default: + BUG(); + } + + /* Go ahead and force a SACK, since we are shutting down. */ + + /* Implementor's Guide. + * + * While in SHUTDOWN-SENT state, the SHUTDOWN sender MUST immediately + * respond to each received packet containing one or more DATA chunk(s) + * with a SACK, a SHUTDOWN chunk, and restart the T2-shutdown timer + */ + if (chunk->end_of_packet) { + /* We must delay the chunk creation since the cumulative + * TSN has not been updated yet. + */ + sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SHUTDOWN, SCTP_NULL()); + sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE()); + sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, + SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN)); + } + +consume: + return SCTP_DISPOSITION_CONSUME; +} + +/* + * Section: 6.2 Processing a Received SACK + * D) Any time a SACK arrives, the endpoint performs the following: + * + * i) If Cumulative TSN Ack is less than the Cumulative TSN Ack Point, + * then drop the SACK. Since Cumulative TSN Ack is monotonically + * increasing, a SACK whose Cumulative TSN Ack is less than the + * Cumulative TSN Ack Point indicates an out-of-order SACK. + * + * ii) Set rwnd equal to the newly received a_rwnd minus the number + * of bytes still outstanding after processing the Cumulative TSN Ack + * and the Gap Ack Blocks. + * + * iii) If the SACK is missing a TSN that was previously + * acknowledged via a Gap Ack Block (e.g., the data receiver + * reneged on the data), then mark the corresponding DATA chunk + * as available for retransmit: Mark it as missing for fast + * retransmit as described in Section 7.2.4 and if no retransmit + * timer is running for the destination address to which the DATA + * chunk was originally transmitted, then T3-rtx is started for + * that destination address.
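 + * + * Editor's note (worked example, not part of the original source): for + * rule ii), if the SACK carries a_rwnd = 65536 and 4096 bytes remain + * outstanding after the Cumulative TSN Ack and the Gap Ack Blocks have + * been processed, the sender computes rwnd = 65536 - 4096 = 61440 bytes.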
+ * + * Verification Tag: 8.5 Verification Tag [Normal verification] + * + * Inputs + * (endpoint, asoc, chunk) + * + * Outputs + * (asoc, reply_msg, msg_up, timers, counters) + * + * The return value is the disposition of the chunk. + */ +sctp_disposition_t sctp_sf_eat_sack_6_2(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + struct sctp_chunk *chunk = arg; + sctp_sackhdr_t *sackh; + __u32 ctsn; + + if (!sctp_vtag_verify(chunk, asoc)) + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); + + /* Make sure that the SACK chunk has a valid length. */ + if (!sctp_chunk_length_valid(chunk, sizeof(sctp_sack_chunk_t))) + return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, + commands); + + /* Pull the SACK chunk from the data buffer */ + sackh = sctp_sm_pull_sack(chunk); + /* Was this a bogus SACK? */ + if (!sackh) + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); + chunk->subh.sack_hdr = sackh; + ctsn = ntohl(sackh->cum_tsn_ack); + + /* i) If Cumulative TSN Ack is less than the Cumulative TSN + * Ack Point, then drop the SACK. Since Cumulative TSN + * Ack is monotonically increasing, a SACK whose + * Cumulative TSN Ack is less than the Cumulative TSN Ack + * Point indicates an out-of-order SACK. + */ + if (TSN_lt(ctsn, asoc->ctsn_ack_point)) { + pr_debug("%s: ctsn:%x, ctsn_ack_point:%x\n", __func__, ctsn, + asoc->ctsn_ack_point); + + return SCTP_DISPOSITION_DISCARD; + } + + /* If the Cumulative TSN Ack is beyond the max TSN currently + * sent, terminate the association and respond to the + * sender with an ABORT. + */ + if (!TSN_lt(ctsn, asoc->next_tsn)) + return sctp_sf_violation_ctsn(net, ep, asoc, type, arg, commands); + + /* Return this SACK for further processing. */ + sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK, SCTP_CHUNK(chunk)); + + /* Note: We do the rest of the work on the PROCESS_SACK + * side effect. + */ + return SCTP_DISPOSITION_CONSUME; +} + +/* + * Generate an ABORT in response to a packet. + * + * Section: 8.4 Handle "Out of the blue" Packets, sctpimpguide 2.41 + * + * 8) The receiver should respond to the sender of the OOTB packet with + * an ABORT. When sending the ABORT, the receiver of the OOTB packet + * MUST fill in the Verification Tag field of the outbound packet + * with the value found in the Verification Tag field of the OOTB + * packet and set the T-bit in the Chunk Flags to indicate that the + * Verification Tag is reflected. After sending this ABORT, the + * receiver of the OOTB packet shall discard the OOTB packet and take + * no further action. + * + * Verification Tag: + * + * The return value is the disposition of the chunk. +*/ +static sctp_disposition_t sctp_sf_tabort_8_4_8(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + struct sctp_packet *packet = NULL; + struct sctp_chunk *chunk = arg; + struct sctp_chunk *abort; + + packet = sctp_ootb_pkt_new(net, asoc, chunk); + + if (packet) { + /* Make an ABORT. The T bit will be set if the asoc + * is NULL. + */ + abort = sctp_make_abort(asoc, chunk, 0); + if (!abort) { + sctp_ootb_pkt_free(packet); + return SCTP_DISPOSITION_NOMEM; + } + + /* Reflect vtag if T-Bit is set */ + if (sctp_test_T_bit(abort)) + packet->vtag = ntohl(chunk->sctp_hdr->vtag); + + /* Set the skb to the owning sock for accounting.
*/ + abort->skb->sk = ep->base.sk; + + sctp_packet_append_chunk(packet, abort); + + sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, + SCTP_PACKET(packet)); + + SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS); + + sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); + return SCTP_DISPOSITION_CONSUME; + } + + return SCTP_DISPOSITION_NOMEM; +} + +/* + * Received an ERROR chunk from peer. Generate SCTP_REMOTE_ERROR + * event as ULP notification for each cause included in the chunk. + * + * API 5.3.1.3 - SCTP_REMOTE_ERROR + * + * The return value is the disposition of the chunk. +*/ +sctp_disposition_t sctp_sf_operr_notify(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + struct sctp_chunk *chunk = arg; + sctp_errhdr_t *err; + + if (!sctp_vtag_verify(chunk, asoc)) + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); + + /* Make sure that the ERROR chunk has a valid length. */ + if (!sctp_chunk_length_valid(chunk, sizeof(sctp_operr_chunk_t))) + return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, + commands); + sctp_walk_errors(err, chunk->chunk_hdr); + if ((void *)err != (void *)chunk->chunk_end) + return sctp_sf_violation_paramlen(net, ep, asoc, type, arg, + (void *)err, commands); + + sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_OPERR, + SCTP_CHUNK(chunk)); + + return SCTP_DISPOSITION_CONSUME; +} + +/* + * Process an inbound SHUTDOWN ACK. + * + * From Section 9.2: + * Upon the receipt of the SHUTDOWN ACK, the SHUTDOWN sender shall + * stop the T2-shutdown timer, send a SHUTDOWN COMPLETE chunk to its + * peer, and remove all record of the association. + * + * The return value is the disposition. + */ +sctp_disposition_t sctp_sf_do_9_2_final(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + struct sctp_chunk *chunk = arg; + struct sctp_chunk *reply; + struct sctp_ulpevent *ev; + + if (!sctp_vtag_verify(chunk, asoc)) + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); + + /* Make sure that the SHUTDOWN_ACK chunk has a valid length. */ + if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) + return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, + commands); + /* 10.2 H) SHUTDOWN COMPLETE notification + * + * When SCTP completes the shutdown procedures (section 9.2) this + * notification is passed to the upper layer. 
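 + * + * Editor's note (userspace view, illustrative only): if the application + * subscribed to SCTP_ASSOC_CHANGE events, this eventually surfaces as a + * message read via recvmsg() with MSG_NOTIFICATION set, whose struct + * sctp_assoc_change carries sac_state == SCTP_SHUTDOWN_COMP.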
+ */ + ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_SHUTDOWN_COMP, + 0, 0, 0, NULL, GFP_ATOMIC); + if (!ev) + goto nomem; + + /* ...send a SHUTDOWN COMPLETE chunk to its peer, */ + reply = sctp_make_shutdown_complete(asoc, chunk); + if (!reply) + goto nomem_chunk; + + /* Do all the commands now (after allocation), so that we + * have consistent state if memory allocation fails + */ + sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); + + /* Upon the receipt of the SHUTDOWN ACK, the SHUTDOWN sender shall + * stop the T2-shutdown timer, + */ + sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, + SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN)); + + sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, + SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD)); + + sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, + SCTP_STATE(SCTP_STATE_CLOSED)); + SCTP_INC_STATS(net, SCTP_MIB_SHUTDOWNS); + SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB); + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply)); + + /* ...and remove all record of the association. */ + sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); + return SCTP_DISPOSITION_DELETE_TCB; + +nomem_chunk: + sctp_ulpevent_free(ev); +nomem: + return SCTP_DISPOSITION_NOMEM; +} + +/* + * RFC 2960, 8.4 - Handle "Out of the blue" Packets, sctpimpguide 2.41. + * + * 5) If the packet contains a SHUTDOWN ACK chunk, the receiver should + * respond to the sender of the OOTB packet with a SHUTDOWN COMPLETE. + * When sending the SHUTDOWN COMPLETE, the receiver of the OOTB + * packet must fill in the Verification Tag field of the outbound + * packet with the Verification Tag received in the SHUTDOWN ACK and + * set the T-bit in the Chunk Flags to indicate that the Verification + * Tag is reflected. + * + * 8) The receiver should respond to the sender of the OOTB packet with + * an ABORT. When sending the ABORT, the receiver of the OOTB packet + * MUST fill in the Verification Tag field of the outbound packet + * with the value found in the Verification Tag field of the OOTB + * packet and set the T-bit in the Chunk Flags to indicate that the + * Verification Tag is reflected. After sending this ABORT, the + * receiver of the OOTB packet shall discard the OOTB packet and take + * no further action. + */ +sctp_disposition_t sctp_sf_ootb(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + struct sctp_chunk *chunk = arg; + struct sk_buff *skb = chunk->skb; + sctp_chunkhdr_t *ch; + sctp_errhdr_t *err; + __u8 *ch_end; + int ootb_shut_ack = 0; + int ootb_cookie_ack = 0; + + SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES); + + ch = (sctp_chunkhdr_t *) chunk->chunk_hdr; + do { + /* Report violation if the chunk is less than minimal */ + if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t)) + return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, + commands); + + /* Now that we know we at least have a chunk header, + * do things that are type appropriate. + */ + if (SCTP_CID_SHUTDOWN_ACK == ch->type) + ootb_shut_ack = 1; + + /* RFC 2960, Section 3.3.7 + * Moreover, under any circumstances, an endpoint that + * receives an ABORT MUST NOT respond to that ABORT by + * sending an ABORT of its own. + */ + if (SCTP_CID_ABORT == ch->type) + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); + + /* RFC 8.4, 7) If the packet contains a "Stale cookie" ERROR + * or a COOKIE ACK the SCTP Packet should be silently + * discarded.
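 + * + * Editor's note (summary, not part of the original source): the net + * effect of the walk below is that an OOTB SHUTDOWN ACK draws a SHUTDOWN + * COMPLETE with the T bit set, an OOTB ABORT, COOKIE ACK or stale-cookie + * ERROR is silently dropped, and any other OOTB chunk draws a + * reflected-vtag ABORT via sctp_sf_tabort_8_4_8().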
+ */ + + if (SCTP_CID_COOKIE_ACK == ch->type) + ootb_cookie_ack = 1; + + if (SCTP_CID_ERROR == ch->type) { + sctp_walk_errors(err, ch) { + if (SCTP_ERROR_STALE_COOKIE == err->cause) { + ootb_cookie_ack = 1; + break; + } + } + } + + /* Report violation if chunk len overflows */ + ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length)); + if (ch_end > skb_tail_pointer(skb)) + return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, + commands); + + ch = (sctp_chunkhdr_t *) ch_end; + } while (ch_end < skb_tail_pointer(skb)); + + if (ootb_shut_ack) + return sctp_sf_shut_8_4_5(net, ep, asoc, type, arg, commands); + else if (ootb_cookie_ack) + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); + else + return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands); +} + +/* + * Handle an "Out of the blue" SHUTDOWN ACK. + * + * Section: 8.4 5, sctpimpguide 2.41. + * + * 5) If the packet contains a SHUTDOWN ACK chunk, the receiver should + * respond to the sender of the OOTB packet with a SHUTDOWN COMPLETE. + * When sending the SHUTDOWN COMPLETE, the receiver of the OOTB + * packet must fill in the Verification Tag field of the outbound + * packet with the Verification Tag received in the SHUTDOWN ACK and + * set the T-bit in the Chunk Flags to indicate that the Verification + * Tag is reflected. + * + * Inputs + * (endpoint, asoc, type, arg, commands) + * + * Outputs + * (sctp_disposition_t) + * + * The return value is the disposition of the chunk. + */ +static sctp_disposition_t sctp_sf_shut_8_4_5(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + struct sctp_packet *packet = NULL; + struct sctp_chunk *chunk = arg; + struct sctp_chunk *shut; + + packet = sctp_ootb_pkt_new(net, asoc, chunk); + + if (packet) { + /* Make a SHUTDOWN_COMPLETE. + * The T bit will be set if the asoc is NULL. + */ + shut = sctp_make_shutdown_complete(asoc, chunk); + if (!shut) { + sctp_ootb_pkt_free(packet); + return SCTP_DISPOSITION_NOMEM; + } + + /* Reflect vtag if T-Bit is set */ + if (sctp_test_T_bit(shut)) + packet->vtag = ntohl(chunk->sctp_hdr->vtag); + + /* Set the skb to the owning sock for accounting. */ + shut->skb->sk = ep->base.sk; + + sctp_packet_append_chunk(packet, shut); + + sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, + SCTP_PACKET(packet)); + + SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS); + + /* If the chunk length is invalid, we don't want to process + * the rest of the packet. + */ + if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); + + /* We need to discard the rest of the packet to prevent + * potential bombing attacks from additional bundled chunks. + * This is documented in SCTP Threats ID. + */ + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); + } + + return SCTP_DISPOSITION_NOMEM; +} + +/* + * Handle SHUTDOWN ACK in COOKIE_ECHOED or COOKIE_WAIT state. + * + * Verification Tag: 8.5.1 E) Rules for packet carrying a SHUTDOWN ACK + * If the receiver is in COOKIE-ECHOED or COOKIE-WAIT state the + * procedures in section 8.4 SHOULD be followed, in other words it + * should be treated as an Out Of The Blue packet. + * [This means that we do NOT check the Verification Tag on these + * chunks.
--piggy ] + * + */ +sctp_disposition_t sctp_sf_do_8_5_1_E_sa(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + struct sctp_chunk *chunk = arg; + + /* Make sure that the SHUTDOWN_ACK chunk has a valid length. */ + if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) + return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, + commands); + + /* Although we do have an association in this case, it corresponds + * to a restarted association. So the packet is treated as an OOTB + * packet and the state function that handles OOTB SHUTDOWN_ACK is + * called with a NULL association. + */ + SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES); + + return sctp_sf_shut_8_4_5(net, ep, NULL, type, arg, commands); +} + +/* ADDIP Section 4.2 Upon reception of an ASCONF Chunk. */ +sctp_disposition_t sctp_sf_do_asconf(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, void *arg, + sctp_cmd_seq_t *commands) +{ + struct sctp_chunk *chunk = arg; + struct sctp_chunk *asconf_ack = NULL; + struct sctp_paramhdr *err_param = NULL; + sctp_addiphdr_t *hdr; + __u32 serial; + + if (!sctp_vtag_verify(chunk, asoc)) { + sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, + SCTP_NULL()); + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); + } + + /* ADD-IP: Section 4.1.1 + * This chunk MUST be sent in an authenticated way by using + * the mechanism defined in [I-D.ietf-tsvwg-sctp-auth]. If this chunk + * is received unauthenticated it MUST be silently discarded as + * described in [I-D.ietf-tsvwg-sctp-auth]. + */ + if (!net->sctp.addip_noauth && !chunk->auth) + return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands); + + /* Make sure that the ASCONF ADDIP chunk has a valid length. */ + if (!sctp_chunk_length_valid(chunk, sizeof(sctp_addip_chunk_t))) + return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, + commands); + + hdr = (sctp_addiphdr_t *)chunk->skb->data; + serial = ntohl(hdr->serial); + + /* Verify the ASCONF chunk before processing it. */ + if (!sctp_verify_asconf(asoc, chunk, true, &err_param)) + return sctp_sf_violation_paramlen(net, ep, asoc, type, arg, + (void *)err_param, commands); + + /* ADDIP 5.2 E1) Compare the value of the serial number to the value + * the endpoint stored in a new association variable + * 'Peer-Serial-Number'. + */ + if (serial == asoc->peer.addip_serial + 1) { + /* If this is the first instance of ASCONF in the packet, + * we can clean our old ASCONF-ACKs. + */ + if (!chunk->has_asconf) + sctp_assoc_clean_asconf_ack_cache(asoc); + + /* ADDIP 5.2 E4) When the Sequence Number matches the next one + * expected, process the ASCONF as described below and after + * processing the ASCONF Chunk, append an ASCONF-ACK Chunk to + * the response packet and cache a copy of it (in the event it + * later needs to be retransmitted). + * + * Essentially, do V1-V5. + */ + asconf_ack = sctp_process_asconf((struct sctp_association *) + asoc, chunk); + if (!asconf_ack) + return SCTP_DISPOSITION_NOMEM; + } else if (serial < asoc->peer.addip_serial + 1) { + /* ADDIP 5.2 E2) + * If the value found in the Sequence Number is less than the + * ('Peer- Sequence-Number' + 1), simply skip to the next + * ASCONF, and include in the outbound response packet + * any previously cached ASCONF-ACK response that was + * sent and saved that matches the Sequence Number of the + * ASCONF. 
Note: It is possible that no cached ASCONF-ACK + * Chunk exists. This will occur when an older ASCONF + * arrives out of order. In such a case, the receiver + * should skip the ASCONF Chunk and not include ASCONF-ACK + * Chunk for that chunk. + */ + asconf_ack = sctp_assoc_lookup_asconf_ack(asoc, hdr->serial); + if (!asconf_ack) + return SCTP_DISPOSITION_DISCARD; + + /* Reset the transport so that we select the correct one + * this time around. This is to make sure that we don't + * accidentally use a stale transport that's been removed. + */ + asconf_ack->transport = NULL; + } else { + /* ADDIP 5.2 E5) Otherwise, the ASCONF Chunk is discarded since + * it must be either a stale packet or from an attacker. + */ + return SCTP_DISPOSITION_DISCARD; + } + + /* ADDIP 5.2 E6) The destination address of the SCTP packet + * containing the ASCONF-ACK Chunks MUST be the source address of + * the SCTP packet that held the ASCONF Chunks. + * + * To do this properly, we'll set the destination address of the chunk + * and at the transmit time, will try to look up the transport to use. + * Since ASCONFs may be bundled, the correct transport may not be + * created until we process the entire packet, thus this workaround. + */ + asconf_ack->dest = chunk->source; + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(asconf_ack)); + if (asoc->new_transport) { + sctp_sf_heartbeat(ep, asoc, type, asoc->new_transport, commands); + ((struct sctp_association *)asoc)->new_transport = NULL; + } + + return SCTP_DISPOSITION_CONSUME; +} + +/* + * ADDIP Section 4.3 General rules for address manipulation + * When building TLV parameters for the ASCONF Chunk that will add or + * delete IP addresses the D0 to D13 rules should be applied: + */ +sctp_disposition_t sctp_sf_do_asconf_ack(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, void *arg, + sctp_cmd_seq_t *commands) +{ + struct sctp_chunk *asconf_ack = arg; + struct sctp_chunk *last_asconf = asoc->addip_last_asconf; + struct sctp_chunk *abort; + struct sctp_paramhdr *err_param = NULL; + sctp_addiphdr_t *addip_hdr; + __u32 sent_serial, rcvd_serial; + + if (!sctp_vtag_verify(asconf_ack, asoc)) { + sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, + SCTP_NULL()); + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); + } + + /* ADD-IP, Section 4.1.2: + * This chunk MUST be sent in an authenticated way by using + * the mechanism defined in [I-D.ietf-tsvwg-sctp-auth]. If this chunk + * is received unauthenticated it MUST be silently discarded as + * described in [I-D.ietf-tsvwg-sctp-auth]. + */ + if (!net->sctp.addip_noauth && !asconf_ack->auth) + return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands); + + /* Make sure that the ADDIP chunk has a valid length. */ + if (!sctp_chunk_length_valid(asconf_ack, sizeof(sctp_addip_chunk_t))) + return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, + commands); + + addip_hdr = (sctp_addiphdr_t *)asconf_ack->skb->data; + rcvd_serial = ntohl(addip_hdr->serial); + + /* Verify the ASCONF-ACK chunk before processing it.
*/ + if (!sctp_verify_asconf(asoc, asconf_ack, false, &err_param)) + return sctp_sf_violation_paramlen(net, ep, asoc, type, arg, + (void *)err_param, commands); + + if (last_asconf) { + addip_hdr = (sctp_addiphdr_t *)last_asconf->subh.addip_hdr; + sent_serial = ntohl(addip_hdr->serial); + } else { + sent_serial = asoc->addip_serial - 1; + } + + /* D0) If an endpoint receives an ASCONF-ACK that is greater than or + * equal to the next serial number to be used but no ASCONF chunk is + * outstanding, the endpoint MUST ABORT the association. Note that a + * sequence number is considered greater than the current one if it is + * no more than 2^^31-1 larger than it (using serial arithmetic). + */ + if (ADDIP_SERIAL_gte(rcvd_serial, sent_serial + 1) && + !(asoc->addip_last_asconf)) { + abort = sctp_make_abort(asoc, asconf_ack, + sizeof(sctp_errhdr_t)); + if (abort) { + sctp_init_cause(abort, SCTP_ERROR_ASCONF_ACK, 0); + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, + SCTP_CHUNK(abort)); + } + /* We are going to ABORT, so we might as well stop + * processing the rest of the chunks in the packet. + */ + sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, + SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO)); + sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL()); + sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, + SCTP_ERROR(ECONNABORTED)); + sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, + SCTP_PERR(SCTP_ERROR_ASCONF_ACK)); + SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS); + SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB); + return SCTP_DISPOSITION_ABORT; + } + + if ((rcvd_serial == sent_serial) && asoc->addip_last_asconf) { + sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, + SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO)); + + if (!sctp_process_asconf_ack((struct sctp_association *)asoc, + asconf_ack)) { + /* Successfully processed ASCONF_ACK. We can + * release the next asconf if we have one. + */ + sctp_add_cmd_sf(commands, SCTP_CMD_SEND_NEXT_ASCONF, + SCTP_NULL()); + return SCTP_DISPOSITION_CONSUME; + } + + abort = sctp_make_abort(asoc, asconf_ack, + sizeof(sctp_errhdr_t)); + if (abort) { + sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, 0); + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, + SCTP_CHUNK(abort)); + } + /* We are going to ABORT, so we might as well stop + * processing the rest of the chunks in the packet. + */ + sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL()); + sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, + SCTP_ERROR(ECONNABORTED)); + sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, + SCTP_PERR(SCTP_ERROR_ASCONF_ACK)); + SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS); + SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB); + return SCTP_DISPOSITION_ABORT; + } + + return SCTP_DISPOSITION_DISCARD; +} + +/* + * PR-SCTP Section 3.6 Receiver Side Implementation of PR-SCTP + * + * When a FORWARD TSN chunk arrives, the data receiver MUST first update + * its cumulative TSN point to the value carried in the FORWARD TSN + * chunk, and then MUST further advance its cumulative TSN point locally + * if possible. + * After the above processing, the data receiver MUST stop reporting any + * missing TSNs earlier than or equal to the new cumulative TSN point. + * + * Verification Tag: 8.5 Verification Tag [Normal verification] + * + * The return value is the disposition of the chunk.
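 + * + * Editor's note (worked example, not part of the original source): if the + * receiver's cumulative TSN point is 100 and a FORWARD TSN arrives with a + * New Cumulative TSN of 103, TSNs 101-103 stop being reported as missing + * even though they were never delivered, and the stream/sequence pairs in + * the chunk's skip list let ordered delivery move past the abandoned + * messages.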
+ */ +sctp_disposition_t sctp_sf_eat_fwd_tsn(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + struct sctp_chunk *chunk = arg; + struct sctp_fwdtsn_hdr *fwdtsn_hdr; + struct sctp_fwdtsn_skip *skip; + __u16 len; + __u32 tsn; + + if (!sctp_vtag_verify(chunk, asoc)) { + sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, + SCTP_NULL()); + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); + } + + /* Make sure that the FORWARD_TSN chunk has valid length. */ + if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_fwdtsn_chunk))) + return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, + commands); + + fwdtsn_hdr = (struct sctp_fwdtsn_hdr *)chunk->skb->data; + chunk->subh.fwdtsn_hdr = fwdtsn_hdr; + len = ntohs(chunk->chunk_hdr->length); + len -= sizeof(struct sctp_chunkhdr); + skb_pull(chunk->skb, len); + + tsn = ntohl(fwdtsn_hdr->new_cum_tsn); + pr_debug("%s: TSN 0x%x\n", __func__, tsn); + + /* The TSN is too high--silently discard the chunk and count on it + * getting retransmitted later. + */ + if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0) + goto discard_noforce; + + /* Silently discard the chunk if stream-id is not valid */ + sctp_walk_fwdtsn(skip, chunk) { + if (ntohs(skip->stream) >= asoc->c.sinit_max_instreams) + goto discard_noforce; + } + + sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn)); + if (len > sizeof(struct sctp_fwdtsn_hdr)) + sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN, + SCTP_CHUNK(chunk)); + + /* Count this as receiving DATA. */ + if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) { + sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, + SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); + } + + /* FIXME: For now send a SACK, but DATA processing may + * send another. + */ + sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_NOFORCE()); + + return SCTP_DISPOSITION_CONSUME; + +discard_noforce: + return SCTP_DISPOSITION_DISCARD; +} + +sctp_disposition_t sctp_sf_eat_fwd_tsn_fast( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + struct sctp_chunk *chunk = arg; + struct sctp_fwdtsn_hdr *fwdtsn_hdr; + struct sctp_fwdtsn_skip *skip; + __u16 len; + __u32 tsn; + + if (!sctp_vtag_verify(chunk, asoc)) { + sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, + SCTP_NULL()); + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); + } + + /* Make sure that the FORWARD_TSN chunk has a valid length. */ + if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_fwdtsn_chunk))) + return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, + commands); + + fwdtsn_hdr = (struct sctp_fwdtsn_hdr *)chunk->skb->data; + chunk->subh.fwdtsn_hdr = fwdtsn_hdr; + len = ntohs(chunk->chunk_hdr->length); + len -= sizeof(struct sctp_chunkhdr); + skb_pull(chunk->skb, len); + + tsn = ntohl(fwdtsn_hdr->new_cum_tsn); + pr_debug("%s: TSN 0x%x\n", __func__, tsn); + + /* The TSN is too high--silently discard the chunk and count on it + * getting retransmitted later. 
+ */ + if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0) + goto gen_shutdown; + + /* Silently discard the chunk if stream-id is not valid */ + sctp_walk_fwdtsn(skip, chunk) { + if (ntohs(skip->stream) >= asoc->c.sinit_max_instreams) + goto gen_shutdown; + } + + sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn)); + if (len > sizeof(struct sctp_fwdtsn_hdr)) + sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN, + SCTP_CHUNK(chunk)); + + /* Go ahead and force a SACK, since we are shutting down. */ +gen_shutdown: + /* Implementor's Guide. + * + * While in SHUTDOWN-SENT state, the SHUTDOWN sender MUST immediately + * respond to each received packet containing one or more DATA chunk(s) + * with a SACK, a SHUTDOWN chunk, and restart the T2-shutdown timer + */ + sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SHUTDOWN, SCTP_NULL()); + sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE()); + sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, + SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN)); + + return SCTP_DISPOSITION_CONSUME; +} + +/* + * SCTP-AUTH Section 6.3 Receiving authenticated chunks + * + * The receiver MUST use the HMAC algorithm indicated in the HMAC + * Identifier field. If this algorithm was not specified by the + * receiver in the HMAC-ALGO parameter in the INIT or INIT-ACK chunk + * during association setup, the AUTH chunk and all chunks after it MUST + * be discarded and an ERROR chunk SHOULD be sent with the error cause + * defined in Section 4.1. + * + * If an endpoint with no shared key receives a Shared Key Identifier + * other than 0, it MUST silently discard all authenticated chunks. If + * the endpoint has at least one endpoint pair shared key for the peer, + * it MUST use the key specified by the Shared Key Identifier if a + * key has been configured for that Shared Key Identifier. If no + * endpoint pair shared key has been configured for that Shared Key + * Identifier, all authenticated chunks MUST be silently discarded. + * + * Verification Tag: 8.5 Verification Tag [Normal verification] + * + * The return value is the disposition of the chunk. + */ +static sctp_ierror_t sctp_sf_authenticate(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + struct sctp_chunk *chunk) +{ + struct sctp_authhdr *auth_hdr; + struct sctp_hmac *hmac; + unsigned int sig_len; + __u16 key_id; + __u8 *save_digest; + __u8 *digest; + + /* Pull in the auth header, so we can do some more verification */ + auth_hdr = (struct sctp_authhdr *)chunk->skb->data; + chunk->subh.auth_hdr = auth_hdr; + skb_pull(chunk->skb, sizeof(struct sctp_authhdr)); + + /* Make sure that we support the HMAC algorithm from the auth + * chunk. + */ + if (!sctp_auth_asoc_verify_hmac_id(asoc, auth_hdr->hmac_id)) + return SCTP_IERROR_AUTH_BAD_HMAC; + + /* Make sure that the provided shared key identifier has been + * configured + */ + key_id = ntohs(auth_hdr->shkey_id); + if (key_id != asoc->active_key_id && !sctp_auth_get_shkey(asoc, key_id)) + return SCTP_IERROR_AUTH_BAD_KEYID; + + + /* Make sure that the length of the signature matches what + * we expect. + */ + sig_len = ntohs(chunk->chunk_hdr->length) - sizeof(sctp_auth_chunk_t); + hmac = sctp_auth_get_hmac(ntohs(auth_hdr->hmac_id)); + if (sig_len != hmac->hmac_len) + return SCTP_IERROR_PROTO_VIOLATION; + + /* Now that we've done validation checks, we can compute and + * verify the hmac. The steps involved are: + * 1. Save the digest from the chunk. + * 2. Zero out the digest in the chunk. + * 3.
Compute the new digest + * 4. Compare saved and new digests. + */ + digest = auth_hdr->hmac; + skb_pull(chunk->skb, sig_len); + + save_digest = kmemdup(digest, sig_len, GFP_ATOMIC); + if (!save_digest) + goto nomem; + + memset(digest, 0, sig_len); + + sctp_auth_calculate_hmac(asoc, chunk->skb, + (struct sctp_auth_chunk *)chunk->chunk_hdr, + GFP_ATOMIC); + + /* Discard the packet if the digests do not match */ + if (memcmp(save_digest, digest, sig_len)) { + kfree(save_digest); + return SCTP_IERROR_BAD_SIG; + } + + kfree(save_digest); + chunk->auth = 1; + + return SCTP_IERROR_NO_ERROR; +nomem: + return SCTP_IERROR_NOMEM; +} + +sctp_disposition_t sctp_sf_eat_auth(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + struct sctp_authhdr *auth_hdr; + struct sctp_chunk *chunk = arg; + struct sctp_chunk *err_chunk; + sctp_ierror_t error; + + /* Make sure that the peer is AUTH capable */ + if (!asoc->peer.auth_capable) + return sctp_sf_unk_chunk(net, ep, asoc, type, arg, commands); + + if (!sctp_vtag_verify(chunk, asoc)) { + sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, + SCTP_NULL()); + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); + } + + /* Make sure that the AUTH chunk has a valid length. */ + if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_auth_chunk))) + return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, + commands); + + auth_hdr = (struct sctp_authhdr *)chunk->skb->data; + error = sctp_sf_authenticate(net, ep, asoc, type, chunk); + switch (error) { + case SCTP_IERROR_AUTH_BAD_HMAC: + /* Generate the ERROR chunk and discard the rest + * of the packet + */ + err_chunk = sctp_make_op_error(asoc, chunk, + SCTP_ERROR_UNSUP_HMAC, + &auth_hdr->hmac_id, + sizeof(__u16), 0); + if (err_chunk) { + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, + SCTP_CHUNK(err_chunk)); + } + /* Fall Through */ + case SCTP_IERROR_AUTH_BAD_KEYID: + case SCTP_IERROR_BAD_SIG: + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); + + case SCTP_IERROR_PROTO_VIOLATION: + return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, + commands); + + case SCTP_IERROR_NOMEM: + return SCTP_DISPOSITION_NOMEM; + + default: /* Prevent gcc warnings */ + break; + } + + if (asoc->active_key_id != ntohs(auth_hdr->shkey_id)) { + struct sctp_ulpevent *ev; + + ev = sctp_ulpevent_make_authkey(asoc, ntohs(auth_hdr->shkey_id), + SCTP_AUTH_NEWKEY, GFP_ATOMIC); + + if (!ev) + return SCTP_DISPOSITION_NOMEM; + + sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, + SCTP_ULPEVENT(ev)); + } + + return SCTP_DISPOSITION_CONSUME; +} + +/* + * Process an unknown chunk. + * + * Section: 3.2. Also, 2.1 in the implementor's guide. + * + * Chunk Types are encoded such that the highest-order two bits specify + * the action that must be taken if the processing endpoint does not + * recognize the Chunk Type. + * + * 00 - Stop processing this SCTP packet and discard it, do not process + * any further chunks within it. + * + * 01 - Stop processing this SCTP packet and discard it, do not process + * any further chunks within it, and report the unrecognized + * chunk in an 'Unrecognized Chunk Type'. + * + * 10 - Skip this chunk and continue processing. + * + * 11 - Skip this chunk and continue processing, but report in an ERROR + * Chunk using the 'Unrecognized Chunk Type' cause of error. + * + * The return value is the disposition of the chunk.
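 + * + * Editor's note (worked example, not part of the original source): the + * action bits are the top two bits of the chunk type, extracted below + * with SCTP_CID_ACTION_MASK (0xc0). E.g. an unknown type 0x49 (top bits + * 01) stops processing, discards the packet and reports the chunk in an + * ERROR, while type 0xc1 (top bits 11, ASCONF when ADDIP is unsupported) + * is skipped but still reported.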
+ */ +sctp_disposition_t sctp_sf_unk_chunk(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + struct sctp_chunk *unk_chunk = arg; + struct sctp_chunk *err_chunk; + sctp_chunkhdr_t *hdr; + + pr_debug("%s: processing unknown chunk id:%d\n", __func__, type.chunk); + + if (!sctp_vtag_verify(unk_chunk, asoc)) + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); + + /* Make sure that the chunk has a valid length. + * Since we don't know the chunk type, we use a general + * chunkhdr structure to make a comparison. + */ + if (!sctp_chunk_length_valid(unk_chunk, sizeof(sctp_chunkhdr_t))) + return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, + commands); + + switch (type.chunk & SCTP_CID_ACTION_MASK) { + case SCTP_CID_ACTION_DISCARD: + /* Discard the packet. */ + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); + case SCTP_CID_ACTION_DISCARD_ERR: + /* Generate an ERROR chunk as response. */ + hdr = unk_chunk->chunk_hdr; + err_chunk = sctp_make_op_error(asoc, unk_chunk, + SCTP_ERROR_UNKNOWN_CHUNK, hdr, + WORD_ROUND(ntohs(hdr->length)), + 0); + if (err_chunk) { + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, + SCTP_CHUNK(err_chunk)); + } + + /* Discard the packet. */ + sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); + return SCTP_DISPOSITION_CONSUME; + case SCTP_CID_ACTION_SKIP: + /* Skip the chunk. */ + return SCTP_DISPOSITION_DISCARD; + case SCTP_CID_ACTION_SKIP_ERR: + /* Generate an ERROR chunk as response. */ + hdr = unk_chunk->chunk_hdr; + err_chunk = sctp_make_op_error(asoc, unk_chunk, + SCTP_ERROR_UNKNOWN_CHUNK, hdr, + WORD_ROUND(ntohs(hdr->length)), + 0); + if (err_chunk) { + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, + SCTP_CHUNK(err_chunk)); + } + /* Skip the chunk. */ + return SCTP_DISPOSITION_CONSUME; + default: + break; + } + + return SCTP_DISPOSITION_DISCARD; +} + +/* + * Discard the chunk. + * + * Section: 0.2, 5.2.3, 5.2.5, 5.2.6, 6.0, 8.4.6, 8.5.1c, 9.2 + * [Too numerous to mention...] + * Verification Tag: No verification needed. + * Inputs + * (endpoint, asoc, chunk) + * + * Outputs + * (asoc, reply_msg, msg_up, timers, counters) + * + * The return value is the disposition of the chunk. + */ +sctp_disposition_t sctp_sf_discard_chunk(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + struct sctp_chunk *chunk = arg; + + /* Make sure that the chunk has a valid length. + * Since we don't know the chunk type, we use a general + * chunkhdr structure to make a comparison. + */ + if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) + return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, + commands); + + pr_debug("%s: chunk:%d is discarded\n", __func__, type.chunk); + + return SCTP_DISPOSITION_DISCARD; +} + +/* + * Discard the whole packet. + * + * Section: 8.4 2) + * + * 2) If the OOTB packet contains an ABORT chunk, the receiver MUST + * silently discard the OOTB packet and take no further action. + * + * Verification Tag: No verification necessary + * + * Inputs + * (endpoint, asoc, chunk) + * + * Outputs + * (asoc, reply_msg, msg_up, timers, counters) + * + * The return value is the disposition of the chunk. 
+ */ +sctp_disposition_t sctp_sf_pdiscard(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + SCTP_INC_STATS(net, SCTP_MIB_IN_PKT_DISCARDS); + sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL()); + + return SCTP_DISPOSITION_CONSUME; +} + + +/* + * The other end is violating protocol. + * + * Section: Not specified + * Verification Tag: Not specified + * Inputs + * (endpoint, asoc, chunk) + * + * Outputs + * (asoc, reply_msg, msg_up, timers, counters) + * + * We simply tag the chunk as a violation. The state machine will log + * the violation and continue. + */ +sctp_disposition_t sctp_sf_violation(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + struct sctp_chunk *chunk = arg; + + /* Make sure that the chunk has a valid length. */ + if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) + return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, + commands); + + return SCTP_DISPOSITION_VIOLATION; +} + +/* + * Common function to handle a protocol violation. + */ +static sctp_disposition_t sctp_sf_abort_violation( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + void *arg, + sctp_cmd_seq_t *commands, + const __u8 *payload, + const size_t paylen) +{ + struct sctp_packet *packet = NULL; + struct sctp_chunk *chunk = arg; + struct sctp_chunk *abort = NULL; + + /* SCTP-AUTH, Section 6.3: + * It should be noted that if the receiver wants to tear + * down an association in an authenticated way only, the + * handling of malformed packets should not result in + * tearing down the association. + * + * This means that if we only want to abort associations + * in an authenticated way (i.e AUTH+ABORT), then we + * can't destroy this association just because the packet + * was malformed. + */ + if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc)) + goto discard; + + /* Make the abort chunk. */ + abort = sctp_make_abort_violation(asoc, chunk, payload, paylen); + if (!abort) + goto nomem; + + if (asoc) { + /* Treat INIT-ACK as a special case during COOKIE-WAIT. 
*/ + if (chunk->chunk_hdr->type == SCTP_CID_INIT_ACK && + !asoc->peer.i.init_tag) { + sctp_initack_chunk_t *initack; + + initack = (sctp_initack_chunk_t *)chunk->chunk_hdr; + if (!sctp_chunk_length_valid(chunk, + sizeof(sctp_initack_chunk_t))) + abort->chunk_hdr->flags |= SCTP_CHUNK_FLAG_T; + else { + unsigned int inittag; + + inittag = ntohl(initack->init_hdr.init_tag); + sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_INITTAG, + SCTP_U32(inittag)); + } + } + + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort)); + SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS); + + if (asoc->state <= SCTP_STATE_COOKIE_ECHOED) { + sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, + SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); + sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, + SCTP_ERROR(ECONNREFUSED)); + sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, + SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION)); + } else { + sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, + SCTP_ERROR(ECONNABORTED)); + sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, + SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION)); + SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB); + } + } else { + packet = sctp_ootb_pkt_new(net, asoc, chunk); + + if (!packet) + goto nomem_pkt; + + if (sctp_test_T_bit(abort)) + packet->vtag = ntohl(chunk->sctp_hdr->vtag); + + abort->skb->sk = ep->base.sk; + + sctp_packet_append_chunk(packet, abort); + + sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, + SCTP_PACKET(packet)); + + SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS); + } + + SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS); + +discard: + sctp_sf_pdiscard(net, ep, asoc, SCTP_ST_CHUNK(0), arg, commands); + return SCTP_DISPOSITION_ABORT; + +nomem_pkt: + sctp_chunk_free(abort); +nomem: + return SCTP_DISPOSITION_NOMEM; +} + +/* + * Handle a protocol violation when the chunk length is invalid. + * "Invalid" length is identified as smaller than the minimal length a + * given chunk can be. For example, a SACK chunk has invalid length + * if its length is set to be smaller than the size of sctp_sack_chunk_t. + * + * We inform the other end by sending an ABORT with a Protocol Violation + * error code. + * + * Section: Not specified + * Verification Tag: Nothing to do + * Inputs + * (endpoint, asoc, chunk) + * + * Outputs + * (reply_msg, msg_up, counters) + * + * Generate an ABORT chunk and terminate the association. + */ +static sctp_disposition_t sctp_sf_violation_chunklen( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + static const char err_str[] = "The following chunk had invalid length:"; + + return sctp_sf_abort_violation(net, ep, asoc, arg, commands, err_str, + sizeof(err_str)); +} + +/* + * Handle a protocol violation when the parameter length is invalid. + * If the length is smaller than the minimum length of a given parameter, + * or accumulated length in multi parameters exceeds the end of the chunk, + * the length is considered as invalid. + */ +static sctp_disposition_t sctp_sf_violation_paramlen( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, void *ext, + sctp_cmd_seq_t *commands) +{ + struct sctp_chunk *chunk = arg; + struct sctp_paramhdr *param = ext; + struct sctp_chunk *abort = NULL; + + if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc)) + goto discard; + + /* Make the abort chunk. 
*/ + abort = sctp_make_violation_paramlen(asoc, chunk, param); + if (!abort) + goto nomem; + + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort)); + SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS); + + sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, + SCTP_ERROR(ECONNABORTED)); + sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, + SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION)); + SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB); + SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS); + +discard: + sctp_sf_pdiscard(net, ep, asoc, SCTP_ST_CHUNK(0), arg, commands); + return SCTP_DISPOSITION_ABORT; +nomem: + return SCTP_DISPOSITION_NOMEM; +} + +/* Handle a protocol violation when the peer tries to advance the + * cumulative tsn ack to a point beyond the max tsn currently sent. + * + * We inform the other end by sending an ABORT with a Protocol Violation + * error code. + */ +static sctp_disposition_t sctp_sf_violation_ctsn( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + static const char err_str[] = "The cumulative tsn ack beyond the max tsn currently sent:"; + + return sctp_sf_abort_violation(net, ep, asoc, arg, commands, err_str, + sizeof(err_str)); +} + +/* Handle a protocol violation caused by invalid chunk bundling. For + * example, when we have an association and we receive bundled INIT-ACK, or + * SHUTDOWN-COMPLETE, our peer is clearly violating the "MUST NOT bundle" + * statement from the specs. Additionally, there might be an attacker + * on the path and we may not want to continue this communication. + */ +static sctp_disposition_t sctp_sf_violation_chunk( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + static const char err_str[] = "The following chunk violates protocol:"; + + if (!asoc) + return sctp_sf_violation(net, ep, asoc, type, arg, commands); + + return sctp_sf_abort_violation(net, ep, asoc, arg, commands, err_str, + sizeof(err_str)); +} +/*************************************************************************** + * These are the state functions for handling primitive (Section 10) events. + ***************************************************************************/ +/* + * sctp_sf_do_prm_asoc + * + * Section: 10.1 ULP-to-SCTP + * B) Associate + * + * Format: ASSOCIATE(local SCTP instance name, destination transport addr, + * outbound stream count) + * -> association id [,destination transport addr list] [,outbound stream + * count] + * + * This primitive allows the upper layer to initiate an association to a + * specific peer endpoint. + * + * The peer endpoint shall be specified by one of the transport addresses + * which defines the endpoint (see Section 1.4). If the local SCTP + * instance has not been initialized, the ASSOCIATE is considered an + * error. + * [This is not relevant for the kernel implementation since we do all + * initialization at boot time. If we hadn't initialized we wouldn't + * get anywhere near this code.] + * + * An association id, which is a local handle to the SCTP association, + * will be returned on successful establishment of the association. If + * SCTP is not able to open an SCTP association with the peer endpoint, + * an error is returned. + * [In the kernel implementation, the struct sctp_association needs to + * be created BEFORE causing this primitive to run.]
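 + * + * Editor's note (userspace view, illustrative only): this primitive is + * typically reached from connect(2) or sctp_connectx(3), or implicitly + * from sendmsg(2) to a new peer address on a one-to-many socket; all of + * these create the association and then run this state function.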
+ *
+ * Other association parameters may be returned, including the
+ * complete destination transport addresses of the peer as well as the
+ * outbound stream count of the local endpoint. One of the transport
+ * addresses from the returned destination addresses will be selected by
+ * the local endpoint as default primary path for sending SCTP packets
+ * to this peer.  The returned "destination transport addr list" can
+ * be used by the ULP to change the default primary path or to force
+ * sending a packet to a specific transport address.  [All of this
+ * stuff happens when the INIT ACK arrives.  This is a NON-BLOCKING
+ * function.]
+ *
+ * Mandatory attributes:
+ *
+ * o local SCTP instance name - obtained from the INITIALIZE operation.
+ *   [This is the argument asoc.]
+ * o destination transport addr - specified as one of the transport
+ *   addresses of the peer endpoint with which the association is to be
+ *   established.
+ *   [This is asoc->peer.active_path.]
+ * o outbound stream count - the number of outbound streams the ULP
+ *   would like to open towards this peer endpoint.
+ *   [BUG: This is not currently implemented.]
+ * Optional attributes:
+ *
+ * None.
+ *
+ * The return value is a disposition.
+ */
+sctp_disposition_t sctp_sf_do_prm_asoc(struct net *net,
+				       const struct sctp_endpoint *ep,
+				       const struct sctp_association *asoc,
+				       const sctp_subtype_t type,
+				       void *arg,
+				       sctp_cmd_seq_t *commands)
+{
+	struct sctp_chunk *repl;
+	struct sctp_association *my_asoc;
+
+	/* The comment below says that we enter COOKIE-WAIT AFTER
+	 * sending the INIT, but that doesn't actually work in our
+	 * implementation...
+	 */
+	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
+			SCTP_STATE(SCTP_STATE_COOKIE_WAIT));
+
+	/* RFC 2960 5.1 Normal Establishment of an Association
+	 *
+	 * A) "A" first sends an INIT chunk to "Z".  In the INIT, "A"
+	 *    must provide its Verification Tag (Tag_A) in the Initiate
+	 *    Tag field.  Tag_A SHOULD be a random number in the range of
+	 *    1 to 4294967295 (see 5.3.1 for Tag value selection). ...
+	 */
+
+	repl = sctp_make_init(asoc, &asoc->base.bind_addr, GFP_ATOMIC, 0);
+	if (!repl)
+		goto nomem;
+
+	/* Choose transport for INIT. */
+	sctp_add_cmd_sf(commands, SCTP_CMD_INIT_CHOOSE_TRANSPORT,
+			SCTP_CHUNK(repl));
+
+	/* Cast away the const modifier, as we want to just
+	 * rerun it through as a side effect.
+	 */
+	my_asoc = (struct sctp_association *)asoc;
+	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(my_asoc));
+
+	/* After sending the INIT, "A" starts the T1-init timer and
+	 * enters the COOKIE-WAIT state.
+	 */
+	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
+			SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
+	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
+	return SCTP_DISPOSITION_CONSUME;
+
+nomem:
+	return SCTP_DISPOSITION_NOMEM;
+}
+
+/*
+ * Process the SEND primitive.
+ *
+ * Section: 10.1 ULP-to-SCTP
+ * E) Send
+ *
+ * Format: SEND(association id, buffer address, byte count [,context]
+ *         [,stream id] [,life time] [,destination transport address]
+ *         [,unorder flag] [,no-bundle flag] [,payload protocol-id] )
+ * -> result
+ *
+ * This is the main method to send user data via SCTP.
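 *
 * [Sketch, not part of this patch: the lksctp-tools wrapper
 * sctp_sendmsg() exposes most of the attributes listed below directly;
 * the values here are placeholders.]
 *
 *	sctp_sendmsg(fd, buf, len,
 *		     (struct sockaddr *)&peer, sizeof(peer),
 *		     htonl(ppid),       - payload protocol-id
 *		     SCTP_UNORDERED,    - unorder flag
 *		     stream_no,         - stream id
 *		     timetolive,        - life time, in ms
 *		     context);          - context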
+ * + * Mandatory attributes: + * + * o association id - local handle to the SCTP association + * + * o buffer address - the location where the user message to be + * transmitted is stored; + * + * o byte count - The size of the user data in number of bytes; + * + * Optional attributes: + * + * o context - an optional 32 bit integer that will be carried in the + * sending failure notification to the ULP if the transportation of + * this User Message fails. + * + * o stream id - to indicate which stream to send the data on. If not + * specified, stream 0 will be used. + * + * o life time - specifies the life time of the user data. The user data + * will not be sent by SCTP after the life time expires. This + * parameter can be used to avoid efforts to transmit stale + * user messages. SCTP notifies the ULP if the data cannot be + * initiated to transport (i.e. sent to the destination via SCTP's + * send primitive) within the life time variable. However, the + * user data will be transmitted if SCTP has attempted to transmit a + * chunk before the life time expired. + * + * o destination transport address - specified as one of the destination + * transport addresses of the peer endpoint to which this packet + * should be sent. Whenever possible, SCTP should use this destination + * transport address for sending the packets, instead of the current + * primary path. + * + * o unorder flag - this flag, if present, indicates that the user + * would like the data delivered in an unordered fashion to the peer + * (i.e., the U flag is set to 1 on all DATA chunks carrying this + * message). + * + * o no-bundle flag - instructs SCTP not to bundle this user data with + * other outbound DATA chunks. SCTP MAY still bundle even when + * this flag is present, when faced with network congestion. + * + * o payload protocol-id - A 32 bit unsigned integer that is to be + * passed to the peer indicating the type of payload protocol data + * being transmitted. This value is passed as opaque data by SCTP. + * + * The return value is the disposition. + */ +sctp_disposition_t sctp_sf_do_prm_send(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + struct sctp_datamsg *msg = arg; + + sctp_add_cmd_sf(commands, SCTP_CMD_SEND_MSG, SCTP_DATAMSG(msg)); + return SCTP_DISPOSITION_CONSUME; +} + +/* + * Process the SHUTDOWN primitive. + * + * Section: 10.1: + * C) Shutdown + * + * Format: SHUTDOWN(association id) + * -> result + * + * Gracefully closes an association. Any locally queued user data + * will be delivered to the peer. The association will be terminated only + * after the peer acknowledges all the SCTP packets sent. A success code + * will be returned on successful termination of the association. If + * attempting to terminate the association results in a failure, an error + * code shall be returned. + * + * Mandatory attributes: + * + * o association id - local handle to the SCTP association + * + * Optional attributes: + * + * None. + * + * The return value is the disposition. 
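 *
 * [Sketch, not part of this patch: user space normally reaches this
 * primitive with an ordinary close() on a TCP-style SCTP socket, or
 * explicitly with shutdown(); both start the graceful SHUTDOWN
 * sequence implemented below.]
 *
 *	shutdown(fd, SHUT_WR);   - half-close: triggers the SHUTDOWN primitive
 *	close(fd);               - full close once remaining data is drained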
+ */
+sctp_disposition_t sctp_sf_do_9_2_prm_shutdown(
+	struct net *net,
+	const struct sctp_endpoint *ep,
+	const struct sctp_association *asoc,
+	const sctp_subtype_t type,
+	void *arg,
+	sctp_cmd_seq_t *commands)
+{
+	int disposition;
+
+	/* From 9.2 Shutdown of an Association
+	 * Upon receipt of the SHUTDOWN primitive from its upper
+	 * layer, the endpoint enters SHUTDOWN-PENDING state and
+	 * remains there until all outstanding data has been
+	 * acknowledged by its peer. The endpoint accepts no new data
+	 * from its upper layer, but retransmits data to the far end
+	 * if necessary to fill gaps.
+	 */
+	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
+			SCTP_STATE(SCTP_STATE_SHUTDOWN_PENDING));
+
+	disposition = SCTP_DISPOSITION_CONSUME;
+	if (sctp_outq_is_empty(&asoc->outqueue)) {
+		disposition = sctp_sf_do_9_2_start_shutdown(net, ep, asoc, type,
+							    arg, commands);
+	}
+	return disposition;
+}
+
+/*
+ * Process the ABORT primitive.
+ *
+ * Section: 10.1:
+ * D) Abort
+ *
+ * Format: Abort(association id [, cause code])
+ * -> result
+ *
+ * Ungracefully closes an association. Any locally queued user data
+ * will be discarded and an ABORT chunk is sent to the peer. A success code
+ * will be returned on successful abortion of the association. If
+ * attempting to abort the association results in a failure, an error
+ * code shall be returned.
+ *
+ * Mandatory attributes:
+ *
+ * o association id - local handle to the SCTP association
+ *
+ * Optional attributes:
+ *
+ * o cause code - reason of the abort to be passed to the peer
+ *
+ * The return value is the disposition.
+ */
+sctp_disposition_t sctp_sf_do_9_1_prm_abort(
+	struct net *net,
+	const struct sctp_endpoint *ep,
+	const struct sctp_association *asoc,
+	const sctp_subtype_t type,
+	void *arg,
+	sctp_cmd_seq_t *commands)
+{
+	/* From 9.1 Abort of an Association
+	 * Upon receipt of the ABORT primitive from its upper layer,
+	 * the endpoint enters the CLOSED state, discards any locally
+	 * queued and outstanding data, and sends an ABORT chunk to its
+	 * peer.  No new data is accepted from the upper layer and no
+	 * retransmissions are made.
+	 */
+	struct sctp_chunk *abort = arg;
+	sctp_disposition_t retval;
+
+	retval = SCTP_DISPOSITION_CONSUME;
+
+	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
+
+	/* Even if we can't send the ABORT due to low memory delete the
+	 * TCB.  This is a departure from our typical NOMEM handling.
+	 */
+
+	sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+			SCTP_ERROR(ECONNABORTED));
+	/* Delete the established association. */
+	sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+			SCTP_PERR(SCTP_ERROR_USER_ABORT));
+
+	SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+	SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
+
+	return retval;
+}
+
+/* We tried an illegal operation on an association which is closed. */
+sctp_disposition_t sctp_sf_error_closed(struct net *net,
+					const struct sctp_endpoint *ep,
+					const struct sctp_association *asoc,
+					const sctp_subtype_t type,
+					void *arg,
+					sctp_cmd_seq_t *commands)
+{
+	sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_ERROR, SCTP_ERROR(-EINVAL));
+	return SCTP_DISPOSITION_CONSUME;
+}
+
+/* We tried an illegal operation on an association which is shutting
+ * down.
+ */
+sctp_disposition_t sctp_sf_error_shutdown(struct net *net,
+					  const struct sctp_endpoint *ep,
+					  const struct sctp_association *asoc,
+					  const sctp_subtype_t type,
+					  void *arg,
+					  sctp_cmd_seq_t *commands)
+{
+	sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_ERROR,
+			SCTP_ERROR(-ESHUTDOWN));
+	return SCTP_DISPOSITION_CONSUME;
+}
+
+/*
+ * sctp_cookie_wait_prm_shutdown
+ *
+ * Section: 4 Note: 2
+ * Verification Tag:
+ * Inputs
+ * (endpoint, asoc)
+ *
+ * The RFC does not explicitly address this issue, but this is the route
+ * through the state table when someone issues a shutdown while in
+ * COOKIE_WAIT state.
+ *
+ * Outputs
+ * (timers)
+ */
+sctp_disposition_t sctp_sf_cookie_wait_prm_shutdown(
+	struct net *net,
+	const struct sctp_endpoint *ep,
+	const struct sctp_association *asoc,
+	const sctp_subtype_t type,
+	void *arg,
+	sctp_cmd_seq_t *commands)
+{
+	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
+			SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
+
+	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
+			SCTP_STATE(SCTP_STATE_CLOSED));
+
+	SCTP_INC_STATS(net, SCTP_MIB_SHUTDOWNS);
+
+	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
+
+	return SCTP_DISPOSITION_DELETE_TCB;
+}
+
+/*
+ * sctp_cookie_echoed_prm_shutdown
+ *
+ * Section: 4 Note: 2
+ * Verification Tag:
+ * Inputs
+ * (endpoint, asoc)
+ *
+ * The RFC does not explicitly address this issue, but this is the route
+ * through the state table when someone issues a shutdown while in
+ * COOKIE_ECHOED state.
+ *
+ * Outputs
+ * (timers)
+ */
+sctp_disposition_t sctp_sf_cookie_echoed_prm_shutdown(
+	struct net *net,
+	const struct sctp_endpoint *ep,
+	const struct sctp_association *asoc,
+	const sctp_subtype_t type,
+	void *arg, sctp_cmd_seq_t *commands)
+{
+	/* There is a single T1 timer, so we should be able to use
+	 * common function with the COOKIE-WAIT state.
+	 */
+	return sctp_sf_cookie_wait_prm_shutdown(net, ep, asoc, type, arg, commands);
+}
+
+/*
+ * sctp_sf_cookie_wait_prm_abort
+ *
+ * Section: 4 Note: 2
+ * Verification Tag:
+ * Inputs
+ * (endpoint, asoc)
+ *
+ * The RFC does not explicitly address this issue, but this is the route
+ * through the state table when someone issues an abort while in
+ * COOKIE_WAIT state.
+ *
+ * Outputs
+ * (timers)
+ */
+sctp_disposition_t sctp_sf_cookie_wait_prm_abort(
+	struct net *net,
+	const struct sctp_endpoint *ep,
+	const struct sctp_association *asoc,
+	const sctp_subtype_t type,
+	void *arg,
+	sctp_cmd_seq_t *commands)
+{
+	struct sctp_chunk *abort = arg;
+	sctp_disposition_t retval;
+
+	/* Stop T1-init timer */
+	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
+			SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
+	retval = SCTP_DISPOSITION_CONSUME;
+
+	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
+
+	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
+			SCTP_STATE(SCTP_STATE_CLOSED));
+
+	SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+
+	/* Even if we can't send the ABORT due to low memory delete the
+	 * TCB.  This is a departure from our typical NOMEM handling.
+	 */
+
+	sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+			SCTP_ERROR(ECONNREFUSED));
+	/* Delete the half-open association. */
+	sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
+			SCTP_PERR(SCTP_ERROR_USER_ABORT));
+
+	return retval;
+}
+
+/*
+ * sctp_sf_cookie_echoed_prm_abort
+ *
+ * Section: 4 Note: 3
+ * Verification Tag:
+ * Inputs
+ * (endpoint, asoc)
+ *
+ * The RFC does not explicitly address this issue, but this is the route
+ * through the state table when someone issues an abort while in
+ * COOKIE_ECHOED state.
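 *
 * [Sketch, not part of this patch: a user can force the ABORT
 * primitive, e.g. while an association is still being set up, with an
 * immediate-abort linger setting before close().]
 *
 *	struct linger l = { .l_onoff = 1, .l_linger = 0 };
 *	setsockopt(fd, SOL_SOCKET, SO_LINGER, &l, sizeof(l));
 *	close(fd);   - sends ABORT instead of the graceful SHUTDOWN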
+ *
+ * Outputs
+ * (timers)
+ */
+sctp_disposition_t sctp_sf_cookie_echoed_prm_abort(
+	struct net *net,
+	const struct sctp_endpoint *ep,
+	const struct sctp_association *asoc,
+	const sctp_subtype_t type,
+	void *arg,
+	sctp_cmd_seq_t *commands)
+{
+	/* There is a single T1 timer, so we should be able to use
+	 * common function with the COOKIE-WAIT state.
+	 */
+	return sctp_sf_cookie_wait_prm_abort(net, ep, asoc, type, arg, commands);
+}
+
+/*
+ * sctp_sf_shutdown_pending_prm_abort
+ *
+ * Inputs
+ * (endpoint, asoc)
+ *
+ * The RFC does not explicitly address this issue, but this is the route
+ * through the state table when someone issues an abort while in
+ * SHUTDOWN-PENDING state.
+ *
+ * Outputs
+ * (timers)
+ */
+sctp_disposition_t sctp_sf_shutdown_pending_prm_abort(
+	struct net *net,
+	const struct sctp_endpoint *ep,
+	const struct sctp_association *asoc,
+	const sctp_subtype_t type,
+	void *arg,
+	sctp_cmd_seq_t *commands)
+{
+	/* Stop the T5-shutdown guard timer. */
+	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
+			SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
+
+	return sctp_sf_do_9_1_prm_abort(net, ep, asoc, type, arg, commands);
+}
+
+/*
+ * sctp_sf_shutdown_sent_prm_abort
+ *
+ * Inputs
+ * (endpoint, asoc)
+ *
+ * The RFC does not explicitly address this issue, but this is the route
+ * through the state table when someone issues an abort while in
+ * SHUTDOWN-SENT state.
+ *
+ * Outputs
+ * (timers)
+ */
+sctp_disposition_t sctp_sf_shutdown_sent_prm_abort(
+	struct net *net,
+	const struct sctp_endpoint *ep,
+	const struct sctp_association *asoc,
+	const sctp_subtype_t type,
+	void *arg,
+	sctp_cmd_seq_t *commands)
+{
+	/* Stop the T2-shutdown timer. */
+	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
+			SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
+
+	/* Stop the T5-shutdown guard timer. */
+	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
+			SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
+
+	return sctp_sf_do_9_1_prm_abort(net, ep, asoc, type, arg, commands);
+}
+
+/*
+ * sctp_sf_shutdown_ack_sent_prm_abort
+ *
+ * Inputs
+ * (endpoint, asoc)
+ *
+ * The RFC does not explicitly address this issue, but this is the route
+ * through the state table when someone issues an abort while in
+ * SHUTDOWN-ACK-SENT state.
+ *
+ * Outputs
+ * (timers)
+ */
+sctp_disposition_t sctp_sf_shutdown_ack_sent_prm_abort(
+	struct net *net,
+	const struct sctp_endpoint *ep,
+	const struct sctp_association *asoc,
+	const sctp_subtype_t type,
+	void *arg,
+	sctp_cmd_seq_t *commands)
+{
+	/* We use the same T2 timer, so we should be able to use
+	 * common function with the SHUTDOWN-SENT state.
+	 */
+	return sctp_sf_shutdown_sent_prm_abort(net, ep, asoc, type, arg, commands);
+}
+
+/*
+ * Process the REQUESTHEARTBEAT primitive
+ *
+ * 10.1 ULP-to-SCTP
+ * J) Request Heartbeat
+ *
+ * Format: REQUESTHEARTBEAT(association id, destination transport address)
+ *
+ * -> result
+ *
+ * Instructs the local endpoint to perform a HeartBeat on the specified
+ * destination transport address of the given association. The returned
+ * result should indicate whether the transmission of the HEARTBEAT
+ * chunk to the destination address is successful.
+ *
+ * Mandatory attributes:
+ *
+ * o association id - local handle to the SCTP association
+ *
+ * o destination transport address - the transport address of the
+ *   association on which a heartbeat should be issued.
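 *
 * [Sketch, not part of this patch: an on-demand heartbeat can be
 * requested from user space through the SCTP_PEER_ADDR_PARAMS socket
 * option with the SPP_HB_DEMAND flag; peer_addr is a placeholder.]
 *
 *	struct sctp_paddrparams pp = { .spp_flags = SPP_HB_DEMAND };
 *	memcpy(&pp.spp_address, &peer_addr, sizeof(peer_addr));
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
 *		   &pp, sizeof(pp));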
+ */ +sctp_disposition_t sctp_sf_do_prm_requestheartbeat( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + if (SCTP_DISPOSITION_NOMEM == sctp_sf_heartbeat(ep, asoc, type, + (struct sctp_transport *)arg, commands)) + return SCTP_DISPOSITION_NOMEM; + + /* + * RFC 2960 (bis), section 8.3 + * + * D) Request an on-demand HEARTBEAT on a specific destination + * transport address of a given association. + * + * The endpoint should increment the respective error counter of + * the destination transport address each time a HEARTBEAT is sent + * to that address and not acknowledged within one RTO. + * + */ + sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_HB_SENT, + SCTP_TRANSPORT(arg)); + return SCTP_DISPOSITION_CONSUME; +} + +/* + * ADDIP Section 4.1 ASCONF Chunk Procedures + * When an endpoint has an ASCONF signaled change to be sent to the + * remote endpoint it should do A1 to A9 + */ +sctp_disposition_t sctp_sf_do_prm_asconf(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + struct sctp_chunk *chunk = arg; + + sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T4, SCTP_CHUNK(chunk)); + sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START, + SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO)); + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(chunk)); + return SCTP_DISPOSITION_CONSUME; +} + +/* + * Ignore the primitive event + * + * The return value is the disposition of the primitive. + */ +sctp_disposition_t sctp_sf_ignore_primitive( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + pr_debug("%s: primitive type:%d is ignored\n", __func__, + type.primitive); + + return SCTP_DISPOSITION_DISCARD; +} + +/*************************************************************************** + * These are the state functions for the OTHER events. + ***************************************************************************/ + +/* + * When the SCTP stack has no more user data to send or retransmit, this + * notification is given to the user. Also, at the time when a user app + * subscribes to this event, if there is no data to be sent or + * retransmit, the stack will immediately send up this notification. + */ +sctp_disposition_t sctp_sf_do_no_pending_tsn( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + struct sctp_ulpevent *event; + + event = sctp_ulpevent_make_sender_dry_event(asoc, GFP_ATOMIC); + if (!event) + return SCTP_DISPOSITION_NOMEM; + + sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(event)); + + return SCTP_DISPOSITION_CONSUME; +} + +/* + * Start the shutdown negotiation. + * + * From Section 9.2: + * Once all its outstanding data has been acknowledged, the endpoint + * shall send a SHUTDOWN chunk to its peer including in the Cumulative + * TSN Ack field the last sequential TSN it has received from the peer. + * It shall then start the T2-shutdown timer and enter the SHUTDOWN-SENT + * state. If the timer expires, the endpoint must re-send the SHUTDOWN + * with the updated last sequential TSN received from its peer. + * + * The return value is the disposition. 
+ */ +sctp_disposition_t sctp_sf_do_9_2_start_shutdown( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + struct sctp_chunk *reply; + + /* Once all its outstanding data has been acknowledged, the + * endpoint shall send a SHUTDOWN chunk to its peer including + * in the Cumulative TSN Ack field the last sequential TSN it + * has received from the peer. + */ + reply = sctp_make_shutdown(asoc, NULL); + if (!reply) + goto nomem; + + /* Set the transport for the SHUTDOWN chunk and the timeout for the + * T2-shutdown timer. + */ + sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T2, SCTP_CHUNK(reply)); + + /* It shall then start the T2-shutdown timer */ + sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START, + SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN)); + + /* RFC 4960 Section 9.2 + * The sender of the SHUTDOWN MAY also start an overall guard timer + * 'T5-shutdown-guard' to bound the overall time for shutdown sequence. + */ + sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, + SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD)); + + if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) + sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, + SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); + + /* and enter the SHUTDOWN-SENT state. */ + sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, + SCTP_STATE(SCTP_STATE_SHUTDOWN_SENT)); + + /* sctp-implguide 2.10 Issues with Heartbeating and failover + * + * HEARTBEAT ... is discontinued after sending either SHUTDOWN + * or SHUTDOWN-ACK. + */ + sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_STOP, SCTP_NULL()); + + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply)); + + return SCTP_DISPOSITION_CONSUME; + +nomem: + return SCTP_DISPOSITION_NOMEM; +} + +/* + * Generate a SHUTDOWN ACK now that everything is SACK'd. + * + * From Section 9.2: + * + * If it has no more outstanding DATA chunks, the SHUTDOWN receiver + * shall send a SHUTDOWN ACK and start a T2-shutdown timer of its own, + * entering the SHUTDOWN-ACK-SENT state. If the timer expires, the + * endpoint must re-send the SHUTDOWN ACK. + * + * The return value is the disposition. + */ +sctp_disposition_t sctp_sf_do_9_2_shutdown_ack( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + struct sctp_chunk *chunk = (struct sctp_chunk *) arg; + struct sctp_chunk *reply; + + /* There are 2 ways of getting here: + * 1) called in response to a SHUTDOWN chunk + * 2) called when SCTP_EVENT_NO_PENDING_TSN event is issued. + * + * For the case (2), the arg parameter is set to NULL. We need + * to check that we have a chunk before accessing it's fields. + */ + if (chunk) { + if (!sctp_vtag_verify(chunk, asoc)) + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); + + /* Make sure that the SHUTDOWN chunk has a valid length. */ + if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_shutdown_chunk_t))) + return sctp_sf_violation_chunklen(net, ep, asoc, type, arg, + commands); + } + + /* If it has no more outstanding DATA chunks, the SHUTDOWN receiver + * shall send a SHUTDOWN ACK ... + */ + reply = sctp_make_shutdown_ack(asoc, chunk); + if (!reply) + goto nomem; + + /* Set the transport for the SHUTDOWN ACK chunk and the timeout for + * the T2-shutdown timer. 
+ */ + sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T2, SCTP_CHUNK(reply)); + + /* and start/restart a T2-shutdown timer of its own, */ + sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, + SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN)); + + if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) + sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, + SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); + + /* Enter the SHUTDOWN-ACK-SENT state. */ + sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, + SCTP_STATE(SCTP_STATE_SHUTDOWN_ACK_SENT)); + + /* sctp-implguide 2.10 Issues with Heartbeating and failover + * + * HEARTBEAT ... is discontinued after sending either SHUTDOWN + * or SHUTDOWN-ACK. + */ + sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_STOP, SCTP_NULL()); + + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply)); + + return SCTP_DISPOSITION_CONSUME; + +nomem: + return SCTP_DISPOSITION_NOMEM; +} + +/* + * Ignore the event defined as other + * + * The return value is the disposition of the event. + */ +sctp_disposition_t sctp_sf_ignore_other(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + pr_debug("%s: the event other type:%d is ignored\n", + __func__, type.other); + + return SCTP_DISPOSITION_DISCARD; +} + +/************************************************************ + * These are the state functions for handling timeout events. + ************************************************************/ + +/* + * RTX Timeout + * + * Section: 6.3.3 Handle T3-rtx Expiration + * + * Whenever the retransmission timer T3-rtx expires for a destination + * address, do the following: + * [See below] + * + * The return value is the disposition of the chunk. + */ +sctp_disposition_t sctp_sf_do_6_3_3_rtx(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + struct sctp_transport *transport = arg; + + SCTP_INC_STATS(net, SCTP_MIB_T3_RTX_EXPIREDS); + + if (asoc->overall_error_count >= asoc->max_retrans) { + if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING) { + /* + * We are here likely because the receiver had its rwnd + * closed for a while and we have not been able to + * transmit the locally queued data within the maximum + * retransmission attempts limit. Start the T5 + * shutdown guard timer to give the receiver one last + * chance and some additional time to recover before + * aborting. + */ + sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START_ONCE, + SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD)); + } else { + sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, + SCTP_ERROR(ETIMEDOUT)); + /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */ + sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, + SCTP_PERR(SCTP_ERROR_NO_ERROR)); + SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS); + SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB); + return SCTP_DISPOSITION_DELETE_TCB; + } + } + + /* E1) For the destination address for which the timer + * expires, adjust its ssthresh with rules defined in Section + * 7.2.3 and set the cwnd <- MTU. + */ + + /* E2) For the destination address for which the timer + * expires, set RTO <- RTO * 2 ("back off the timer"). The + * maximum value discussed in rule C7 above (RTO.max) may be + * used to provide an upper bound to this doubling operation. 
+ */ + + /* E3) Determine how many of the earliest (i.e., lowest TSN) + * outstanding DATA chunks for the address for which the + * T3-rtx has expired will fit into a single packet, subject + * to the MTU constraint for the path corresponding to the + * destination transport address to which the retransmission + * is being sent (this may be different from the address for + * which the timer expires [see Section 6.4]). Call this + * value K. Bundle and retransmit those K DATA chunks in a + * single packet to the destination endpoint. + * + * Note: Any DATA chunks that were sent to the address for + * which the T3-rtx timer expired but did not fit in one MTU + * (rule E3 above), should be marked for retransmission and + * sent as soon as cwnd allows (normally when a SACK arrives). + */ + + /* Do some failure management (Section 8.2). */ + sctp_add_cmd_sf(commands, SCTP_CMD_STRIKE, SCTP_TRANSPORT(transport)); + + /* NB: Rules E4 and F1 are implicit in R1. */ + sctp_add_cmd_sf(commands, SCTP_CMD_RETRAN, SCTP_TRANSPORT(transport)); + + return SCTP_DISPOSITION_CONSUME; +} + +/* + * Generate delayed SACK on timeout + * + * Section: 6.2 Acknowledgement on Reception of DATA Chunks + * + * The guidelines on delayed acknowledgement algorithm specified in + * Section 4.2 of [RFC2581] SHOULD be followed. Specifically, an + * acknowledgement SHOULD be generated for at least every second packet + * (not every second DATA chunk) received, and SHOULD be generated + * within 200 ms of the arrival of any unacknowledged DATA chunk. In + * some situations it may be beneficial for an SCTP transmitter to be + * more conservative than the algorithms detailed in this document + * allow. However, an SCTP transmitter MUST NOT be more aggressive than + * the following algorithms allow. + */ +sctp_disposition_t sctp_sf_do_6_2_sack(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + SCTP_INC_STATS(net, SCTP_MIB_DELAY_SACK_EXPIREDS); + sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE()); + return SCTP_DISPOSITION_CONSUME; +} + +/* + * sctp_sf_t1_init_timer_expire + * + * Section: 4 Note: 2 + * Verification Tag: + * Inputs + * (endpoint, asoc) + * + * RFC 2960 Section 4 Notes + * 2) If the T1-init timer expires, the endpoint MUST retransmit INIT + * and re-start the T1-init timer without changing state. This MUST + * be repeated up to 'Max.Init.Retransmits' times. After that, the + * endpoint MUST abort the initialization process and report the + * error to SCTP user. + * + * Outputs + * (timers, events) + * + */ +sctp_disposition_t sctp_sf_t1_init_timer_expire(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + struct sctp_chunk *repl = NULL; + struct sctp_bind_addr *bp; + int attempts = asoc->init_err_counter + 1; + + pr_debug("%s: timer T1 expired (INIT)\n", __func__); + + SCTP_INC_STATS(net, SCTP_MIB_T1_INIT_EXPIREDS); + + if (attempts <= asoc->max_init_attempts) { + bp = (struct sctp_bind_addr *) &asoc->base.bind_addr; + repl = sctp_make_init(asoc, bp, GFP_ATOMIC, 0); + if (!repl) + return SCTP_DISPOSITION_NOMEM; + + /* Choose transport for INIT. */ + sctp_add_cmd_sf(commands, SCTP_CMD_INIT_CHOOSE_TRANSPORT, + SCTP_CHUNK(repl)); + + /* Issue a sideeffect to do the needed accounting. 
*/ + sctp_add_cmd_sf(commands, SCTP_CMD_INIT_RESTART, + SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); + + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); + } else { + pr_debug("%s: giving up on INIT, attempts:%d " + "max_init_attempts:%d\n", __func__, attempts, + asoc->max_init_attempts); + + sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, + SCTP_ERROR(ETIMEDOUT)); + sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, + SCTP_PERR(SCTP_ERROR_NO_ERROR)); + return SCTP_DISPOSITION_DELETE_TCB; + } + + return SCTP_DISPOSITION_CONSUME; +} + +/* + * sctp_sf_t1_cookie_timer_expire + * + * Section: 4 Note: 2 + * Verification Tag: + * Inputs + * (endpoint, asoc) + * + * RFC 2960 Section 4 Notes + * 3) If the T1-cookie timer expires, the endpoint MUST retransmit + * COOKIE ECHO and re-start the T1-cookie timer without changing + * state. This MUST be repeated up to 'Max.Init.Retransmits' times. + * After that, the endpoint MUST abort the initialization process and + * report the error to SCTP user. + * + * Outputs + * (timers, events) + * + */ +sctp_disposition_t sctp_sf_t1_cookie_timer_expire(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + struct sctp_chunk *repl = NULL; + int attempts = asoc->init_err_counter + 1; + + pr_debug("%s: timer T1 expired (COOKIE-ECHO)\n", __func__); + + SCTP_INC_STATS(net, SCTP_MIB_T1_COOKIE_EXPIREDS); + + if (attempts <= asoc->max_init_attempts) { + repl = sctp_make_cookie_echo(asoc, NULL); + if (!repl) + return SCTP_DISPOSITION_NOMEM; + + sctp_add_cmd_sf(commands, SCTP_CMD_INIT_CHOOSE_TRANSPORT, + SCTP_CHUNK(repl)); + /* Issue a sideeffect to do the needed accounting. */ + sctp_add_cmd_sf(commands, SCTP_CMD_COOKIEECHO_RESTART, + SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE)); + + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); + } else { + sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, + SCTP_ERROR(ETIMEDOUT)); + sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, + SCTP_PERR(SCTP_ERROR_NO_ERROR)); + return SCTP_DISPOSITION_DELETE_TCB; + } + + return SCTP_DISPOSITION_CONSUME; +} + +/* RFC2960 9.2 If the timer expires, the endpoint must re-send the SHUTDOWN + * with the updated last sequential TSN received from its peer. + * + * An endpoint should limit the number of retransmissions of the + * SHUTDOWN chunk to the protocol parameter 'Association.Max.Retrans'. + * If this threshold is exceeded the endpoint should destroy the TCB and + * MUST report the peer endpoint unreachable to the upper layer (and + * thus the association enters the CLOSED state). The reception of any + * packet from its peer (i.e. as the peer sends all of its queued DATA + * chunks) should clear the endpoint's retransmission count and restart + * the T2-Shutdown timer, giving its peer ample opportunity to transmit + * all of its queued DATA chunks that have not yet been sent. + */ +sctp_disposition_t sctp_sf_t2_timer_expire(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + struct sctp_chunk *reply = NULL; + + pr_debug("%s: timer T2 expired\n", __func__); + + SCTP_INC_STATS(net, SCTP_MIB_T2_SHUTDOWN_EXPIREDS); + + ((struct sctp_association *)asoc)->shutdown_retries++; + + if (asoc->overall_error_count >= asoc->max_retrans) { + sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, + SCTP_ERROR(ETIMEDOUT)); + /* Note: CMD_ASSOC_FAILED calls CMD_DELETE_TCB. 
+ */
+		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+				SCTP_PERR(SCTP_ERROR_NO_ERROR));
+		SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+		SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
+		return SCTP_DISPOSITION_DELETE_TCB;
+	}
+
+	switch (asoc->state) {
+	case SCTP_STATE_SHUTDOWN_SENT:
+		reply = sctp_make_shutdown(asoc, NULL);
+		break;
+
+	case SCTP_STATE_SHUTDOWN_ACK_SENT:
+		reply = sctp_make_shutdown_ack(asoc, NULL);
+		break;
+
+	default:
+		BUG();
+		break;
+	}
+
+	if (!reply)
+		goto nomem;
+
+	/* Do some failure management (Section 8.2).
+	 * If the transport we last sent a SHUTDOWN to has been removed,
+	 * don't do failure management.
+	 */
+	if (asoc->shutdown_last_sent_to)
+		sctp_add_cmd_sf(commands, SCTP_CMD_STRIKE,
+				SCTP_TRANSPORT(asoc->shutdown_last_sent_to));
+
+	/* Set the transport for the SHUTDOWN/ACK chunk and the timeout for
+	 * the T2-shutdown timer.
+	 */
+	sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T2, SCTP_CHUNK(reply));
+
+	/* Restart the T2-shutdown timer. */
+	sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
+			SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
+	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
+	return SCTP_DISPOSITION_CONSUME;
+
+nomem:
+	return SCTP_DISPOSITION_NOMEM;
+}
+
+/*
+ * ADDIP Section 4.1 ASCONF Chunk Procedures
+ * If the T4 RTO timer expires the endpoint should do B1 to B5
+ */
+sctp_disposition_t sctp_sf_t4_timer_expire(
+	struct net *net,
+	const struct sctp_endpoint *ep,
+	const struct sctp_association *asoc,
+	const sctp_subtype_t type,
+	void *arg,
+	sctp_cmd_seq_t *commands)
+{
+	struct sctp_chunk *chunk = asoc->addip_last_asconf;
+	struct sctp_transport *transport = chunk->transport;
+
+	SCTP_INC_STATS(net, SCTP_MIB_T4_RTO_EXPIREDS);
+
+	/* ADDIP 4.1 B1) Increment the error counters and perform path failure
+	 * detection on the appropriate destination address as defined in
+	 * RFC2960 [5] section 8.1 and 8.2.
+	 */
+	if (transport)
+		sctp_add_cmd_sf(commands, SCTP_CMD_STRIKE,
+				SCTP_TRANSPORT(transport));
+
+	/* Reconfig T4 timer and transport. */
+	sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T4, SCTP_CHUNK(chunk));
+
+	/* ADDIP 4.1 B2) Increment the association error counters and perform
+	 * endpoint failure detection on the association as defined in
+	 * RFC2960 [5] section 8.1 and 8.2.
+	 * association error counter is incremented in SCTP_CMD_STRIKE.
+	 */
+	if (asoc->overall_error_count >= asoc->max_retrans) {
+		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
+				SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
+		sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+				SCTP_ERROR(ETIMEDOUT));
+		sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+				SCTP_PERR(SCTP_ERROR_NO_ERROR));
+		SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+		SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
+		return SCTP_DISPOSITION_ABORT;
+	}
+
+	/* ADDIP 4.1 B3) Back-off the destination address RTO value to which
+	 * the ASCONF chunk was sent by doubling the RTO timer value.
+	 * This is done in SCTP_CMD_STRIKE.
+	 */
+
+	/* ADDIP 4.1 B4) Re-transmit the ASCONF Chunk last sent and if possible
+	 * choose an alternate destination address (please refer to RFC2960
+	 * [5] section 6.4.1). An endpoint MUST NOT add new parameters to this
+	 * chunk, it MUST be the same (including its serial number) as the last
+	 * ASCONF sent.
+	 */
+	sctp_chunk_hold(asoc->addip_last_asconf);
+	sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
+			SCTP_CHUNK(asoc->addip_last_asconf));
+
+	/* ADDIP 4.1 B5) Restart the T-4 RTO timer.  Note that if a different
+	 * destination is selected, then the RTO used will be that of the new
+	 * destination address.
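 *
 * [Sketch, not part of this patch: the ASCONF being retransmitted here
 * is typically generated when an application adds or removes a bound
 * address on a live endpoint with ADDIP negotiated (see the
 * net.sctp.addip_enable sysctl), e.g. through the lksctp-tools
 * wrapper; new_addr is a placeholder.]
 *
 *	sctp_bindx(fd, (struct sockaddr *)&new_addr, 1,
 *		   SCTP_BINDX_ADD_ADDR);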
+ */ + sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, + SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO)); + + return SCTP_DISPOSITION_CONSUME; +} + +/* sctpimpguide-05 Section 2.12.2 + * The sender of the SHUTDOWN MAY also start an overall guard timer + * 'T5-shutdown-guard' to bound the overall time for shutdown sequence. + * At the expiration of this timer the sender SHOULD abort the association + * by sending an ABORT chunk. + */ +sctp_disposition_t sctp_sf_t5_timer_expire(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + struct sctp_chunk *reply = NULL; + + pr_debug("%s: timer T5 expired\n", __func__); + + SCTP_INC_STATS(net, SCTP_MIB_T5_SHUTDOWN_GUARD_EXPIREDS); + + reply = sctp_make_abort(asoc, NULL, 0); + if (!reply) + goto nomem; + + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply)); + sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, + SCTP_ERROR(ETIMEDOUT)); + sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, + SCTP_PERR(SCTP_ERROR_NO_ERROR)); + + SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS); + SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB); + + return SCTP_DISPOSITION_DELETE_TCB; +nomem: + return SCTP_DISPOSITION_NOMEM; +} + +/* Handle expiration of AUTOCLOSE timer. When the autoclose timer expires, + * the association is automatically closed by starting the shutdown process. + * The work that needs to be done is same as when SHUTDOWN is initiated by + * the user. So this routine looks same as sctp_sf_do_9_2_prm_shutdown(). + */ +sctp_disposition_t sctp_sf_autoclose_timer_expire( + struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + int disposition; + + SCTP_INC_STATS(net, SCTP_MIB_AUTOCLOSE_EXPIREDS); + + /* From 9.2 Shutdown of an Association + * Upon receipt of the SHUTDOWN primitive from its upper + * layer, the endpoint enters SHUTDOWN-PENDING state and + * remains there until all outstanding data has been + * acknowledged by its peer. The endpoint accepts no new data + * from its upper layer, but retransmits data to the far end + * if necessary to fill gaps. + */ + sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, + SCTP_STATE(SCTP_STATE_SHUTDOWN_PENDING)); + + disposition = SCTP_DISPOSITION_CONSUME; + if (sctp_outq_is_empty(&asoc->outqueue)) { + disposition = sctp_sf_do_9_2_start_shutdown(net, ep, asoc, type, + arg, commands); + } + return disposition; +} + +/***************************************************************************** + * These are sa state functions which could apply to all types of events. + ****************************************************************************/ + +/* + * This table entry is not implemented. + * + * Inputs + * (endpoint, asoc, chunk) + * + * The return value is the disposition of the chunk. + */ +sctp_disposition_t sctp_sf_not_impl(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + return SCTP_DISPOSITION_NOT_IMPL; +} + +/* + * This table entry represents a bug. + * + * Inputs + * (endpoint, asoc, chunk) + * + * The return value is the disposition of the chunk. 
+ */ +sctp_disposition_t sctp_sf_bug(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + return SCTP_DISPOSITION_BUG; +} + +/* + * This table entry represents the firing of a timer in the wrong state. + * Since timer deletion cannot be guaranteed a timer 'may' end up firing + * when the association is in the wrong state. This event should + * be ignored, so as to prevent any rearming of the timer. + * + * Inputs + * (endpoint, asoc, chunk) + * + * The return value is the disposition of the chunk. + */ +sctp_disposition_t sctp_sf_timer_ignore(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + const sctp_subtype_t type, + void *arg, + sctp_cmd_seq_t *commands) +{ + pr_debug("%s: timer %d ignored\n", __func__, type.chunk); + + return SCTP_DISPOSITION_CONSUME; +} + +/******************************************************************** + * 2nd Level Abstractions + ********************************************************************/ + +/* Pull the SACK chunk based on the SACK header. */ +static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk) +{ + struct sctp_sackhdr *sack; + unsigned int len; + __u16 num_blocks; + __u16 num_dup_tsns; + + /* Protect ourselves from reading too far into + * the skb from a bogus sender. + */ + sack = (struct sctp_sackhdr *) chunk->skb->data; + + num_blocks = ntohs(sack->num_gap_ack_blocks); + num_dup_tsns = ntohs(sack->num_dup_tsns); + len = sizeof(struct sctp_sackhdr); + len += (num_blocks + num_dup_tsns) * sizeof(__u32); + if (len > chunk->skb->len) + return NULL; + + skb_pull(chunk->skb, len); + + return sack; +} + +/* Create an ABORT packet to be sent as a response, with the specified + * error causes. + */ +static struct sctp_packet *sctp_abort_pkt_new(struct net *net, + const struct sctp_endpoint *ep, + const struct sctp_association *asoc, + struct sctp_chunk *chunk, + const void *payload, + size_t paylen) +{ + struct sctp_packet *packet; + struct sctp_chunk *abort; + + packet = sctp_ootb_pkt_new(net, asoc, chunk); + + if (packet) { + /* Make an ABORT. + * The T bit will be set if the asoc is NULL. + */ + abort = sctp_make_abort(asoc, chunk, paylen); + if (!abort) { + sctp_ootb_pkt_free(packet); + return NULL; + } + + /* Reflect vtag if T-Bit is set */ + if (sctp_test_T_bit(abort)) + packet->vtag = ntohl(chunk->sctp_hdr->vtag); + + /* Add specified error causes, i.e., payload, to the + * end of the chunk. + */ + sctp_addto_chunk(abort, paylen, payload); + + /* Set the skb to the belonging sock for accounting. */ + abort->skb->sk = ep->base.sk; + + sctp_packet_append_chunk(packet, abort); + + } + + return packet; +} + +/* Allocate a packet for responding in the OOTB conditions. */ +static struct sctp_packet *sctp_ootb_pkt_new(struct net *net, + const struct sctp_association *asoc, + const struct sctp_chunk *chunk) +{ + struct sctp_packet *packet; + struct sctp_transport *transport; + __u16 sport; + __u16 dport; + __u32 vtag; + + /* Get the source and destination port from the inbound packet. */ + sport = ntohs(chunk->sctp_hdr->dest); + dport = ntohs(chunk->sctp_hdr->source); + + /* The V-tag is going to be the same as the inbound packet if no + * association exists, otherwise, use the peer's vtag. + */ + if (asoc) { + /* Special case the INIT-ACK as there is no peer's vtag + * yet. 
+ */
+		switch (chunk->chunk_hdr->type) {
+		case SCTP_CID_INIT_ACK:
+		{
+			sctp_initack_chunk_t *initack;
+
+			initack = (sctp_initack_chunk_t *)chunk->chunk_hdr;
+			vtag = ntohl(initack->init_hdr.init_tag);
+			break;
+		}
+		default:
+			vtag = asoc->peer.i.init_tag;
+			break;
+		}
+	} else {
+		/* Special case the INIT and stale COOKIE_ECHO as there is no
+		 * vtag yet.
+		 */
+		switch (chunk->chunk_hdr->type) {
+		case SCTP_CID_INIT:
+		{
+			sctp_init_chunk_t *init;
+
+			init = (sctp_init_chunk_t *)chunk->chunk_hdr;
+			vtag = ntohl(init->init_hdr.init_tag);
+			break;
+		}
+		default:
+			vtag = ntohl(chunk->sctp_hdr->vtag);
+			break;
+		}
+	}
+
+	/* Make a transport for the bucket, Eliza... */
+	transport = sctp_transport_new(net, sctp_source(chunk), GFP_ATOMIC);
+	if (!transport)
+		goto nomem;
+
+	/* Cache a route for the transport with the chunk's destination as
+	 * the source address.
+	 */
+	sctp_transport_route(transport, (union sctp_addr *)&chunk->dest,
+			     sctp_sk(net->sctp.ctl_sock));
+
+	packet = sctp_packet_init(&transport->packet, transport, sport, dport);
+	packet = sctp_packet_config(packet, vtag, 0);
+
+	return packet;
+
+nomem:
+	return NULL;
+}
+
+/* Free the packet allocated earlier for responding in the OOTB condition. */
+void sctp_ootb_pkt_free(struct sctp_packet *packet)
+{
+	sctp_transport_free(packet->transport);
+}
+
+/* Send a stale cookie error when an invalid COOKIE ECHO chunk is found. */
+static void sctp_send_stale_cookie_err(struct net *net,
+				       const struct sctp_endpoint *ep,
+				       const struct sctp_association *asoc,
+				       const struct sctp_chunk *chunk,
+				       sctp_cmd_seq_t *commands,
+				       struct sctp_chunk *err_chunk)
+{
+	struct sctp_packet *packet;
+
+	if (err_chunk) {
+		packet = sctp_ootb_pkt_new(net, asoc, chunk);
+		if (packet) {
+			struct sctp_signed_cookie *cookie;
+
+			/* Override the OOTB vtag from the cookie. */
+			cookie = chunk->subh.cookie_hdr;
+			packet->vtag = cookie->c.peer_vtag;
+
+			/* Set the skb to the belonging sock for accounting. */
+			err_chunk->skb->sk = ep->base.sk;
+			sctp_packet_append_chunk(packet, err_chunk);
+			sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
+					SCTP_PACKET(packet));
+			SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
+		} else
+			sctp_chunk_free(err_chunk);
+	}
+}
+
+
+/* Process a data chunk */
+static int sctp_eat_data(const struct sctp_association *asoc,
+			 struct sctp_chunk *chunk,
+			 sctp_cmd_seq_t *commands)
+{
+	sctp_datahdr_t *data_hdr;
+	struct sctp_chunk *err;
+	size_t datalen;
+	sctp_verb_t deliver;
+	int tmp;
+	__u32 tsn;
+	struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map;
+	struct sock *sk = asoc->base.sk;
+	struct net *net = sock_net(sk);
+	u16 ssn;
+	u16 sid;
+	u8 ordered = 0;
+
+	data_hdr = chunk->subh.data_hdr = (sctp_datahdr_t *)chunk->skb->data;
+	skb_pull(chunk->skb, sizeof(sctp_datahdr_t));
+
+	tsn = ntohl(data_hdr->tsn);
+	pr_debug("%s: TSN 0x%x\n", __func__, tsn);
+
+	/* ASSERT: Now skb->data is really the user data. */
+
+	/* Process ECN based congestion.
+	 *
+	 * Since the chunk structure is reused for all chunks within
+	 * a packet, we use ecn_ce_done to track if we've already
+	 * done CE processing for this packet.
+	 *
+	 * We need to do ECN processing even if we plan to discard the
+	 * chunk later.
+	 */
+
+	if (!chunk->ecn_ce_done) {
+		struct sctp_af *af;
+		chunk->ecn_ce_done = 1;
+
+		af = sctp_get_af_specific(
+			ipver2af(ip_hdr(chunk->skb)->version));
+
+		if (af && af->is_ce(chunk->skb) && asoc->peer.ecn_capable) {
+			/* Do real work as a side effect.
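 *
 * [Aside, not part of this patch: for IPv4 the af->is_ce() test
 * reduces to checking the ECN bits of the TOS byte for the CE
 * codepoint, i.e. INET_ECN_is_ce(ip_hdr(skb)->tos), which is
 * (tos & INET_ECN_MASK) == INET_ECN_CE.]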
*/ + sctp_add_cmd_sf(commands, SCTP_CMD_ECN_CE, + SCTP_U32(tsn)); + } + } + + tmp = sctp_tsnmap_check(&asoc->peer.tsn_map, tsn); + if (tmp < 0) { + /* The TSN is too high--silently discard the chunk and + * count on it getting retransmitted later. + */ + if (chunk->asoc) + chunk->asoc->stats.outofseqtsns++; + return SCTP_IERROR_HIGH_TSN; + } else if (tmp > 0) { + /* This is a duplicate. Record it. */ + sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_DUP, SCTP_U32(tsn)); + return SCTP_IERROR_DUP_TSN; + } + + /* This is a new TSN. */ + + /* Discard if there is no room in the receive window. + * Actually, allow a little bit of overflow (up to a MTU). + */ + datalen = ntohs(chunk->chunk_hdr->length); + datalen -= sizeof(sctp_data_chunk_t); + + deliver = SCTP_CMD_CHUNK_ULP; + + /* Think about partial delivery. */ + if ((datalen >= asoc->rwnd) && (!asoc->ulpq.pd_mode)) { + + /* Even if we don't accept this chunk there is + * memory pressure. + */ + sctp_add_cmd_sf(commands, SCTP_CMD_PART_DELIVER, SCTP_NULL()); + } + + /* Spill over rwnd a little bit. Note: While allowed, this spill over + * seems a bit troublesome in that frag_point varies based on + * PMTU. In cases, such as loopback, this might be a rather + * large spill over. + */ + if ((!chunk->data_accepted) && (!asoc->rwnd || asoc->rwnd_over || + (datalen > asoc->rwnd + asoc->frag_point))) { + + /* If this is the next TSN, consider reneging to make + * room. Note: Playing nice with a confused sender. A + * malicious sender can still eat up all our buffer + * space and in the future we may want to detect and + * do more drastic reneging. + */ + if (sctp_tsnmap_has_gap(map) && + (sctp_tsnmap_get_ctsn(map) + 1) == tsn) { + pr_debug("%s: reneging for tsn:%u\n", __func__, tsn); + deliver = SCTP_CMD_RENEGE; + } else { + pr_debug("%s: discard tsn:%u len:%zu, rwnd:%d\n", + __func__, tsn, datalen, asoc->rwnd); + + return SCTP_IERROR_IGNORE_TSN; + } + } + + /* + * Also try to renege to limit our memory usage in the event that + * we are under memory pressure + * If we can't renege, don't worry about it, the sk_rmem_schedule + * in sctp_ulpevent_make_rcvmsg will drop the frame if we grow our + * memory usage too much + */ + if (*sk->sk_prot_creator->memory_pressure) { + if (sctp_tsnmap_has_gap(map) && + (sctp_tsnmap_get_ctsn(map) + 1) == tsn) { + pr_debug("%s: under pressure, reneging for tsn:%u\n", + __func__, tsn); + deliver = SCTP_CMD_RENEGE; + } + } + + /* + * Section 3.3.10.9 No User Data (9) + * + * Cause of error + * --------------- + * No User Data: This error cause is returned to the originator of a + * DATA chunk if a received DATA chunk has no user data. + */ + if (unlikely(0 == datalen)) { + err = sctp_make_abort_no_data(asoc, chunk, tsn); + if (err) { + sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, + SCTP_CHUNK(err)); + } + /* We are going to ABORT, so we might as well stop + * processing the rest of the chunks in the packet. + */ + sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL()); + sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, + SCTP_ERROR(ECONNABORTED)); + sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, + SCTP_PERR(SCTP_ERROR_NO_DATA)); + SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS); + SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB); + return SCTP_IERROR_NO_DATA; + } + + chunk->data_accepted = 1; + + /* Note: Some chunks may get overcounted (if we drop) or overcounted + * if we renege and the chunk arrives again. 
+ */
+	if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
+		SCTP_INC_STATS(net, SCTP_MIB_INUNORDERCHUNKS);
+		if (chunk->asoc)
+			chunk->asoc->stats.iuodchunks++;
+	} else {
+		SCTP_INC_STATS(net, SCTP_MIB_INORDERCHUNKS);
+		if (chunk->asoc)
+			chunk->asoc->stats.iodchunks++;
+		ordered = 1;
+	}
+
+	/* RFC 2960 6.5 Stream Identifier and Stream Sequence Number
+	 *
+	 * If an endpoint receives a DATA chunk with an invalid stream
+	 * identifier, it shall acknowledge the reception of the DATA chunk
+	 * following the normal procedure, immediately send an ERROR chunk
+	 * with cause set to "Invalid Stream Identifier" (See Section 3.3.10)
+	 * and discard the DATA chunk.
+	 */
+	sid = ntohs(data_hdr->stream);
+	if (sid >= asoc->c.sinit_max_instreams) {
+		/* Mark tsn as received even though we drop it */
+		sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
+
+		err = sctp_make_op_error(asoc, chunk, SCTP_ERROR_INV_STRM,
+					 &data_hdr->stream,
+					 sizeof(data_hdr->stream),
+					 sizeof(u16));
+		if (err)
+			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
+					SCTP_CHUNK(err));
+		return SCTP_IERROR_BAD_STREAM;
+	}
+
+	/* Check to see if the SSN is possible for this TSN.
+	 * The biggest gap we can record is 4K wide.  Since SSNs wrap
+	 * at an unsigned short, there is no way that an SSN can wrap
+	 * for a valid TSN.  We can simply check if the current SSN is
+	 * smaller than the next expected one.  If it is, it wrapped
+	 * and is invalid.
+	 */
+	ssn = ntohs(data_hdr->ssn);
+	if (ordered && SSN_lt(ssn, sctp_ssn_peek(&asoc->ssnmap->in, sid))) {
+		return SCTP_IERROR_PROTO_VIOLATION;
+	}
+
+	/* Send the data up to the user.  Note:  Schedule the
+	 * SCTP_CMD_CHUNK_ULP cmd before the SCTP_CMD_GEN_SACK, as the SACK
+	 * chunk needs the updated rwnd.
+	 */
+	sctp_add_cmd_sf(commands, deliver, SCTP_CHUNK(chunk));
+
+	return SCTP_IERROR_NO_ERROR;
+}
diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c
new file mode 100644
index 000000000..a987d54b3
--- /dev/null
+++ b/net/sctp/sm_statetable.c
@@ -0,0 +1,933 @@
+/* SCTP kernel implementation
+ * (C) Copyright IBM Corp. 2001, 2004
+ * Copyright (c) 1999-2000 Cisco, Inc.
+ * Copyright (c) 1999-2001 Motorola, Inc.
+ * Copyright (c) 2001 Intel Corp.
+ * Copyright (c) 2001 Nokia, Inc.
+ *
+ * This file is part of the SCTP kernel implementation
+ *
+ * These are the state tables for the SCTP state machine.
+ *
+ * This SCTP implementation is free software;
+ * you can redistribute it and/or modify it under the terms of
+ * the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This SCTP implementation is distributed in the hope that it
+ * will be useful, but WITHOUT ANY WARRANTY; without even the implied
+ *                 ************************
+ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GNU CC; see the file COPYING.  If not, see
+ * <http://www.gnu.org/licenses/>.
+ *
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ *    lksctp developers <linux-sctp@vger.kernel.org>
+ *
+ * Written or modified by:
+ *    La Monte H.P.
Yarroll <piggy@acm.org> + * Karl Knutson <karl@athena.chicago.il.us> + * Jon Grimm <jgrimm@us.ibm.com> + * Hui Huang <hui.huang@nokia.com> + * Daisy Chang <daisyc@us.ibm.com> + * Ardelle Fan <ardelle.fan@intel.com> + * Sridhar Samudrala <sri@us.ibm.com> + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/skbuff.h> +#include <net/sctp/sctp.h> +#include <net/sctp/sm.h> + +static const sctp_sm_table_entry_t +primitive_event_table[SCTP_NUM_PRIMITIVE_TYPES][SCTP_STATE_NUM_STATES]; +static const sctp_sm_table_entry_t +other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_STATE_NUM_STATES]; +static const sctp_sm_table_entry_t +timeout_event_table[SCTP_NUM_TIMEOUT_TYPES][SCTP_STATE_NUM_STATES]; + +static const sctp_sm_table_entry_t *sctp_chunk_event_lookup(struct net *net, + sctp_cid_t cid, + sctp_state_t state); + + +static const sctp_sm_table_entry_t bug = { + .fn = sctp_sf_bug, + .name = "sctp_sf_bug" +}; + +#define DO_LOOKUP(_max, _type, _table) \ +({ \ + const sctp_sm_table_entry_t *rtn; \ + \ + if ((event_subtype._type > (_max))) { \ + pr_warn("table %p possible attack: event %d exceeds max %d\n", \ + _table, event_subtype._type, _max); \ + rtn = &bug; \ + } else \ + rtn = &_table[event_subtype._type][(int)state]; \ + \ + rtn; \ +}) + +const sctp_sm_table_entry_t *sctp_sm_lookup_event(struct net *net, + sctp_event_t event_type, + sctp_state_t state, + sctp_subtype_t event_subtype) +{ + switch (event_type) { + case SCTP_EVENT_T_CHUNK: + return sctp_chunk_event_lookup(net, event_subtype.chunk, state); + case SCTP_EVENT_T_TIMEOUT: + return DO_LOOKUP(SCTP_EVENT_TIMEOUT_MAX, timeout, + timeout_event_table); + case SCTP_EVENT_T_OTHER: + return DO_LOOKUP(SCTP_EVENT_OTHER_MAX, other, + other_event_table); + case SCTP_EVENT_T_PRIMITIVE: + return DO_LOOKUP(SCTP_EVENT_PRIMITIVE_MAX, primitive, + primitive_event_table); + default: + /* Yikes! We got an illegal event type. 
*/ + return &bug; + } +} + +#define TYPE_SCTP_FUNC(func) {.fn = func, .name = #func} + +#define TYPE_SCTP_DATA { \ + /* SCTP_STATE_CLOSED */ \ + TYPE_SCTP_FUNC(sctp_sf_ootb), \ + /* SCTP_STATE_COOKIE_WAIT */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_COOKIE_ECHOED */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_ESTABLISHED */ \ + TYPE_SCTP_FUNC(sctp_sf_eat_data_6_2), \ + /* SCTP_STATE_SHUTDOWN_PENDING */ \ + TYPE_SCTP_FUNC(sctp_sf_eat_data_6_2), \ + /* SCTP_STATE_SHUTDOWN_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_eat_data_fast_4_4), \ + /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ +} /* TYPE_SCTP_DATA */ + +#define TYPE_SCTP_INIT { \ + /* SCTP_STATE_CLOSED */ \ + TYPE_SCTP_FUNC(sctp_sf_do_5_1B_init), \ + /* SCTP_STATE_COOKIE_WAIT */ \ + TYPE_SCTP_FUNC(sctp_sf_do_5_2_1_siminit), \ + /* SCTP_STATE_COOKIE_ECHOED */ \ + TYPE_SCTP_FUNC(sctp_sf_do_5_2_1_siminit), \ + /* SCTP_STATE_ESTABLISHED */ \ + TYPE_SCTP_FUNC(sctp_sf_do_5_2_2_dupinit), \ + /* SCTP_STATE_SHUTDOWN_PENDING */ \ + TYPE_SCTP_FUNC(sctp_sf_do_5_2_2_dupinit), \ + /* SCTP_STATE_SHUTDOWN_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_do_5_2_2_dupinit), \ + /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ + TYPE_SCTP_FUNC(sctp_sf_do_5_2_2_dupinit), \ + /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_do_9_2_reshutack), \ +} /* TYPE_SCTP_INIT */ + +#define TYPE_SCTP_INIT_ACK { \ + /* SCTP_STATE_CLOSED */ \ + TYPE_SCTP_FUNC(sctp_sf_do_5_2_3_initack), \ + /* SCTP_STATE_COOKIE_WAIT */ \ + TYPE_SCTP_FUNC(sctp_sf_do_5_1C_ack), \ + /* SCTP_STATE_COOKIE_ECHOED */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_ESTABLISHED */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_SHUTDOWN_PENDING */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_SHUTDOWN_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ +} /* TYPE_SCTP_INIT_ACK */ + +#define TYPE_SCTP_SACK { \ + /* SCTP_STATE_CLOSED */ \ + TYPE_SCTP_FUNC(sctp_sf_ootb), \ + /* SCTP_STATE_COOKIE_WAIT */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_COOKIE_ECHOED */ \ + TYPE_SCTP_FUNC(sctp_sf_eat_sack_6_2), \ + /* SCTP_STATE_ESTABLISHED */ \ + TYPE_SCTP_FUNC(sctp_sf_eat_sack_6_2), \ + /* SCTP_STATE_SHUTDOWN_PENDING */ \ + TYPE_SCTP_FUNC(sctp_sf_eat_sack_6_2), \ + /* SCTP_STATE_SHUTDOWN_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ + TYPE_SCTP_FUNC(sctp_sf_eat_sack_6_2), \ + /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ +} /* TYPE_SCTP_SACK */ + +#define TYPE_SCTP_HEARTBEAT { \ + /* SCTP_STATE_CLOSED */ \ + TYPE_SCTP_FUNC(sctp_sf_ootb), \ + /* SCTP_STATE_COOKIE_WAIT */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_COOKIE_ECHOED */ \ + TYPE_SCTP_FUNC(sctp_sf_beat_8_3), \ + /* SCTP_STATE_ESTABLISHED */ \ + TYPE_SCTP_FUNC(sctp_sf_beat_8_3), \ + /* SCTP_STATE_SHUTDOWN_PENDING */ \ + TYPE_SCTP_FUNC(sctp_sf_beat_8_3), \ + /* SCTP_STATE_SHUTDOWN_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_beat_8_3), \ + /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ + TYPE_SCTP_FUNC(sctp_sf_beat_8_3), \ + /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ + /* This should not happen, but we are nice. 
*/ \ + TYPE_SCTP_FUNC(sctp_sf_beat_8_3), \ +} /* TYPE_SCTP_HEARTBEAT */ + +#define TYPE_SCTP_HEARTBEAT_ACK { \ + /* SCTP_STATE_CLOSED */ \ + TYPE_SCTP_FUNC(sctp_sf_ootb), \ + /* SCTP_STATE_COOKIE_WAIT */ \ + TYPE_SCTP_FUNC(sctp_sf_violation), \ + /* SCTP_STATE_COOKIE_ECHOED */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_ESTABLISHED */ \ + TYPE_SCTP_FUNC(sctp_sf_backbeat_8_3), \ + /* SCTP_STATE_SHUTDOWN_PENDING */ \ + TYPE_SCTP_FUNC(sctp_sf_backbeat_8_3), \ + /* SCTP_STATE_SHUTDOWN_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_backbeat_8_3), \ + /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ + TYPE_SCTP_FUNC(sctp_sf_backbeat_8_3), \ + /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ +} /* TYPE_SCTP_HEARTBEAT_ACK */ + +#define TYPE_SCTP_ABORT { \ + /* SCTP_STATE_CLOSED */ \ + TYPE_SCTP_FUNC(sctp_sf_pdiscard), \ + /* SCTP_STATE_COOKIE_WAIT */ \ + TYPE_SCTP_FUNC(sctp_sf_cookie_wait_abort), \ + /* SCTP_STATE_COOKIE_ECHOED */ \ + TYPE_SCTP_FUNC(sctp_sf_cookie_echoed_abort), \ + /* SCTP_STATE_ESTABLISHED */ \ + TYPE_SCTP_FUNC(sctp_sf_do_9_1_abort), \ + /* SCTP_STATE_SHUTDOWN_PENDING */ \ + TYPE_SCTP_FUNC(sctp_sf_shutdown_pending_abort), \ + /* SCTP_STATE_SHUTDOWN_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_shutdown_sent_abort), \ + /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ + TYPE_SCTP_FUNC(sctp_sf_do_9_1_abort), \ + /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_shutdown_ack_sent_abort), \ +} /* TYPE_SCTP_ABORT */ + +#define TYPE_SCTP_SHUTDOWN { \ + /* SCTP_STATE_CLOSED */ \ + TYPE_SCTP_FUNC(sctp_sf_ootb), \ + /* SCTP_STATE_COOKIE_WAIT */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_COOKIE_ECHOED */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_ESTABLISHED */ \ + TYPE_SCTP_FUNC(sctp_sf_do_9_2_shutdown), \ + /* SCTP_STATE_SHUTDOWN_PENDING */ \ + TYPE_SCTP_FUNC(sctp_sf_do_9_2_shutdown), \ + /* SCTP_STATE_SHUTDOWN_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_do_9_2_shutdown_ack), \ + /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ + TYPE_SCTP_FUNC(sctp_sf_do_9_2_shut_ctsn), \ + /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ +} /* TYPE_SCTP_SHUTDOWN */ + +#define TYPE_SCTP_SHUTDOWN_ACK { \ + /* SCTP_STATE_CLOSED */ \ + TYPE_SCTP_FUNC(sctp_sf_ootb), \ + /* SCTP_STATE_COOKIE_WAIT */ \ + TYPE_SCTP_FUNC(sctp_sf_do_8_5_1_E_sa), \ + /* SCTP_STATE_COOKIE_ECHOED */ \ + TYPE_SCTP_FUNC(sctp_sf_do_8_5_1_E_sa), \ + /* SCTP_STATE_ESTABLISHED */ \ + TYPE_SCTP_FUNC(sctp_sf_violation), \ + /* SCTP_STATE_SHUTDOWN_PENDING */ \ + TYPE_SCTP_FUNC(sctp_sf_violation), \ + /* SCTP_STATE_SHUTDOWN_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_do_9_2_final), \ + /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ + TYPE_SCTP_FUNC(sctp_sf_violation), \ + /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_do_9_2_final), \ +} /* TYPE_SCTP_SHUTDOWN_ACK */ + +#define TYPE_SCTP_ERROR { \ + /* SCTP_STATE_CLOSED */ \ + TYPE_SCTP_FUNC(sctp_sf_ootb), \ + /* SCTP_STATE_COOKIE_WAIT */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_COOKIE_ECHOED */ \ + TYPE_SCTP_FUNC(sctp_sf_cookie_echoed_err), \ + /* SCTP_STATE_ESTABLISHED */ \ + TYPE_SCTP_FUNC(sctp_sf_operr_notify), \ + /* SCTP_STATE_SHUTDOWN_PENDING */ \ + TYPE_SCTP_FUNC(sctp_sf_operr_notify), \ + /* SCTP_STATE_SHUTDOWN_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ + TYPE_SCTP_FUNC(sctp_sf_operr_notify), \ + /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ +} /* TYPE_SCTP_ERROR */ + +#define TYPE_SCTP_COOKIE_ECHO { \ + /* SCTP_STATE_CLOSED */ 
\ + TYPE_SCTP_FUNC(sctp_sf_do_5_1D_ce), \ + /* SCTP_STATE_COOKIE_WAIT */ \ + TYPE_SCTP_FUNC(sctp_sf_do_5_2_4_dupcook), \ + /* SCTP_STATE_COOKIE_ECHOED */ \ + TYPE_SCTP_FUNC(sctp_sf_do_5_2_4_dupcook), \ + /* SCTP_STATE_ESTABLISHED */ \ + TYPE_SCTP_FUNC(sctp_sf_do_5_2_4_dupcook), \ + /* SCTP_STATE_SHUTDOWN_PENDING */ \ + TYPE_SCTP_FUNC(sctp_sf_do_5_2_4_dupcook), \ + /* SCTP_STATE_SHUTDOWN_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_do_5_2_4_dupcook), \ + /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ + TYPE_SCTP_FUNC(sctp_sf_do_5_2_4_dupcook), \ + /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_do_5_2_4_dupcook), \ +} /* TYPE_SCTP_COOKIE_ECHO */ + +#define TYPE_SCTP_COOKIE_ACK { \ + /* SCTP_STATE_CLOSED */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_COOKIE_WAIT */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_COOKIE_ECHOED */ \ + TYPE_SCTP_FUNC(sctp_sf_do_5_1E_ca), \ + /* SCTP_STATE_ESTABLISHED */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_SHUTDOWN_PENDING */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_SHUTDOWN_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ +} /* TYPE_SCTP_COOKIE_ACK */ + +#define TYPE_SCTP_ECN_ECNE { \ + /* SCTP_STATE_CLOSED */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_COOKIE_WAIT */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_COOKIE_ECHOED */ \ + TYPE_SCTP_FUNC(sctp_sf_do_ecne), \ + /* SCTP_STATE_ESTABLISHED */ \ + TYPE_SCTP_FUNC(sctp_sf_do_ecne), \ + /* SCTP_STATE_SHUTDOWN_PENDING */ \ + TYPE_SCTP_FUNC(sctp_sf_do_ecne), \ + /* SCTP_STATE_SHUTDOWN_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_do_ecne), \ + /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ + TYPE_SCTP_FUNC(sctp_sf_do_ecne), \ + /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ +} /* TYPE_SCTP_ECN_ECNE */ + +#define TYPE_SCTP_ECN_CWR { \ + /* SCTP_STATE_CLOSED */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_COOKIE_WAIT */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_COOKIE_ECHOED */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_ESTABLISHED */ \ + TYPE_SCTP_FUNC(sctp_sf_do_ecn_cwr), \ + /* SCTP_STATE_SHUTDOWN_PENDING */ \ + TYPE_SCTP_FUNC(sctp_sf_do_ecn_cwr), \ + /* SCTP_STATE_SHUTDOWN_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_do_ecn_cwr), \ + /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ +} /* TYPE_SCTP_ECN_CWR */ + +#define TYPE_SCTP_SHUTDOWN_COMPLETE { \ + /* SCTP_STATE_CLOSED */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_COOKIE_WAIT */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_COOKIE_ECHOED */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_ESTABLISHED */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_SHUTDOWN_PENDING */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_SHUTDOWN_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_do_4_C), \ +} /* TYPE_SCTP_SHUTDOWN_COMPLETE */ + +/* The primary index for this table is the chunk type. + * The secondary index for this table is the state. + * + * For base protocol (RFC 2960). 
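To make the two-level indexing concrete before the real table: below is a minimal user-space sketch (illustrative only, not part of this patch; all names are hypothetical) of a chunk-type-by-state dispatch in the same shape as chunk_event_table.

    #include <stdio.h>

    typedef struct { int (*fn)(void); const char *name; } sm_entry_t;

    static int eat_data(void) { return puts("deliver DATA"); }
    static int discard(void)  { return puts("discard chunk"); }

    enum { ST_CLOSED, ST_ESTABLISHED, ST_NUM };  /* secondary index: state */
    enum { CID_DATA, CID_NUM };                  /* primary index: chunk type */

    #define ENTRY(f) { .fn = f, .name = #f }     /* same idea as TYPE_SCTP_FUNC() */

    static const sm_entry_t table[CID_NUM][ST_NUM] = {
        /* CID_DATA */ { ENTRY(discard), ENTRY(eat_data) },
    };

    int main(void)
    {
        /* A DATA chunk arriving on an established association: */
        return table[CID_DATA][ST_ESTABLISHED].fn();
    }

One row per chunk type, one column per state; an out-of-range subtype falls back to the bug entry, which is exactly what DO_LOOKUP() above enforces.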
+ */ +static const sctp_sm_table_entry_t chunk_event_table[SCTP_NUM_BASE_CHUNK_TYPES][SCTP_STATE_NUM_STATES] = { + TYPE_SCTP_DATA, + TYPE_SCTP_INIT, + TYPE_SCTP_INIT_ACK, + TYPE_SCTP_SACK, + TYPE_SCTP_HEARTBEAT, + TYPE_SCTP_HEARTBEAT_ACK, + TYPE_SCTP_ABORT, + TYPE_SCTP_SHUTDOWN, + TYPE_SCTP_SHUTDOWN_ACK, + TYPE_SCTP_ERROR, + TYPE_SCTP_COOKIE_ECHO, + TYPE_SCTP_COOKIE_ACK, + TYPE_SCTP_ECN_ECNE, + TYPE_SCTP_ECN_CWR, + TYPE_SCTP_SHUTDOWN_COMPLETE, +}; /* state_fn_t chunk_event_table[][] */ + +#define TYPE_SCTP_ASCONF { \ + /* SCTP_STATE_CLOSED */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_COOKIE_WAIT */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_COOKIE_ECHOED */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_ESTABLISHED */ \ + TYPE_SCTP_FUNC(sctp_sf_do_asconf), \ + /* SCTP_STATE_SHUTDOWN_PENDING */ \ + TYPE_SCTP_FUNC(sctp_sf_do_asconf), \ + /* SCTP_STATE_SHUTDOWN_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_do_asconf), \ + /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ + TYPE_SCTP_FUNC(sctp_sf_do_asconf), \ + /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ +} /* TYPE_SCTP_ASCONF */ + +#define TYPE_SCTP_ASCONF_ACK { \ + /* SCTP_STATE_CLOSED */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_COOKIE_WAIT */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_COOKIE_ECHOED */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_ESTABLISHED */ \ + TYPE_SCTP_FUNC(sctp_sf_do_asconf_ack), \ + /* SCTP_STATE_SHUTDOWN_PENDING */ \ + TYPE_SCTP_FUNC(sctp_sf_do_asconf_ack), \ + /* SCTP_STATE_SHUTDOWN_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_do_asconf_ack), \ + /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ + TYPE_SCTP_FUNC(sctp_sf_do_asconf_ack), \ + /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ +} /* TYPE_SCTP_ASCONF_ACK */ + +/* The primary index for this table is the chunk type. + * The secondary index for this table is the state. + */ +static const sctp_sm_table_entry_t addip_chunk_event_table[SCTP_NUM_ADDIP_CHUNK_TYPES][SCTP_STATE_NUM_STATES] = { + TYPE_SCTP_ASCONF, + TYPE_SCTP_ASCONF_ACK, +}; /*state_fn_t addip_chunk_event_table[][] */ + +#define TYPE_SCTP_FWD_TSN { \ + /* SCTP_STATE_CLOSED */ \ + TYPE_SCTP_FUNC(sctp_sf_ootb), \ + /* SCTP_STATE_COOKIE_WAIT */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_COOKIE_ECHOED */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_ESTABLISHED */ \ + TYPE_SCTP_FUNC(sctp_sf_eat_fwd_tsn), \ + /* SCTP_STATE_SHUTDOWN_PENDING */ \ + TYPE_SCTP_FUNC(sctp_sf_eat_fwd_tsn), \ + /* SCTP_STATE_SHUTDOWN_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_eat_fwd_tsn_fast), \ + /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ +} /* TYPE_SCTP_FWD_TSN */ + +/* The primary index for this table is the chunk type. + * The secondary index for this table is the state. 
+ */ +static const sctp_sm_table_entry_t prsctp_chunk_event_table[SCTP_NUM_PRSCTP_CHUNK_TYPES][SCTP_STATE_NUM_STATES] = { + TYPE_SCTP_FWD_TSN, +}; /*state_fn_t prsctp_chunk_event_table[][] */ + +#define TYPE_SCTP_AUTH { \ + /* SCTP_STATE_CLOSED */ \ + TYPE_SCTP_FUNC(sctp_sf_ootb), \ + /* SCTP_STATE_COOKIE_WAIT */ \ + TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ + /* SCTP_STATE_COOKIE_ECHOED */ \ + TYPE_SCTP_FUNC(sctp_sf_eat_auth), \ + /* SCTP_STATE_ESTABLISHED */ \ + TYPE_SCTP_FUNC(sctp_sf_eat_auth), \ + /* SCTP_STATE_SHUTDOWN_PENDING */ \ + TYPE_SCTP_FUNC(sctp_sf_eat_auth), \ + /* SCTP_STATE_SHUTDOWN_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_eat_auth), \ + /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ + TYPE_SCTP_FUNC(sctp_sf_eat_auth), \ + /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_eat_auth), \ +} /* TYPE_SCTP_AUTH */ + +/* The primary index for this table is the chunk type. + * The secondary index for this table is the state. + */ +static const sctp_sm_table_entry_t auth_chunk_event_table[SCTP_NUM_AUTH_CHUNK_TYPES][SCTP_STATE_NUM_STATES] = { + TYPE_SCTP_AUTH, +}; /*state_fn_t auth_chunk_event_table[][] */ + +static const sctp_sm_table_entry_t +chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = { + /* SCTP_STATE_CLOSED */ + TYPE_SCTP_FUNC(sctp_sf_ootb), + /* SCTP_STATE_COOKIE_WAIT */ + TYPE_SCTP_FUNC(sctp_sf_unk_chunk), + /* SCTP_STATE_COOKIE_ECHOED */ + TYPE_SCTP_FUNC(sctp_sf_unk_chunk), + /* SCTP_STATE_ESTABLISHED */ + TYPE_SCTP_FUNC(sctp_sf_unk_chunk), + /* SCTP_STATE_SHUTDOWN_PENDING */ + TYPE_SCTP_FUNC(sctp_sf_unk_chunk), + /* SCTP_STATE_SHUTDOWN_SENT */ + TYPE_SCTP_FUNC(sctp_sf_unk_chunk), + /* SCTP_STATE_SHUTDOWN_RECEIVED */ + TYPE_SCTP_FUNC(sctp_sf_unk_chunk), + /* SCTP_STATE_SHUTDOWN_ACK_SENT */ + TYPE_SCTP_FUNC(sctp_sf_unk_chunk), +}; /* chunk unknown */ + + +#define TYPE_SCTP_PRIMITIVE_ASSOCIATE { \ + /* SCTP_STATE_CLOSED */ \ + TYPE_SCTP_FUNC(sctp_sf_do_prm_asoc), \ + /* SCTP_STATE_COOKIE_WAIT */ \ + TYPE_SCTP_FUNC(sctp_sf_not_impl), \ + /* SCTP_STATE_COOKIE_ECHOED */ \ + TYPE_SCTP_FUNC(sctp_sf_not_impl), \ + /* SCTP_STATE_ESTABLISHED */ \ + TYPE_SCTP_FUNC(sctp_sf_not_impl), \ + /* SCTP_STATE_SHUTDOWN_PENDING */ \ + TYPE_SCTP_FUNC(sctp_sf_not_impl), \ + /* SCTP_STATE_SHUTDOWN_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_not_impl), \ + /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ + TYPE_SCTP_FUNC(sctp_sf_not_impl), \ + /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_not_impl), \ +} /* TYPE_SCTP_PRIMITIVE_ASSOCIATE */ + +#define TYPE_SCTP_PRIMITIVE_SHUTDOWN { \ + /* SCTP_STATE_CLOSED */ \ + TYPE_SCTP_FUNC(sctp_sf_error_closed), \ + /* SCTP_STATE_COOKIE_WAIT */ \ + TYPE_SCTP_FUNC(sctp_sf_cookie_wait_prm_shutdown), \ + /* SCTP_STATE_COOKIE_ECHOED */ \ + TYPE_SCTP_FUNC(sctp_sf_cookie_echoed_prm_shutdown),\ + /* SCTP_STATE_ESTABLISHED */ \ + TYPE_SCTP_FUNC(sctp_sf_do_9_2_prm_shutdown), \ + /* SCTP_STATE_SHUTDOWN_PENDING */ \ + TYPE_SCTP_FUNC(sctp_sf_ignore_primitive), \ + /* SCTP_STATE_SHUTDOWN_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_ignore_primitive), \ + /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ + TYPE_SCTP_FUNC(sctp_sf_ignore_primitive), \ + /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_ignore_primitive), \ +} /* TYPE_SCTP_PRIMITIVE_SHUTDOWN */ + +#define TYPE_SCTP_PRIMITIVE_ABORT { \ + /* SCTP_STATE_CLOSED */ \ + TYPE_SCTP_FUNC(sctp_sf_error_closed), \ + /* SCTP_STATE_COOKIE_WAIT */ \ + TYPE_SCTP_FUNC(sctp_sf_cookie_wait_prm_abort), \ + /* SCTP_STATE_COOKIE_ECHOED */ \ + TYPE_SCTP_FUNC(sctp_sf_cookie_echoed_prm_abort), \ + /* SCTP_STATE_ESTABLISHED */ \ + 
TYPE_SCTP_FUNC(sctp_sf_do_9_1_prm_abort), \ + /* SCTP_STATE_SHUTDOWN_PENDING */ \ + TYPE_SCTP_FUNC(sctp_sf_shutdown_pending_prm_abort), \ + /* SCTP_STATE_SHUTDOWN_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_shutdown_sent_prm_abort), \ + /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ + TYPE_SCTP_FUNC(sctp_sf_do_9_1_prm_abort), \ + /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_shutdown_ack_sent_prm_abort), \ +} /* TYPE_SCTP_PRIMITIVE_ABORT */ + +#define TYPE_SCTP_PRIMITIVE_SEND { \ + /* SCTP_STATE_CLOSED */ \ + TYPE_SCTP_FUNC(sctp_sf_error_closed), \ + /* SCTP_STATE_COOKIE_WAIT */ \ + TYPE_SCTP_FUNC(sctp_sf_do_prm_send), \ + /* SCTP_STATE_COOKIE_ECHOED */ \ + TYPE_SCTP_FUNC(sctp_sf_do_prm_send), \ + /* SCTP_STATE_ESTABLISHED */ \ + TYPE_SCTP_FUNC(sctp_sf_do_prm_send), \ + /* SCTP_STATE_SHUTDOWN_PENDING */ \ + TYPE_SCTP_FUNC(sctp_sf_error_shutdown), \ + /* SCTP_STATE_SHUTDOWN_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_error_shutdown), \ + /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ + TYPE_SCTP_FUNC(sctp_sf_error_shutdown), \ + /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_error_shutdown), \ +} /* TYPE_SCTP_PRIMITIVE_SEND */ + +#define TYPE_SCTP_PRIMITIVE_REQUESTHEARTBEAT { \ + /* SCTP_STATE_CLOSED */ \ + TYPE_SCTP_FUNC(sctp_sf_error_closed), \ + /* SCTP_STATE_COOKIE_WAIT */ \ + TYPE_SCTP_FUNC(sctp_sf_do_prm_requestheartbeat), \ + /* SCTP_STATE_COOKIE_ECHOED */ \ + TYPE_SCTP_FUNC(sctp_sf_do_prm_requestheartbeat), \ + /* SCTP_STATE_ESTABLISHED */ \ + TYPE_SCTP_FUNC(sctp_sf_do_prm_requestheartbeat), \ + /* SCTP_STATE_SHUTDOWN_PENDING */ \ + TYPE_SCTP_FUNC(sctp_sf_do_prm_requestheartbeat), \ + /* SCTP_STATE_SHUTDOWN_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_do_prm_requestheartbeat), \ + /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ + TYPE_SCTP_FUNC(sctp_sf_do_prm_requestheartbeat), \ + /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_do_prm_requestheartbeat), \ +} /* TYPE_SCTP_PRIMITIVE_REQUESTHEARTBEAT */ + +#define TYPE_SCTP_PRIMITIVE_ASCONF { \ + /* SCTP_STATE_CLOSED */ \ + TYPE_SCTP_FUNC(sctp_sf_error_closed), \ + /* SCTP_STATE_COOKIE_WAIT */ \ + TYPE_SCTP_FUNC(sctp_sf_error_closed), \ + /* SCTP_STATE_COOKIE_ECHOED */ \ + TYPE_SCTP_FUNC(sctp_sf_error_closed), \ + /* SCTP_STATE_ESTABLISHED */ \ + TYPE_SCTP_FUNC(sctp_sf_do_prm_asconf), \ + /* SCTP_STATE_SHUTDOWN_PENDING */ \ + TYPE_SCTP_FUNC(sctp_sf_do_prm_asconf), \ + /* SCTP_STATE_SHUTDOWN_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_do_prm_asconf), \ + /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ + TYPE_SCTP_FUNC(sctp_sf_do_prm_asconf), \ + /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_error_shutdown), \ +} /* TYPE_SCTP_PRIMITIVE_ASCONF */ + +/* The primary index for this table is the primitive type. + * The secondary index for this table is the state. 
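The TYPE_SCTP_PRIMITIVE_SEND row above has a directly observable effect from user space: once the association has entered a shutdown state, new sends are routed to sctp_sf_error_shutdown() and fail. A rough probe (a sketch only; assumes a connected one-to-one style SCTP socket, and the exact errno is not guaranteed here):

    #include <stdio.h>
    #include <sys/socket.h>

    /* sd: a connected SOCK_STREAM, IPPROTO_SCTP socket */
    static void probe_send_after_shutdown(int sd)
    {
        shutdown(sd, SHUT_WR);  /* drives the association toward SHUTDOWN_PENDING/SENT */
        if (send(sd, "x", 1, 0) < 0)
            perror("send after shutdown");  /* the SEND primitive now hits sctp_sf_error_shutdown() */
    }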
+ */ +static const sctp_sm_table_entry_t primitive_event_table[SCTP_NUM_PRIMITIVE_TYPES][SCTP_STATE_NUM_STATES] = { + TYPE_SCTP_PRIMITIVE_ASSOCIATE, + TYPE_SCTP_PRIMITIVE_SHUTDOWN, + TYPE_SCTP_PRIMITIVE_ABORT, + TYPE_SCTP_PRIMITIVE_SEND, + TYPE_SCTP_PRIMITIVE_REQUESTHEARTBEAT, + TYPE_SCTP_PRIMITIVE_ASCONF, +}; + +#define TYPE_SCTP_OTHER_NO_PENDING_TSN { \ + /* SCTP_STATE_CLOSED */ \ + TYPE_SCTP_FUNC(sctp_sf_ignore_other), \ + /* SCTP_STATE_COOKIE_WAIT */ \ + TYPE_SCTP_FUNC(sctp_sf_ignore_other), \ + /* SCTP_STATE_COOKIE_ECHOED */ \ + TYPE_SCTP_FUNC(sctp_sf_ignore_other), \ + /* SCTP_STATE_ESTABLISHED */ \ + TYPE_SCTP_FUNC(sctp_sf_do_no_pending_tsn), \ + /* SCTP_STATE_SHUTDOWN_PENDING */ \ + TYPE_SCTP_FUNC(sctp_sf_do_9_2_start_shutdown), \ + /* SCTP_STATE_SHUTDOWN_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_ignore_other), \ + /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ + TYPE_SCTP_FUNC(sctp_sf_do_9_2_shutdown_ack), \ + /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_ignore_other), \ +} + +#define TYPE_SCTP_OTHER_ICMP_PROTO_UNREACH { \ + /* SCTP_STATE_CLOSED */ \ + TYPE_SCTP_FUNC(sctp_sf_ignore_other), \ + /* SCTP_STATE_COOKIE_WAIT */ \ + TYPE_SCTP_FUNC(sctp_sf_cookie_wait_icmp_abort), \ + /* SCTP_STATE_COOKIE_ECHOED */ \ + TYPE_SCTP_FUNC(sctp_sf_ignore_other), \ + /* SCTP_STATE_ESTABLISHED */ \ + TYPE_SCTP_FUNC(sctp_sf_ignore_other), \ + /* SCTP_STATE_SHUTDOWN_PENDING */ \ + TYPE_SCTP_FUNC(sctp_sf_ignore_other), \ + /* SCTP_STATE_SHUTDOWN_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_ignore_other), \ + /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ + TYPE_SCTP_FUNC(sctp_sf_ignore_other), \ + /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_ignore_other), \ +} + +static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_STATE_NUM_STATES] = { + TYPE_SCTP_OTHER_NO_PENDING_TSN, + TYPE_SCTP_OTHER_ICMP_PROTO_UNREACH, +}; + +#define TYPE_SCTP_EVENT_TIMEOUT_NONE { \ + /* SCTP_STATE_CLOSED */ \ + TYPE_SCTP_FUNC(sctp_sf_bug), \ + /* SCTP_STATE_COOKIE_WAIT */ \ + TYPE_SCTP_FUNC(sctp_sf_bug), \ + /* SCTP_STATE_COOKIE_ECHOED */ \ + TYPE_SCTP_FUNC(sctp_sf_bug), \ + /* SCTP_STATE_ESTABLISHED */ \ + TYPE_SCTP_FUNC(sctp_sf_bug), \ + /* SCTP_STATE_SHUTDOWN_PENDING */ \ + TYPE_SCTP_FUNC(sctp_sf_bug), \ + /* SCTP_STATE_SHUTDOWN_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_bug), \ + /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ + TYPE_SCTP_FUNC(sctp_sf_bug), \ + /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_bug), \ +} + +#define TYPE_SCTP_EVENT_TIMEOUT_T1_COOKIE { \ + /* SCTP_STATE_CLOSED */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_COOKIE_WAIT */ \ + TYPE_SCTP_FUNC(sctp_sf_bug), \ + /* SCTP_STATE_COOKIE_ECHOED */ \ + TYPE_SCTP_FUNC(sctp_sf_t1_cookie_timer_expire), \ + /* SCTP_STATE_ESTABLISHED */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_SHUTDOWN_PENDING */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_SHUTDOWN_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ +} + +#define TYPE_SCTP_EVENT_TIMEOUT_T1_INIT { \ + /* SCTP_STATE_CLOSED */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_COOKIE_WAIT */ \ + TYPE_SCTP_FUNC(sctp_sf_t1_init_timer_expire), \ + /* SCTP_STATE_COOKIE_ECHOED */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_ESTABLISHED */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_SHUTDOWN_PENDING */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* 
SCTP_STATE_SHUTDOWN_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ +} + +#define TYPE_SCTP_EVENT_TIMEOUT_T2_SHUTDOWN { \ + /* SCTP_STATE_CLOSED */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_COOKIE_WAIT */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_COOKIE_ECHOED */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_ESTABLISHED */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_SHUTDOWN_PENDING */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_SHUTDOWN_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_t2_timer_expire), \ + /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_t2_timer_expire), \ +} + +#define TYPE_SCTP_EVENT_TIMEOUT_T3_RTX { \ + /* SCTP_STATE_CLOSED */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_COOKIE_WAIT */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_COOKIE_ECHOED */ \ + TYPE_SCTP_FUNC(sctp_sf_do_6_3_3_rtx), \ + /* SCTP_STATE_ESTABLISHED */ \ + TYPE_SCTP_FUNC(sctp_sf_do_6_3_3_rtx), \ + /* SCTP_STATE_SHUTDOWN_PENDING */ \ + TYPE_SCTP_FUNC(sctp_sf_do_6_3_3_rtx), \ + /* SCTP_STATE_SHUTDOWN_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ + TYPE_SCTP_FUNC(sctp_sf_do_6_3_3_rtx), \ + /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ +} + +#define TYPE_SCTP_EVENT_TIMEOUT_T4_RTO { \ + /* SCTP_STATE_CLOSED */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_COOKIE_WAIT */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_COOKIE_ECHOED */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_ESTABLISHED */ \ + TYPE_SCTP_FUNC(sctp_sf_t4_timer_expire), \ + /* SCTP_STATE_SHUTDOWN_PENDING */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_SHUTDOWN_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ +} + +#define TYPE_SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD { \ + /* SCTP_STATE_CLOSED */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_COOKIE_WAIT */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_COOKIE_ECHOED */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_ESTABLISHED */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_SHUTDOWN_PENDING */ \ + TYPE_SCTP_FUNC(sctp_sf_t5_timer_expire), \ + /* SCTP_STATE_SHUTDOWN_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_t5_timer_expire), \ + /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ +} + +#define TYPE_SCTP_EVENT_TIMEOUT_HEARTBEAT { \ + /* SCTP_STATE_CLOSED */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_COOKIE_WAIT */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_COOKIE_ECHOED */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_ESTABLISHED */ \ + TYPE_SCTP_FUNC(sctp_sf_sendbeat_8_3), \ + /* SCTP_STATE_SHUTDOWN_PENDING */ \ + TYPE_SCTP_FUNC(sctp_sf_sendbeat_8_3), \ + /* SCTP_STATE_SHUTDOWN_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ + TYPE_SCTP_FUNC(sctp_sf_sendbeat_8_3), \ + /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ + 
TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ +} + +#define TYPE_SCTP_EVENT_TIMEOUT_SACK { \ + /* SCTP_STATE_CLOSED */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_COOKIE_WAIT */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_COOKIE_ECHOED */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_ESTABLISHED */ \ + TYPE_SCTP_FUNC(sctp_sf_do_6_2_sack), \ + /* SCTP_STATE_SHUTDOWN_PENDING */ \ + TYPE_SCTP_FUNC(sctp_sf_do_6_2_sack), \ + /* SCTP_STATE_SHUTDOWN_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_do_6_2_sack), \ + /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ +} + +#define TYPE_SCTP_EVENT_TIMEOUT_AUTOCLOSE { \ + /* SCTP_STATE_CLOSED */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_COOKIE_WAIT */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_COOKIE_ECHOED */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_ESTABLISHED */ \ + TYPE_SCTP_FUNC(sctp_sf_autoclose_timer_expire), \ + /* SCTP_STATE_SHUTDOWN_PENDING */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_SHUTDOWN_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ + /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ + TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ +} + +static const sctp_sm_table_entry_t timeout_event_table[SCTP_NUM_TIMEOUT_TYPES][SCTP_STATE_NUM_STATES] = { + TYPE_SCTP_EVENT_TIMEOUT_NONE, + TYPE_SCTP_EVENT_TIMEOUT_T1_COOKIE, + TYPE_SCTP_EVENT_TIMEOUT_T1_INIT, + TYPE_SCTP_EVENT_TIMEOUT_T2_SHUTDOWN, + TYPE_SCTP_EVENT_TIMEOUT_T3_RTX, + TYPE_SCTP_EVENT_TIMEOUT_T4_RTO, + TYPE_SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD, + TYPE_SCTP_EVENT_TIMEOUT_HEARTBEAT, + TYPE_SCTP_EVENT_TIMEOUT_SACK, + TYPE_SCTP_EVENT_TIMEOUT_AUTOCLOSE, +}; + +static const sctp_sm_table_entry_t *sctp_chunk_event_lookup(struct net *net, + sctp_cid_t cid, + sctp_state_t state) +{ + if (state > SCTP_STATE_MAX) + return &bug; + + if (cid <= SCTP_CID_BASE_MAX) + return &chunk_event_table[cid][state]; + + if (net->sctp.prsctp_enable) { + if (cid == SCTP_CID_FWD_TSN) + return &prsctp_chunk_event_table[0][state]; + } + + if (net->sctp.addip_enable) { + if (cid == SCTP_CID_ASCONF) + return &addip_chunk_event_table[0][state]; + + if (cid == SCTP_CID_ASCONF_ACK) + return &addip_chunk_event_table[1][state]; + } + + if (net->sctp.auth_enable) { + if (cid == SCTP_CID_AUTH) + return &auth_chunk_event_table[0][state]; + } + + return &chunk_event_table_unknown[state]; +} diff --git a/net/sctp/socket.c b/net/sctp/socket.c new file mode 100644 index 000000000..5f6c4e613 --- /dev/null +++ b/net/sctp/socket.c @@ -0,0 +1,7419 @@ +/* SCTP kernel implementation + * (C) Copyright IBM Corp. 2001, 2004 + * Copyright (c) 1999-2000 Cisco, Inc. + * Copyright (c) 1999-2001 Motorola, Inc. + * Copyright (c) 2001-2003 Intel Corp. + * Copyright (c) 2001-2002 Nokia, Inc. + * Copyright (c) 2001 La Monte H.P. Yarroll + * + * This file is part of the SCTP kernel implementation + * + * These functions interface with the sockets layer to implement the + * SCTP Extensions for the Sockets API. + * + * Note that the descriptions from the specification are USER level + * functions--this file is the functions which populate the struct proto + * for SCTP which is the BOTTOM of the sockets interface. 
+ * + * This SCTP implementation is free software; + * you can redistribute it and/or modify it under the terms of + * the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This SCTP implementation is distributed in the hope that it + * will be useful, but WITHOUT ANY WARRANTY; without even the implied + * ************************ + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with GNU CC; see the file COPYING. If not, see + * <http://www.gnu.org/licenses/>. + * + * Please send any bug reports or fixes you make to the + * email address(es): + * lksctp developers <linux-sctp@vger.kernel.org> + * + * Written or modified by: + * La Monte H.P. Yarroll <piggy@acm.org> + * Narasimha Budihal <narsi@refcode.org> + * Karl Knutson <karl@athena.chicago.il.us> + * Jon Grimm <jgrimm@us.ibm.com> + * Xingang Guo <xingang.guo@intel.com> + * Daisy Chang <daisyc@us.ibm.com> + * Sridhar Samudrala <samudrala@us.ibm.com> + * Inaky Perez-Gonzalez <inaky.gonzalez@intel.com> + * Ardelle Fan <ardelle.fan@intel.com> + * Ryan Layer <rmlayer@us.ibm.com> + * Anup Pemmaiah <pemmaiah@cc.usu.edu> + * Kevin Gao <kevin.gao@intel.com> + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/wait.h> +#include <linux/time.h> +#include <linux/ip.h> +#include <linux/capability.h> +#include <linux/fcntl.h> +#include <linux/poll.h> +#include <linux/init.h> +#include <linux/crypto.h> +#include <linux/slab.h> +#include <linux/file.h> +#include <linux/compat.h> + +#include <net/ip.h> +#include <net/icmp.h> +#include <net/route.h> +#include <net/ipv6.h> +#include <net/inet_common.h> +#include <net/busy_poll.h> + +#include <linux/socket.h> /* for sa_family_t */ +#include <linux/export.h> +#include <net/sock.h> +#include <net/sctp/sctp.h> +#include <net/sctp/sm.h> + +/* Forward declarations for internal helper functions. 
*/ +static int sctp_writeable(struct sock *sk); +static void sctp_wfree(struct sk_buff *skb); +static int sctp_wait_for_sndbuf(struct sctp_association *, long *timeo_p, + size_t msg_len); +static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p); +static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p); +static int sctp_wait_for_accept(struct sock *sk, long timeo); +static void sctp_wait_for_close(struct sock *sk, long timeo); +static void sctp_destruct_sock(struct sock *sk); +static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt, + union sctp_addr *addr, int len); +static int sctp_bindx_add(struct sock *, struct sockaddr *, int); +static int sctp_bindx_rem(struct sock *, struct sockaddr *, int); +static int sctp_send_asconf_add_ip(struct sock *, struct sockaddr *, int); +static int sctp_send_asconf_del_ip(struct sock *, struct sockaddr *, int); +static int sctp_send_asconf(struct sctp_association *asoc, + struct sctp_chunk *chunk); +static int sctp_do_bind(struct sock *, union sctp_addr *, int); +static int sctp_autobind(struct sock *sk); +static void sctp_sock_migrate(struct sock *, struct sock *, + struct sctp_association *, sctp_socket_type_t); + +static int sctp_memory_pressure; +static atomic_long_t sctp_memory_allocated; +struct percpu_counter sctp_sockets_allocated; + +static void sctp_enter_memory_pressure(struct sock *sk) +{ + sctp_memory_pressure = 1; +} + + +/* Get the sndbuf space available at the time on the association. */ +static inline int sctp_wspace(struct sctp_association *asoc) +{ + int amt; + + if (asoc->ep->sndbuf_policy) + amt = asoc->sndbuf_used; + else + amt = sk_wmem_alloc_get(asoc->base.sk); + + if (amt >= asoc->base.sk->sk_sndbuf) { + if (asoc->base.sk->sk_userlocks & SOCK_SNDBUF_LOCK) + amt = 0; + else { + amt = sk_stream_wspace(asoc->base.sk); + if (amt < 0) + amt = 0; + } + } else { + amt = asoc->base.sk->sk_sndbuf - amt; + } + return amt; +} + +/* Increment the used sndbuf space count of the corresponding association by + * the size of the outgoing data chunk. + * Also, set the skb destructor for sndbuf accounting later. + * + * Since it is always 1-1 between chunk and skb, and also a new skb is always + * allocated for chunk bundling in sctp_packet_transmit(), we can use the + * destructor in the data chunk skb for the purpose of the sndbuf space + * tracking. + */ +static inline void sctp_set_owner_w(struct sctp_chunk *chunk) +{ + struct sctp_association *asoc = chunk->asoc; + struct sock *sk = asoc->base.sk; + + /* The sndbuf space is tracked per association. */ + sctp_association_hold(asoc); + + skb_set_owner_w(chunk->skb, sk); + + chunk->skb->destructor = sctp_wfree; + /* Save the chunk pointer in skb for sctp_wfree to use later. */ + skb_shinfo(chunk->skb)->destructor_arg = chunk; + + asoc->sndbuf_used += SCTP_DATA_SNDSIZE(chunk) + + sizeof(struct sk_buff) + + sizeof(struct sctp_chunk); + + atomic_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc); + sk->sk_wmem_queued += chunk->skb->truesize; + sk_mem_charge(sk, chunk->skb->truesize); +} + +/* Verify that this is a valid address. */ +static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr, + int len) +{ + struct sctp_af *af; + + /* Verify basic sockaddr. */ + af = sctp_sockaddr_af(sctp_sk(sk), addr, len); + if (!af) + return -EINVAL; + + /* Is this a valid SCTP address? 
*/ + if (!af->addr_valid(addr, sctp_sk(sk), NULL)) + return -EINVAL; + + if (!sctp_sk(sk)->pf->send_verify(sctp_sk(sk), (addr))) + return -EINVAL; + + return 0; +} + +/* Look up the association by its id. If this is not a UDP-style + * socket, the ID field is always ignored. + */ +struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id) +{ + struct sctp_association *asoc = NULL; + + /* If this is not a UDP-style socket, assoc id should be ignored. */ + if (!sctp_style(sk, UDP)) { + /* Return NULL if the socket state is not ESTABLISHED. It + * could be a TCP-style listening socket or a socket which + * hasn't yet called connect() to establish an association. + */ + if (!sctp_sstate(sk, ESTABLISHED)) + return NULL; + + /* Get the first and the only association from the list. */ + if (!list_empty(&sctp_sk(sk)->ep->asocs)) + asoc = list_entry(sctp_sk(sk)->ep->asocs.next, + struct sctp_association, asocs); + return asoc; + } + + /* Otherwise this is a UDP-style socket. */ + if (!id || (id == (sctp_assoc_t)-1)) + return NULL; + + spin_lock_bh(&sctp_assocs_id_lock); + asoc = (struct sctp_association *)idr_find(&sctp_assocs_id, (int)id); + spin_unlock_bh(&sctp_assocs_id_lock); + + if (!asoc || (asoc->base.sk != sk) || asoc->base.dead) + return NULL; + + return asoc; +} + +/* Look up the transport from an address and an assoc id. If both address and + * id are specified, the associations matching the address and the id should be + * the same. + */ +static struct sctp_transport *sctp_addr_id2transport(struct sock *sk, + struct sockaddr_storage *addr, + sctp_assoc_t id) +{ + struct sctp_association *addr_asoc = NULL, *id_asoc = NULL; + struct sctp_transport *transport; + union sctp_addr *laddr = (union sctp_addr *)addr; + + addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep, + laddr, + &transport); + + if (!addr_asoc) + return NULL; + + id_asoc = sctp_id2assoc(sk, id); + if (id_asoc && (id_asoc != addr_asoc)) + return NULL; + + sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk), + (union sctp_addr *)addr); + + return transport; +} + +/* API 3.1.2 bind() - UDP Style Syntax + * The syntax of bind() is, + * + * ret = bind(int sd, struct sockaddr *addr, int addrlen); + * + * sd - the socket descriptor returned by socket(). + * addr - the address structure (struct sockaddr_in or struct + * sockaddr_in6 [RFC 2553]), + * addr_len - the size of the address structure. + */ +static int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len) +{ + int retval = 0; + + lock_sock(sk); + + pr_debug("%s: sk:%p, addr:%p, addr_len:%d\n", __func__, sk, + addr, addr_len); + + /* Disallow binding twice. */ + if (!sctp_sk(sk)->ep->base.bind_addr.port) + retval = sctp_do_bind(sk, (union sctp_addr *)addr, + addr_len); + else + retval = -EINVAL; + + release_sock(sk); + + return retval; +} + +static long sctp_get_port_local(struct sock *, union sctp_addr *); + +/* Verify this is a valid sockaddr. */ +static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt, + union sctp_addr *addr, int len) +{ + struct sctp_af *af; + + /* Check minimum size. */ + if (len < sizeof (struct sockaddr)) + return NULL; + + /* V4 mapped address are really of AF_INET family */ + if (addr->sa.sa_family == AF_INET6 && + ipv6_addr_v4mapped(&addr->v6.sin6_addr)) { + if (!opt->pf->af_supported(AF_INET, opt)) + return NULL; + } else { + /* Does this PF support this AF? */ + if (!opt->pf->af_supported(addr->sa.sa_family, opt)) + return NULL; + } + + /* If we get this far, af is valid. 
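A note on sctp_id2assoc() above: since it ignores the id on TCP-style sockets but requires a valid one on UDP-style sockets, user-space callers of per-association options must fill the id in accordingly. A sketch, assuming lksctp-tools headers and an id taken from an SCTP_ASSOC_CHANGE notification:

    #include <string.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <netinet/sctp.h>

    static int get_assoc_state(int sd, sctp_assoc_t id)
    {
        struct sctp_status st;
        socklen_t len = sizeof(st);

        memset(&st, 0, sizeof(st));
        st.sstat_assoc_id = id;  /* ignored on one-to-one sockets, required on one-to-many */
        if (getsockopt(sd, IPPROTO_SCTP, SCTP_STATUS, &st, &len) < 0)
            return -1;
        return st.sstat_state;
    }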
*/ + af = sctp_get_af_specific(addr->sa.sa_family); + + if (len < af->sockaddr_len) + return NULL; + + return af; +} + +/* Bind a local address either to an endpoint or to an association. */ +static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len) +{ + struct net *net = sock_net(sk); + struct sctp_sock *sp = sctp_sk(sk); + struct sctp_endpoint *ep = sp->ep; + struct sctp_bind_addr *bp = &ep->base.bind_addr; + struct sctp_af *af; + unsigned short snum; + int ret = 0; + + /* Common sockaddr verification. */ + af = sctp_sockaddr_af(sp, addr, len); + if (!af) { + pr_debug("%s: sk:%p, newaddr:%p, len:%d EINVAL\n", + __func__, sk, addr, len); + return -EINVAL; + } + + snum = ntohs(addr->v4.sin_port); + + pr_debug("%s: sk:%p, new addr:%pISc, port:%d, new port:%d, len:%d\n", + __func__, sk, &addr->sa, bp->port, snum, len); + + /* PF specific bind() address verification. */ + if (!sp->pf->bind_verify(sp, addr)) + return -EADDRNOTAVAIL; + + /* We must either be unbound, or bind to the same port. + * It's OK to allow 0 ports if we are already bound. + * We'll just inherit an already bound port in this case. + */ + if (bp->port) { + if (!snum) + snum = bp->port; + else if (snum != bp->port) { + pr_debug("%s: new port %d doesn't match existing port " + "%d\n", __func__, snum, bp->port); + return -EINVAL; + } + } + + if (snum && snum < PROT_SOCK && + !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) + return -EACCES; + + /* See if the address matches any of the addresses we may have + * already bound before checking against other endpoints. + */ + if (sctp_bind_addr_match(bp, addr, sp)) + return -EINVAL; + + /* Make sure we are allowed to bind here. + * The function sctp_get_port_local() does duplicate address + * detection. + */ + addr->v4.sin_port = htons(snum); + if ((ret = sctp_get_port_local(sk, addr))) { + return -EADDRINUSE; + } + + /* Refresh ephemeral port. */ + if (!bp->port) + bp->port = inet_sk(sk)->inet_num; + + /* Add the address to the bind address list. + * Use GFP_ATOMIC since BHs will be disabled. + */ + ret = sctp_add_bind_addr(bp, addr, SCTP_ADDR_SRC, GFP_ATOMIC); + + /* Copy back into socket for getsockname() use. */ + if (!ret) { + inet_sk(sk)->inet_sport = htons(inet_sk(sk)->inet_num); + sp->pf->to_sk_saddr(addr, sk); + } + + return ret; +} + + /* ADDIP Section 4.1.1 Congestion Control of ASCONF Chunks + * + * R1) One and only one ASCONF Chunk MAY be in transit and unacknowledged + * at any one time. If a sender, after sending an ASCONF chunk, decides + * it needs to transfer another ASCONF Chunk, it MUST wait until the + * ASCONF-ACK Chunk returns from the previous ASCONF Chunk before sending a + * subsequent ASCONF. Note this restriction binds each side, so at any + * time two ASCONFs may be in transit on any given association (one sent + * from each endpoint). + */ +static int sctp_send_asconf(struct sctp_association *asoc, + struct sctp_chunk *chunk) +{ + struct net *net = sock_net(asoc->base.sk); + int retval = 0; + + /* If there is an outstanding ASCONF chunk, queue it for later + * transmission. + */ + if (asoc->addip_last_asconf) { + list_add_tail(&chunk->list, &asoc->addip_chunk_list); + goto out; + } + + /* Hold the chunk until an ASCONF_ACK is received. */ + sctp_chunk_hold(chunk); + retval = sctp_primitive_ASCONF(net, asoc, chunk); + if (retval) + sctp_chunk_free(chunk); + else + asoc->addip_last_asconf = chunk; + +out: + return retval; +} + +/* Add a list of addresses as bind addresses to the local endpoint or + * association.
+ * + * Basically run through each address specified in the addrs/addrcnt + * array/length pair, determine if it is IPv6 or IPv4 and call + * sctp_do_bind() on it. + * + * If any of them fails, then the operation will be reversed and the + * ones that were added will be removed. + * + * Only sctp_setsockopt_bindx() is supposed to call this function. + */ +static int sctp_bindx_add(struct sock *sk, struct sockaddr *addrs, int addrcnt) +{ + int cnt; + int retval = 0; + void *addr_buf; + struct sockaddr *sa_addr; + struct sctp_af *af; + + pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", __func__, sk, + addrs, addrcnt); + + addr_buf = addrs; + for (cnt = 0; cnt < addrcnt; cnt++) { + /* The list may contain either IPv4 or IPv6 address; + * determine the address length for walking thru the list. + */ + sa_addr = addr_buf; + af = sctp_get_af_specific(sa_addr->sa_family); + if (!af) { + retval = -EINVAL; + goto err_bindx_add; + } + + retval = sctp_do_bind(sk, (union sctp_addr *)sa_addr, + af->sockaddr_len); + + addr_buf += af->sockaddr_len; + +err_bindx_add: + if (retval < 0) { + /* Failed. Cleanup the ones that have been added */ + if (cnt > 0) + sctp_bindx_rem(sk, addrs, cnt); + return retval; + } + } + + return retval; +} + +/* Send an ASCONF chunk with Add IP address parameters to all the peers of the + * associations that are part of the endpoint indicating that a list of local + * addresses are added to the endpoint. + * + * If any of the addresses is already in the bind address list of the + * association, we do not send the chunk for that association. But it will not + * affect other associations. + * + * Only sctp_setsockopt_bindx() is supposed to call this function. + */ +static int sctp_send_asconf_add_ip(struct sock *sk, + struct sockaddr *addrs, + int addrcnt) +{ + struct net *net = sock_net(sk); + struct sctp_sock *sp; + struct sctp_endpoint *ep; + struct sctp_association *asoc; + struct sctp_bind_addr *bp; + struct sctp_chunk *chunk; + struct sctp_sockaddr_entry *laddr; + union sctp_addr *addr; + union sctp_addr saveaddr; + void *addr_buf; + struct sctp_af *af; + struct list_head *p; + int i; + int retval = 0; + + if (!net->sctp.addip_enable) + return retval; + + sp = sctp_sk(sk); + ep = sp->ep; + + pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", + __func__, sk, addrs, addrcnt); + + list_for_each_entry(asoc, &ep->asocs, asocs) { + if (!asoc->peer.asconf_capable) + continue; + + if (asoc->peer.addip_disabled_mask & SCTP_PARAM_ADD_IP) + continue; + + if (!sctp_state(asoc, ESTABLISHED)) + continue; + + /* Check if any address in the packed array of addresses is + * in the bind address list of the association. If so, + * do not send the asconf chunk to its peer, but continue with + * other associations. + */ + addr_buf = addrs; + for (i = 0; i < addrcnt; i++) { + addr = addr_buf; + af = sctp_get_af_specific(addr->v4.sin_family); + if (!af) { + retval = -EINVAL; + goto out; + } + + if (sctp_assoc_lookup_laddr(asoc, addr)) + break; + + addr_buf += af->sockaddr_len; + } + if (i < addrcnt) + continue; + + /* Use the first valid address in bind addr list of + * association as Address Parameter of ASCONF CHUNK. + */ + bp = &asoc->base.bind_addr; + p = bp->address_list.next; + laddr = list_entry(p, struct sctp_sockaddr_entry, list); + chunk = sctp_make_asconf_update_ip(asoc, &laddr->a, addrs, + addrcnt, SCTP_PARAM_ADD_IP); + if (!chunk) { + retval = -ENOMEM; + goto out; + } + + /* Add the new addresses to the bind address list with + * use_as_src set to 0. 
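The packed-array walk these helpers perform (advance addr_buf by the per-family sockaddr_len) is the same convention user space must follow when building the buffer. A stand-alone sketch of the walk, assuming only the two families the code here handles:

    #include <stddef.h>
    #include <sys/socket.h>
    #include <netinet/in.h>

    /* Count the entries in a "packed array" of sockaddrs; the stride of
     * each element depends on its own family, so mixed v4/v6 lists work. */
    static int count_packed_addrs(const void *addrs, size_t size)
    {
        const char *p = addrs, *end = p + size;
        int n = 0;

        while (p < end) {
            const struct sockaddr *sa = (const struct sockaddr *)p;
            size_t len;

            if ((size_t)(end - p) < sizeof(sa_family_t))
                return -1;  /* truncated entry: EINVAL in the kernel code */
            len = sa->sa_family == AF_INET  ? sizeof(struct sockaddr_in) :
                  sa->sa_family == AF_INET6 ? sizeof(struct sockaddr_in6) : 0;
            if (!len || (size_t)(end - p) < len)
                return -1;  /* unknown family, or entry overflows the buffer */
            n++;
            p += len;
        }
        return n;
    }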
+ */ + addr_buf = addrs; + for (i = 0; i < addrcnt; i++) { + addr = addr_buf; + af = sctp_get_af_specific(addr->v4.sin_family); + memcpy(&saveaddr, addr, af->sockaddr_len); + retval = sctp_add_bind_addr(bp, &saveaddr, + SCTP_ADDR_NEW, GFP_ATOMIC); + addr_buf += af->sockaddr_len; + } + if (asoc->src_out_of_asoc_ok) { + struct sctp_transport *trans; + + list_for_each_entry(trans, + &asoc->peer.transport_addr_list, transports) { + /* Clear the source and route cache */ + dst_release(trans->dst); + trans->cwnd = min(4*asoc->pathmtu, max_t(__u32, + 2*asoc->pathmtu, 4380)); + trans->ssthresh = asoc->peer.i.a_rwnd; + trans->rto = asoc->rto_initial; + sctp_max_rto(asoc, trans); + trans->rtt = trans->srtt = trans->rttvar = 0; + sctp_transport_route(trans, NULL, + sctp_sk(asoc->base.sk)); + } + } + retval = sctp_send_asconf(asoc, chunk); + } + +out: + return retval; +} + +/* Remove a list of addresses from the bind address list. Do not remove the + * last address. + * + * Basically run through each address specified in the addrs/addrcnt + * array/length pair, determine if it is IPv6 or IPv4 and call + * sctp_del_bind() on it. + * + * If any of them fails, then the operation will be reversed and the + * ones that were removed will be added back. + * + * At least one address has to be left; if only one address is + * available, the operation will return -EBUSY. + * + * Only sctp_setsockopt_bindx() is supposed to call this function. + */ +static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt) +{ + struct sctp_sock *sp = sctp_sk(sk); + struct sctp_endpoint *ep = sp->ep; + int cnt; + struct sctp_bind_addr *bp = &ep->base.bind_addr; + int retval = 0; + void *addr_buf; + union sctp_addr *sa_addr; + struct sctp_af *af; + + pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", + __func__, sk, addrs, addrcnt); + + addr_buf = addrs; + for (cnt = 0; cnt < addrcnt; cnt++) { + /* If the bind address list is empty or if there is only one + * bind address, there is nothing more to be removed (we need + * at least one address here). + */ + if (list_empty(&bp->address_list) || + (sctp_list_single_entry(&bp->address_list))) { + retval = -EBUSY; + goto err_bindx_rem; + } + + sa_addr = addr_buf; + af = sctp_get_af_specific(sa_addr->sa.sa_family); + if (!af) { + retval = -EINVAL; + goto err_bindx_rem; + } + + if (!af->addr_valid(sa_addr, sp, NULL)) { + retval = -EADDRNOTAVAIL; + goto err_bindx_rem; + } + + if (sa_addr->v4.sin_port && + sa_addr->v4.sin_port != htons(bp->port)) { + retval = -EINVAL; + goto err_bindx_rem; + } + + if (!sa_addr->v4.sin_port) + sa_addr->v4.sin_port = htons(bp->port); + + /* FIXME - There is probably a need to check if sk->sk_saddr and + * sk->sk_rcv_addr are currently set to one of the addresses to + * be removed. This is something which needs to be looked into + * when we are fixing the outstanding issues with multi-homing + * socket routing and failover schemes. Refer to comments in + * sctp_do_bind(). -daisy + */ + retval = sctp_del_bind_addr(bp, sa_addr); + + addr_buf += af->sockaddr_len; +err_bindx_rem: + if (retval < 0) { + /* Failed. Add back the ones that have been removed */ + if (cnt > 0) + sctp_bindx_add(sk, addrs, cnt); + return retval; + } + } + + return retval; +} + +/* Send an ASCONF chunk with Delete IP address parameters to all the peers of + * the associations that are part of the endpoint indicating that a list of + * local addresses are removed from the endpoint.
+ * + * If any of the addresses is already in the bind address list of the + * association, we do not send the chunk for that association. But it will not + * affect other associations. + * + * Only sctp_setsockopt_bindx() is supposed to call this function. + */ +static int sctp_send_asconf_del_ip(struct sock *sk, + struct sockaddr *addrs, + int addrcnt) +{ + struct net *net = sock_net(sk); + struct sctp_sock *sp; + struct sctp_endpoint *ep; + struct sctp_association *asoc; + struct sctp_transport *transport; + struct sctp_bind_addr *bp; + struct sctp_chunk *chunk; + union sctp_addr *laddr; + void *addr_buf; + struct sctp_af *af; + struct sctp_sockaddr_entry *saddr; + int i; + int retval = 0; + int stored = 0; + + chunk = NULL; + if (!net->sctp.addip_enable) + return retval; + + sp = sctp_sk(sk); + ep = sp->ep; + + pr_debug("%s: sk:%p, addrs:%p, addrcnt:%d\n", + __func__, sk, addrs, addrcnt); + + list_for_each_entry(asoc, &ep->asocs, asocs) { + + if (!asoc->peer.asconf_capable) + continue; + + if (asoc->peer.addip_disabled_mask & SCTP_PARAM_DEL_IP) + continue; + + if (!sctp_state(asoc, ESTABLISHED)) + continue; + + /* Check if any address in the packed array of addresses is + * not present in the bind address list of the association. + * If so, do not send the asconf chunk to its peer, but + * continue with other associations. + */ + addr_buf = addrs; + for (i = 0; i < addrcnt; i++) { + laddr = addr_buf; + af = sctp_get_af_specific(laddr->v4.sin_family); + if (!af) { + retval = -EINVAL; + goto out; + } + + if (!sctp_assoc_lookup_laddr(asoc, laddr)) + break; + + addr_buf += af->sockaddr_len; + } + if (i < addrcnt) + continue; + + /* Find one address in the association's bind address list + * that is not in the packed array of addresses. This is to + * make sure that we do not delete all the addresses in the + * association. + */ + bp = &asoc->base.bind_addr; + laddr = sctp_find_unmatch_addr(bp, (union sctp_addr *)addrs, + addrcnt, sp); + if ((laddr == NULL) && (addrcnt == 1)) { + if (asoc->asconf_addr_del_pending) + continue; + asoc->asconf_addr_del_pending = + kzalloc(sizeof(union sctp_addr), GFP_ATOMIC); + if (asoc->asconf_addr_del_pending == NULL) { + retval = -ENOMEM; + goto out; + } + asoc->asconf_addr_del_pending->sa.sa_family = + addrs->sa_family; + asoc->asconf_addr_del_pending->v4.sin_port = + htons(bp->port); + if (addrs->sa_family == AF_INET) { + struct sockaddr_in *sin; + + sin = (struct sockaddr_in *)addrs; + asoc->asconf_addr_del_pending->v4.sin_addr.s_addr = sin->sin_addr.s_addr; + } else if (addrs->sa_family == AF_INET6) { + struct sockaddr_in6 *sin6; + + sin6 = (struct sockaddr_in6 *)addrs; + asoc->asconf_addr_del_pending->v6.sin6_addr = sin6->sin6_addr; + } + + pr_debug("%s: keep the last address asoc:%p %pISc at %p\n", + __func__, asoc, &asoc->asconf_addr_del_pending->sa, + asoc->asconf_addr_del_pending); + + asoc->src_out_of_asoc_ok = 1; + stored = 1; + goto skip_mkasconf; + } + + if (laddr == NULL) + return -EINVAL; + + /* We do not need RCU protection throughout this loop + * because this is done under a socket lock from the + * setsockopt call. + */ + chunk = sctp_make_asconf_update_ip(asoc, laddr, addrs, addrcnt, + SCTP_PARAM_DEL_IP); + if (!chunk) { + retval = -ENOMEM; + goto out; + } + +skip_mkasconf: + /* Reset use_as_src flag for the addresses in the bind address + * list that are to be deleted. 
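Both the add and delete paths hand their chunk to sctp_send_asconf(), which implements rule R1 quoted earlier: at most one ASCONF in flight per direction, with later chunks queued until the ASCONF-ACK arrives. The discipline in isolation, as a sketch with hypothetical types:

    #include <stddef.h>

    struct asconf_chunk { struct asconf_chunk *next; };

    struct asconf_state {
        struct asconf_chunk *in_flight;  /* addip_last_asconf in the real code */
        struct asconf_chunk *queue_head, *queue_tail;
    };

    static void asconf_send_or_queue(struct asconf_state *s, struct asconf_chunk *c,
                                     void (*transmit)(struct asconf_chunk *))
    {
        if (s->in_flight) {  /* R1: an ASCONF is outstanding; defer this one */
            c->next = NULL;
            if (s->queue_tail)
                s->queue_tail->next = c;
            else
                s->queue_head = c;
            s->queue_tail = c;
            return;
        }
        s->in_flight = c;    /* held until the ASCONF-ACK comes back */
        transmit(c);
    }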
+ */ + addr_buf = addrs; + for (i = 0; i < addrcnt; i++) { + laddr = addr_buf; + af = sctp_get_af_specific(laddr->v4.sin_family); + list_for_each_entry(saddr, &bp->address_list, list) { + if (sctp_cmp_addr_exact(&saddr->a, laddr)) + saddr->state = SCTP_ADDR_DEL; + } + addr_buf += af->sockaddr_len; + } + + /* Update the route and saddr entries for all the transports + * as some of the addresses in the bind address list are + * about to be deleted and cannot be used as source addresses. + */ + list_for_each_entry(transport, &asoc->peer.transport_addr_list, + transports) { + dst_release(transport->dst); + sctp_transport_route(transport, NULL, + sctp_sk(asoc->base.sk)); + } + + if (stored) + /* We don't need to transmit ASCONF */ + continue; + retval = sctp_send_asconf(asoc, chunk); + } +out: + return retval; +} + +/* set addr events to assocs in the endpoint. ep and addr_wq must be locked */ +int sctp_asconf_mgmt(struct sctp_sock *sp, struct sctp_sockaddr_entry *addrw) +{ + struct sock *sk = sctp_opt2sk(sp); + union sctp_addr *addr; + struct sctp_af *af; + + /* It is safe to write port space in caller. */ + addr = &addrw->a; + addr->v4.sin_port = htons(sp->ep->base.bind_addr.port); + af = sctp_get_af_specific(addr->sa.sa_family); + if (!af) + return -EINVAL; + if (sctp_verify_addr(sk, addr, af->sockaddr_len)) + return -EINVAL; + + if (addrw->state == SCTP_ADDR_NEW) + return sctp_send_asconf_add_ip(sk, (struct sockaddr *)addr, 1); + else + return sctp_send_asconf_del_ip(sk, (struct sockaddr *)addr, 1); +} + +/* Helper for tunneling sctp_bindx() requests through sctp_setsockopt() + * + * API 8.1 + * int sctp_bindx(int sd, struct sockaddr *addrs, int addrcnt, + * int flags); + * + * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses. + * If the sd is an IPv6 socket, the addresses passed can either be IPv4 + * or IPv6 addresses. + * + * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see + * Section 3.1.2 for this usage. + * + * addrs is a pointer to an array of one or more socket addresses. Each + * address is contained in its appropriate structure (i.e. struct + * sockaddr_in or struct sockaddr_in6) the family of the address type + * must be used to distinguish the address length (note that this + * representation is termed a "packed array" of addresses). The caller + * specifies the number of addresses in the array with addrcnt. + * + * On success, sctp_bindx() returns 0. On failure, sctp_bindx() returns + * -1, and sets errno to the appropriate error code. + * + * For SCTP, the port given in each socket address must be the same, or + * sctp_bindx() will fail, setting errno to EINVAL. + * + * The flags parameter is formed from the bitwise OR of zero or more of + * the following currently defined flags: + * + * SCTP_BINDX_ADD_ADDR + * + * SCTP_BINDX_REM_ADDR + * + * SCTP_BINDX_ADD_ADDR directs SCTP to add the given addresses to the + * association, and SCTP_BINDX_REM_ADDR directs SCTP to remove the given + * addresses from the association. The two flags are mutually exclusive; + * if both are given, sctp_bindx() will fail with EINVAL. A caller may + * not remove all addresses from an association; sctp_bindx() will + * reject such an attempt with EINVAL. + * + * An application can use sctp_bindx(SCTP_BINDX_ADD_ADDR) to associate + * additional addresses with an endpoint after calling bind(). 
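For example, a user-space sketch of the ADD case, using the lksctp-tools wrapper that tunnels into this setsockopt path (addresses are illustrative documentation addresses; both entries must carry the same port):

    #include <string.h>
    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <netinet/sctp.h>

    static int add_two_addrs(int sd)
    {
        struct sockaddr_in a[2];

        memset(a, 0, sizeof(a));
        a[0].sin_family = a[1].sin_family = AF_INET;
        a[0].sin_port = a[1].sin_port = htons(5000);
        inet_pton(AF_INET, "192.0.2.1", &a[0].sin_addr);
        inet_pton(AF_INET, "192.0.2.2", &a[1].sin_addr);

        /* Packed array of two sockaddr_in entries. */
        return sctp_bindx(sd, (struct sockaddr *)a, 2, SCTP_BINDX_ADD_ADDR);
    }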
Likewise, an application can use + * sctp_bindx(SCTP_BINDX_REM_ADDR) to remove some addresses a listening + * socket is associated with, so that no newly accepted association will be + * associated with those addresses. If the endpoint supports dynamic + * address reconfiguration, an SCTP_BINDX_REM_ADDR or SCTP_BINDX_ADD_ADDR may cause the + * endpoint to send the appropriate message to the peer to change the + * peer's address lists. + * + * Adding and removing addresses from a connected association is + * optional functionality. Implementations that do not support this + * functionality should return EOPNOTSUPP. + * + * Basically this does nothing but copy the addresses from user to kernel + * land and invoke either sctp_bindx_add() or sctp_bindx_rem() on the sk. + * This is used for tunneling the sctp_bindx() request through sctp_setsockopt() + * from userspace. + * + * As an optimization we don't use copy_from_user() directly: we first do the + * cheap sanity checks (buffer size, and a healthy user-space + * pointer); if all of those succeed, then we allocate the memory + * (the expensive operation) needed to copy the data into the kernel. Then we do + * the copying without re-checking the user space area + * (__copy_from_user()). + * + * On exit there is no need to do sockfd_put(), sys_setsockopt() does + * it. + * + * sk The sk of the socket + * addrs The pointer to the addresses in user land + * addrs_size Size of the addrs buffer + * op Operation to perform (add or remove, see the flags of + * sctp_bindx) + * + * Returns 0 if ok, <0 errno code on error. + */ +static int sctp_setsockopt_bindx(struct sock *sk, + struct sockaddr __user *addrs, + int addrs_size, int op) +{ + struct sockaddr *kaddrs; + int err; + int addrcnt = 0; + int walk_size = 0; + struct sockaddr *sa_addr; + void *addr_buf; + struct sctp_af *af; + + pr_debug("%s: sk:%p addrs:%p addrs_size:%d opt:%d\n", + __func__, sk, addrs, addrs_size, op); + + if (unlikely(addrs_size <= 0)) + return -EINVAL; + + /* Check the user passed a healthy pointer. */ + if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size))) + return -EFAULT; + + /* Alloc space for the address array in kernel memory. */ + kaddrs = kmalloc(addrs_size, GFP_KERNEL); + if (unlikely(!kaddrs)) + return -ENOMEM; + + if (__copy_from_user(kaddrs, addrs, addrs_size)) { + kfree(kaddrs); + return -EFAULT; + } + + /* Walk through the addrs buffer and count the number of addresses. */ + addr_buf = kaddrs; + while (walk_size < addrs_size) { + if (walk_size + sizeof(sa_family_t) > addrs_size) { + kfree(kaddrs); + return -EINVAL; + } + + sa_addr = addr_buf; + af = sctp_get_af_specific(sa_addr->sa_family); + + /* If the address family is not supported or if this address + * causes the address buffer to overflow return EINVAL. + */ + if (!af || (walk_size + af->sockaddr_len) > addrs_size) { + kfree(kaddrs); + return -EINVAL; + } + addrcnt++; + addr_buf += af->sockaddr_len; + walk_size += af->sockaddr_len; + } + + /* Do the work. */ + switch (op) { + case SCTP_BINDX_ADD_ADDR: + err = sctp_bindx_add(sk, kaddrs, addrcnt); + if (err) + goto out; + err = sctp_send_asconf_add_ip(sk, kaddrs, addrcnt); + break; + + case SCTP_BINDX_REM_ADDR: + err = sctp_bindx_rem(sk, kaddrs, addrcnt); + if (err) + goto out; + err = sctp_send_asconf_del_ip(sk, kaddrs, addrcnt); + break; + + default: + err = -EINVAL; + break; + } + +out: + kfree(kaddrs); + + return err; +} + +/* __sctp_connect(struct sock *sk, struct sockaddr *kaddrs, int addrs_size) + * + * Common routine for handling connect() and sctp_connectx(). + * Connect will come in with just a single address.
+ */ +static int __sctp_connect(struct sock *sk, + struct sockaddr *kaddrs, + int addrs_size, + sctp_assoc_t *assoc_id) +{ + struct net *net = sock_net(sk); + struct sctp_sock *sp; + struct sctp_endpoint *ep; + struct sctp_association *asoc = NULL; + struct sctp_association *asoc2; + struct sctp_transport *transport; + union sctp_addr to; + sctp_scope_t scope; + long timeo; + int err = 0; + int addrcnt = 0; + int walk_size = 0; + union sctp_addr *sa_addr = NULL; + void *addr_buf; + unsigned short port; + unsigned int f_flags = 0; + + sp = sctp_sk(sk); + ep = sp->ep; + + /* connect() cannot be done on a socket that is already in ESTABLISHED + * state - UDP-style peeled off socket or a TCP-style socket that + * is already connected. + * It cannot be done even on a TCP-style listening socket. + */ + if (sctp_sstate(sk, ESTABLISHED) || + (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))) { + err = -EISCONN; + goto out_free; + } + + /* Walk through the addrs buffer and count the number of addresses. */ + addr_buf = kaddrs; + while (walk_size < addrs_size) { + struct sctp_af *af; + + if (walk_size + sizeof(sa_family_t) > addrs_size) { + err = -EINVAL; + goto out_free; + } + + sa_addr = addr_buf; + af = sctp_get_af_specific(sa_addr->sa.sa_family); + + /* If the address family is not supported or if this address + * causes the address buffer to overflow return EINVAL. + */ + if (!af || (walk_size + af->sockaddr_len) > addrs_size) { + err = -EINVAL; + goto out_free; + } + + port = ntohs(sa_addr->v4.sin_port); + + /* Save current address so we can work with it */ + memcpy(&to, sa_addr, af->sockaddr_len); + + err = sctp_verify_addr(sk, &to, af->sockaddr_len); + if (err) + goto out_free; + + /* Make sure the destination port is correctly set + * in all addresses. + */ + if (asoc && asoc->peer.port && asoc->peer.port != port) { + err = -EINVAL; + goto out_free; + } + + /* Check if there already is a matching association on the + * endpoint (other than the one created here). + */ + asoc2 = sctp_endpoint_lookup_assoc(ep, &to, &transport); + if (asoc2 && asoc2 != asoc) { + if (asoc2->state >= SCTP_STATE_ESTABLISHED) + err = -EISCONN; + else + err = -EALREADY; + goto out_free; + } + + /* If we could not find a matching association on the endpoint, + * make sure that there is no peeled-off association matching + * the peer address even on another socket. + */ + if (sctp_endpoint_is_peeled_off(ep, &to)) { + err = -EADDRNOTAVAIL; + goto out_free; + } + + if (!asoc) { + /* If a bind() or sctp_bindx() is not called prior to + * an sctp_connectx() call, the system picks an + * ephemeral port and will choose an address set + * equivalent to binding with a wildcard address. + */ + if (!ep->base.bind_addr.port) { + if (sctp_autobind(sk)) { + err = -EAGAIN; + goto out_free; + } + } else { + /* + * If an unprivileged user inherits a 1-many + * style socket with open associations on a + * privileged port, it MAY be permitted to + * accept new associations, but it SHOULD NOT + * be permitted to open new associations. + */ + if (ep->base.bind_addr.port < PROT_SOCK && + !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) { + err = -EACCES; + goto out_free; + } + } + + scope = sctp_scope(&to); + asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL); + if (!asoc) { + err = -ENOMEM; + goto out_free; + } + + err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, + GFP_KERNEL); + if (err < 0) { + goto out_free; + } + + } + + /* Prime the peer's transport structures. 
*/
+		transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL,
+						SCTP_UNKNOWN);
+		if (!transport) {
+			err = -ENOMEM;
+			goto out_free;
+		}
+
+		addrcnt++;
+		addr_buf += af->sockaddr_len;
+		walk_size += af->sockaddr_len;
+	}
+
+	/* In case the user of sctp_connectx() wants an association
+	 * id back, assign one now.
+	 */
+	if (assoc_id) {
+		err = sctp_assoc_set_id(asoc, GFP_KERNEL);
+		if (err < 0)
+			goto out_free;
+	}
+
+	err = sctp_primitive_ASSOCIATE(net, asoc, NULL);
+	if (err < 0)
+		goto out_free;
+
+	/* Initialize sk's dport and daddr for getpeername() */
+	inet_sk(sk)->inet_dport = htons(asoc->peer.port);
+	sp->pf->to_sk_daddr(sa_addr, sk);
+	sk->sk_err = 0;
+
+	/* in-kernel sockets don't generally have a file allocated to them
+	 * if all they do is call sock_create_kern().
+	 */
+	if (sk->sk_socket->file)
+		f_flags = sk->sk_socket->file->f_flags;
+
+	timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK);
+
+	err = sctp_wait_for_connect(asoc, &timeo);
+	if ((err == 0 || err == -EINPROGRESS) && assoc_id)
+		*assoc_id = asoc->assoc_id;
+
+	/* Don't free association on exit. */
+	asoc = NULL;
+
+out_free:
+	pr_debug("%s: took out_free path with asoc:%p kaddrs:%p err:%d\n",
+		 __func__, asoc, kaddrs, err);
+
+	if (asoc) {
+		/* sctp_primitive_ASSOCIATE may have added this association
+		 * to the hash table; try to unhash it, just in case. It's
+		 * a no-op if it wasn't hashed, so we're safe.
+		 */
+		sctp_unhash_established(asoc);
+		sctp_association_free(asoc);
+	}
+	return err;
+}
+
+/* Helper for tunneling sctp_connectx() requests through sctp_setsockopt()
+ *
+ * API 8.9
+ * int sctp_connectx(int sd, struct sockaddr *addrs, int addrcnt,
+ *			sctp_assoc_t *asoc);
+ *
+ * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses.
+ * If the sd is an IPv6 socket, the addresses passed can either be IPv4
+ * or IPv6 addresses.
+ *
+ * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see
+ * Section 3.1.2 for this usage.
+ *
+ * addrs is a pointer to an array of one or more socket addresses. Each
+ * address is contained in its appropriate structure (i.e. struct
+ * sockaddr_in or struct sockaddr_in6); the family of the address type
+ * must be used to distinguish the address length (note that this
+ * representation is termed a "packed array" of addresses). The caller
+ * specifies the number of addresses in the array with addrcnt.
+ *
+ * On success, sctp_connectx() returns 0. It also sets the assoc_id to
+ * the association id of the new association. On failure, sctp_connectx()
+ * returns -1, and sets errno to the appropriate error code. The assoc_id
+ * is not touched by the kernel.
+ *
+ * For SCTP, the port given in each socket address must be the same, or
+ * sctp_connectx() will fail, setting errno to EINVAL.
+ *
+ * An application can use sctp_connectx to initiate an association with
+ * an endpoint that is multi-homed. Much like sctp_bindx() this call
+ * allows a caller to specify multiple addresses at which a peer can be
+ * reached. The way the SCTP stack uses the list of addresses to set up
+ * the association is implementation dependent. This function only
+ * specifies that the stack will try to make use of all the addresses in
+ * the list when needed.
+ *
+ * Note that the list of addresses passed in is only used for setting up
+ * the association. It does not necessarily equal the set of addresses
+ * the peer uses for the resulting association. If the caller wants to
+ * find out the set of peer addresses, it must use sctp_getpaddrs() to
+ * retrieve them after the association has been set up.
+ *
+ * Basically do nothing but copy the addresses from user to kernel land
+ * and invoke __sctp_connect(). This is used for tunneling the
+ * sctp_connectx() request through sctp_setsockopt() from userspace.
+ *
+ * As an optimization, we don't use copy_from_user(): we first do the
+ * sanity checks (buffer size, which is fast, and an access check for a
+ * healthy pointer); if all of those succeed, we allocate the memory
+ * (an expensive operation) needed to copy the data into the kernel,
+ * then do the copy without re-checking the user space area
+ * (__copy_from_user()).
+ *
+ * On exit there is no need to do sockfd_put(); sys_setsockopt() does
+ * it.
+ *
+ * sk		The sk of the socket
+ * addrs	The pointer to the addresses in user land
+ * addrs_size	Size of the addrs buffer
+ *
+ * Returns >=0 if ok, <0 errno code on error.
+ */
+static int __sctp_setsockopt_connectx(struct sock *sk,
+				      struct sockaddr __user *addrs,
+				      int addrs_size,
+				      sctp_assoc_t *assoc_id)
+{
+	int err = 0;
+	struct sockaddr *kaddrs;
+
+	pr_debug("%s: sk:%p addrs:%p addrs_size:%d\n",
+		 __func__, sk, addrs, addrs_size);
+
+	if (unlikely(addrs_size <= 0))
+		return -EINVAL;
+
+	/* Check the user passed a healthy pointer. */
+	if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size)))
+		return -EFAULT;
+
+	/* Alloc space for the address array in kernel memory. */
+	kaddrs = kmalloc(addrs_size, GFP_KERNEL);
+	if (unlikely(!kaddrs))
+		return -ENOMEM;
+
+	if (__copy_from_user(kaddrs, addrs, addrs_size)) {
+		err = -EFAULT;
+	} else {
+		err = __sctp_connect(sk, kaddrs, addrs_size, assoc_id);
+	}
+
+	kfree(kaddrs);
+
+	return err;
+}
+
+/*
+ * This is an older interface. It's kept for backward compatibility
+ * to the option that doesn't provide association id.
+ */
+static int sctp_setsockopt_connectx_old(struct sock *sk,
+					struct sockaddr __user *addrs,
+					int addrs_size)
+{
+	return __sctp_setsockopt_connectx(sk, addrs, addrs_size, NULL);
+}
+
+/*
+ * New interface for the API. Since the API is implemented via a socket
+ * option, to keep it simple we feed the association id back as the
+ * return value of the call. Errors are always negative and association
+ * ids are always positive.
+ */
+static int sctp_setsockopt_connectx(struct sock *sk,
+				    struct sockaddr __user *addrs,
+				    int addrs_size)
+{
+	sctp_assoc_t assoc_id = 0;
+	int err = 0;
+
+	err = __sctp_setsockopt_connectx(sk, addrs, addrs_size, &assoc_id);
+
+	if (err)
+		return err;
+	else
+		return assoc_id;
+}
+
+/*
+ * New (hopefully final) interface for the API.
+ * We use the sctp_getaddrs_old structure so that the user-space library
+ * can avoid any unnecessary allocations. The only difference is that
+ * we store the actual length of the address buffer in the addr_num
+ * structure member. That way we can re-use the existing
+ * code.
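+ *
+ * A minimal user-space sketch of the call being tunneled here, using
+ * the recent lksctp-tools wrapper (illustrative only; peer setup and
+ * error handling elided):
+ *
+ *	struct sockaddr_in peers[2];	(packed array, same port in both)
+ *	sctp_assoc_t id;
+ *
+ *	if (sctp_connectx(sd, (struct sockaddr *)peers, 2, &id) == 0)
+ *		printf("new association id: %d\n", id);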
+ */ +#ifdef CONFIG_COMPAT +struct compat_sctp_getaddrs_old { + sctp_assoc_t assoc_id; + s32 addr_num; + compat_uptr_t addrs; /* struct sockaddr * */ +}; +#endif + +static int sctp_getsockopt_connectx3(struct sock *sk, int len, + char __user *optval, + int __user *optlen) +{ + struct sctp_getaddrs_old param; + sctp_assoc_t assoc_id = 0; + int err = 0; + +#ifdef CONFIG_COMPAT + if (is_compat_task()) { + struct compat_sctp_getaddrs_old param32; + + if (len < sizeof(param32)) + return -EINVAL; + if (copy_from_user(¶m32, optval, sizeof(param32))) + return -EFAULT; + + param.assoc_id = param32.assoc_id; + param.addr_num = param32.addr_num; + param.addrs = compat_ptr(param32.addrs); + } else +#endif + { + if (len < sizeof(param)) + return -EINVAL; + if (copy_from_user(¶m, optval, sizeof(param))) + return -EFAULT; + } + + err = __sctp_setsockopt_connectx(sk, (struct sockaddr __user *) + param.addrs, param.addr_num, + &assoc_id); + if (err == 0 || err == -EINPROGRESS) { + if (copy_to_user(optval, &assoc_id, sizeof(assoc_id))) + return -EFAULT; + if (put_user(sizeof(assoc_id), optlen)) + return -EFAULT; + } + + return err; +} + +/* API 3.1.4 close() - UDP Style Syntax + * Applications use close() to perform graceful shutdown (as described in + * Section 10.1 of [SCTP]) on ALL the associations currently represented + * by a UDP-style socket. + * + * The syntax is + * + * ret = close(int sd); + * + * sd - the socket descriptor of the associations to be closed. + * + * To gracefully shutdown a specific association represented by the + * UDP-style socket, an application should use the sendmsg() call, + * passing no user data, but including the appropriate flag in the + * ancillary data (see Section xxxx). + * + * If sd in the close() call is a branched-off socket representing only + * one association, the shutdown is performed on that association only. + * + * 4.1.6 close() - TCP Style Syntax + * + * Applications use close() to gracefully close down an association. + * + * The syntax is: + * + * int close(int sd); + * + * sd - the socket descriptor of the association to be closed. + * + * After an application calls close() on a socket descriptor, no further + * socket operations will succeed on that descriptor. + * + * API 7.1.4 SO_LINGER + * + * An application using the TCP-style socket can use this option to + * perform the SCTP ABORT primitive. The linger option structure is: + * + * struct linger { + * int l_onoff; // option on/off + * int l_linger; // linger time + * }; + * + * To enable the option, set l_onoff to 1. If the l_linger value is set + * to 0, calling close() is the same as the ABORT primitive. If the + * value is set to a negative value, the setsockopt() call will return + * an error. If the value is set to a positive value linger_time, the + * close() can be blocked for at most linger_time ms. If the graceful + * shutdown phase does not finish during this period, close() will + * return but the graceful shutdown phase continues in the system. + */ +static void sctp_close(struct sock *sk, long timeout) +{ + struct net *net = sock_net(sk); + struct sctp_endpoint *ep; + struct sctp_association *asoc; + struct list_head *pos, *temp; + unsigned int data_was_unread; + + pr_debug("%s: sk:%p, timeout:%ld\n", __func__, sk, timeout); + + lock_sock(sk); + sk->sk_shutdown = SHUTDOWN_MASK; + sk->sk_state = SCTP_SS_CLOSING; + + ep = sctp_sk(sk)->ep; + + /* Clean up any skbs sitting on the receive queue. 
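	 * Any user data purged here is counted; unread data forces the
	 * abortive close path below (an ABORT chunk is sent) instead of
	 * a graceful SHUTDOWN.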
*/ + data_was_unread = sctp_queue_purge_ulpevents(&sk->sk_receive_queue); + data_was_unread += sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby); + + /* Walk all associations on an endpoint. */ + list_for_each_safe(pos, temp, &ep->asocs) { + asoc = list_entry(pos, struct sctp_association, asocs); + + if (sctp_style(sk, TCP)) { + /* A closed association can still be in the list if + * it belongs to a TCP-style listening socket that is + * not yet accepted. If so, free it. If not, send an + * ABORT or SHUTDOWN based on the linger options. + */ + if (sctp_state(asoc, CLOSED)) { + sctp_unhash_established(asoc); + sctp_association_free(asoc); + continue; + } + } + + if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) || + !skb_queue_empty(&asoc->ulpq.reasm) || + (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) { + struct sctp_chunk *chunk; + + chunk = sctp_make_abort_user(asoc, NULL, 0); + if (chunk) + sctp_primitive_ABORT(net, asoc, chunk); + } else + sctp_primitive_SHUTDOWN(net, asoc, NULL); + } + + /* On a TCP-style socket, block for at most linger_time if set. */ + if (sctp_style(sk, TCP) && timeout) + sctp_wait_for_close(sk, timeout); + + /* This will run the backlog queue. */ + release_sock(sk); + + /* Supposedly, no process has access to the socket, but + * the net layers still may. + * Also, sctp_destroy_sock() needs to be called with addr_wq_lock + * held and that should be grabbed before socket lock. + */ + spin_lock_bh(&net->sctp.addr_wq_lock); + bh_lock_sock(sk); + + /* Hold the sock, since sk_common_release() will put sock_put() + * and we have just a little more cleanup. + */ + sock_hold(sk); + sk_common_release(sk); + + bh_unlock_sock(sk); + spin_unlock_bh(&net->sctp.addr_wq_lock); + + sock_put(sk); + + SCTP_DBG_OBJCNT_DEC(sock); +} + +/* Handle EPIPE error. */ +static int sctp_error(struct sock *sk, int flags, int err) +{ + if (err == -EPIPE) + err = sock_error(sk) ? : -EPIPE; + if (err == -EPIPE && !(flags & MSG_NOSIGNAL)) + send_sig(SIGPIPE, current, 0); + return err; +} + +/* API 3.1.3 sendmsg() - UDP Style Syntax + * + * An application uses sendmsg() and recvmsg() calls to transmit data to + * and receive data from its peer. + * + * ssize_t sendmsg(int socket, const struct msghdr *message, + * int flags); + * + * socket - the socket descriptor of the endpoint. + * message - pointer to the msghdr structure which contains a single + * user message and possibly some ancillary data. + * + * See Section 5 for complete description of the data + * structures. + * + * flags - flags sent or received with the user message, see Section + * 5 for complete description of the flags. + * + * Note: This function could use a rewrite especially when explicit + * connect support comes in. + */ +/* BUG: We do not implement the equivalent of sk_stream_wait_memory(). 
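+ *
+ * For reference, a user-space sender typically attaches the
+ * sctp_sndrcvinfo parsed below as ancillary data (illustrative sketch,
+ * not from the API text; remaining msghdr fields elided):
+ *
+ *	char cbuf[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
+ *	struct msghdr mh = { .msg_control = cbuf,
+ *			     .msg_controllen = sizeof(cbuf) };
+ *	struct cmsghdr *c = CMSG_FIRSTHDR(&mh);
+ *
+ *	c->cmsg_level = IPPROTO_SCTP;
+ *	c->cmsg_type = SCTP_SNDRCV;
+ *	c->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
+ *	((struct sctp_sndrcvinfo *)CMSG_DATA(c))->sinfo_stream = 1;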
*/ + +static int sctp_msghdr_parse(const struct msghdr *, sctp_cmsgs_t *); + +static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len) +{ + struct net *net = sock_net(sk); + struct sctp_sock *sp; + struct sctp_endpoint *ep; + struct sctp_association *new_asoc = NULL, *asoc = NULL; + struct sctp_transport *transport, *chunk_tp; + struct sctp_chunk *chunk; + union sctp_addr to; + struct sockaddr *msg_name = NULL; + struct sctp_sndrcvinfo default_sinfo; + struct sctp_sndrcvinfo *sinfo; + struct sctp_initmsg *sinit; + sctp_assoc_t associd = 0; + sctp_cmsgs_t cmsgs = { NULL }; + sctp_scope_t scope; + bool fill_sinfo_ttl = false, wait_connect = false; + struct sctp_datamsg *datamsg; + int msg_flags = msg->msg_flags; + __u16 sinfo_flags = 0; + long timeo; + int err; + + err = 0; + sp = sctp_sk(sk); + ep = sp->ep; + + pr_debug("%s: sk:%p, msg:%p, msg_len:%zu ep:%p\n", __func__, sk, + msg, msg_len, ep); + + /* We cannot send a message over a TCP-style listening socket. */ + if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) { + err = -EPIPE; + goto out_nounlock; + } + + /* Parse out the SCTP CMSGs. */ + err = sctp_msghdr_parse(msg, &cmsgs); + if (err) { + pr_debug("%s: msghdr parse err:%x\n", __func__, err); + goto out_nounlock; + } + + /* Fetch the destination address for this packet. This + * address only selects the association--it is not necessarily + * the address we will send to. + * For a peeled-off socket, msg_name is ignored. + */ + if (!sctp_style(sk, UDP_HIGH_BANDWIDTH) && msg->msg_name) { + int msg_namelen = msg->msg_namelen; + + err = sctp_verify_addr(sk, (union sctp_addr *)msg->msg_name, + msg_namelen); + if (err) + return err; + + if (msg_namelen > sizeof(to)) + msg_namelen = sizeof(to); + memcpy(&to, msg->msg_name, msg_namelen); + msg_name = msg->msg_name; + } + + sinit = cmsgs.init; + if (cmsgs.sinfo != NULL) { + memset(&default_sinfo, 0, sizeof(default_sinfo)); + default_sinfo.sinfo_stream = cmsgs.sinfo->snd_sid; + default_sinfo.sinfo_flags = cmsgs.sinfo->snd_flags; + default_sinfo.sinfo_ppid = cmsgs.sinfo->snd_ppid; + default_sinfo.sinfo_context = cmsgs.sinfo->snd_context; + default_sinfo.sinfo_assoc_id = cmsgs.sinfo->snd_assoc_id; + + sinfo = &default_sinfo; + fill_sinfo_ttl = true; + } else { + sinfo = cmsgs.srinfo; + } + /* Did the user specify SNDINFO/SNDRCVINFO? */ + if (sinfo) { + sinfo_flags = sinfo->sinfo_flags; + associd = sinfo->sinfo_assoc_id; + } + + pr_debug("%s: msg_len:%zu, sinfo_flags:0x%x\n", __func__, + msg_len, sinfo_flags); + + /* SCTP_EOF or SCTP_ABORT cannot be set on a TCP-style socket. */ + if (sctp_style(sk, TCP) && (sinfo_flags & (SCTP_EOF | SCTP_ABORT))) { + err = -EINVAL; + goto out_nounlock; + } + + /* If SCTP_EOF is set, no data can be sent. Disallow sending zero + * length messages when SCTP_EOF|SCTP_ABORT is not set. + * If SCTP_ABORT is set, the message length could be non zero with + * the msg_iov set to the user abort reason. + */ + if (((sinfo_flags & SCTP_EOF) && (msg_len > 0)) || + (!(sinfo_flags & (SCTP_EOF|SCTP_ABORT)) && (msg_len == 0))) { + err = -EINVAL; + goto out_nounlock; + } + + /* If SCTP_ADDR_OVER is set, there must be an address + * specified in msg_name. + */ + if ((sinfo_flags & SCTP_ADDR_OVER) && (!msg->msg_name)) { + err = -EINVAL; + goto out_nounlock; + } + + transport = NULL; + + pr_debug("%s: about to look up association\n", __func__); + + lock_sock(sk); + + /* If a msg_name has been specified, assume this is to be used. */ + if (msg_name) { + /* Look for a matching association on the endpoint. 
*/ + asoc = sctp_endpoint_lookup_assoc(ep, &to, &transport); + if (!asoc) { + /* If we could not find a matching association on the + * endpoint, make sure that it is not a TCP-style + * socket that already has an association or there is + * no peeled-off association on another socket. + */ + if ((sctp_style(sk, TCP) && + sctp_sstate(sk, ESTABLISHED)) || + sctp_endpoint_is_peeled_off(ep, &to)) { + err = -EADDRNOTAVAIL; + goto out_unlock; + } + } + } else { + asoc = sctp_id2assoc(sk, associd); + if (!asoc) { + err = -EPIPE; + goto out_unlock; + } + } + + if (asoc) { + pr_debug("%s: just looked up association:%p\n", __func__, asoc); + + /* We cannot send a message on a TCP-style SCTP_SS_ESTABLISHED + * socket that has an association in CLOSED state. This can + * happen when an accepted socket has an association that is + * already CLOSED. + */ + if (sctp_state(asoc, CLOSED) && sctp_style(sk, TCP)) { + err = -EPIPE; + goto out_unlock; + } + + if (sinfo_flags & SCTP_EOF) { + pr_debug("%s: shutting down association:%p\n", + __func__, asoc); + + sctp_primitive_SHUTDOWN(net, asoc, NULL); + err = 0; + goto out_unlock; + } + if (sinfo_flags & SCTP_ABORT) { + + chunk = sctp_make_abort_user(asoc, msg, msg_len); + if (!chunk) { + err = -ENOMEM; + goto out_unlock; + } + + pr_debug("%s: aborting association:%p\n", + __func__, asoc); + + sctp_primitive_ABORT(net, asoc, chunk); + err = 0; + goto out_unlock; + } + } + + /* Do we need to create the association? */ + if (!asoc) { + pr_debug("%s: there is no association yet\n", __func__); + + if (sinfo_flags & (SCTP_EOF | SCTP_ABORT)) { + err = -EINVAL; + goto out_unlock; + } + + /* Check for invalid stream against the stream counts, + * either the default or the user specified stream counts. + */ + if (sinfo) { + if (!sinit || !sinit->sinit_num_ostreams) { + /* Check against the defaults. */ + if (sinfo->sinfo_stream >= + sp->initmsg.sinit_num_ostreams) { + err = -EINVAL; + goto out_unlock; + } + } else { + /* Check against the requested. */ + if (sinfo->sinfo_stream >= + sinit->sinit_num_ostreams) { + err = -EINVAL; + goto out_unlock; + } + } + } + + /* + * API 3.1.2 bind() - UDP Style Syntax + * If a bind() or sctp_bindx() is not called prior to a + * sendmsg() call that initiates a new association, the + * system picks an ephemeral port and will choose an address + * set equivalent to binding with a wildcard address. + */ + if (!ep->base.bind_addr.port) { + if (sctp_autobind(sk)) { + err = -EAGAIN; + goto out_unlock; + } + } else { + /* + * If an unprivileged user inherits a one-to-many + * style socket with open associations on a privileged + * port, it MAY be permitted to accept new associations, + * but it SHOULD NOT be permitted to open new + * associations. + */ + if (ep->base.bind_addr.port < PROT_SOCK && + !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) { + err = -EACCES; + goto out_unlock; + } + } + + scope = sctp_scope(&to); + new_asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL); + if (!new_asoc) { + err = -ENOMEM; + goto out_unlock; + } + asoc = new_asoc; + err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, GFP_KERNEL); + if (err < 0) { + err = -ENOMEM; + goto out_free; + } + + /* If the SCTP_INIT ancillary data is specified, set all + * the association init values accordingly. 
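		 * Zero-valued fields in the SCTP_INIT cmsg leave the
		 * corresponding defaults untouched, as the checks below
		 * show.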
+ */ + if (sinit) { + if (sinit->sinit_num_ostreams) { + asoc->c.sinit_num_ostreams = + sinit->sinit_num_ostreams; + } + if (sinit->sinit_max_instreams) { + asoc->c.sinit_max_instreams = + sinit->sinit_max_instreams; + } + if (sinit->sinit_max_attempts) { + asoc->max_init_attempts + = sinit->sinit_max_attempts; + } + if (sinit->sinit_max_init_timeo) { + asoc->max_init_timeo = + msecs_to_jiffies(sinit->sinit_max_init_timeo); + } + } + + /* Prime the peer's transport structures. */ + transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL, SCTP_UNKNOWN); + if (!transport) { + err = -ENOMEM; + goto out_free; + } + } + + /* ASSERT: we have a valid association at this point. */ + pr_debug("%s: we have a valid association\n", __func__); + + if (!sinfo) { + /* If the user didn't specify SNDINFO/SNDRCVINFO, make up + * one with some defaults. + */ + memset(&default_sinfo, 0, sizeof(default_sinfo)); + default_sinfo.sinfo_stream = asoc->default_stream; + default_sinfo.sinfo_flags = asoc->default_flags; + default_sinfo.sinfo_ppid = asoc->default_ppid; + default_sinfo.sinfo_context = asoc->default_context; + default_sinfo.sinfo_timetolive = asoc->default_timetolive; + default_sinfo.sinfo_assoc_id = sctp_assoc2id(asoc); + + sinfo = &default_sinfo; + } else if (fill_sinfo_ttl) { + /* In case SNDINFO was specified, we still need to fill + * it with a default ttl from the assoc here. + */ + sinfo->sinfo_timetolive = asoc->default_timetolive; + } + + /* API 7.1.7, the sndbuf size per association bounds the + * maximum size of data that can be sent in a single send call. + */ + if (msg_len > sk->sk_sndbuf) { + err = -EMSGSIZE; + goto out_free; + } + + if (asoc->pmtu_pending) + sctp_assoc_pending_pmtu(sk, asoc); + + /* If fragmentation is disabled and the message length exceeds the + * association fragmentation point, return EMSGSIZE. The I-D + * does not specify what this error is, but this looks like + * a great fit. + */ + if (sctp_sk(sk)->disable_fragments && (msg_len > asoc->frag_point)) { + err = -EMSGSIZE; + goto out_free; + } + + /* Check for invalid stream. */ + if (sinfo->sinfo_stream >= asoc->c.sinit_num_ostreams) { + err = -EINVAL; + goto out_free; + } + + timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); + if (!sctp_wspace(asoc)) { + err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len); + if (err) + goto out_free; + } + + /* If an address is passed with the sendto/sendmsg call, it is used + * to override the primary destination address in the TCP model, or + * when SCTP_ADDR_OVER flag is set in the UDP model. + */ + if ((sctp_style(sk, TCP) && msg_name) || + (sinfo_flags & SCTP_ADDR_OVER)) { + chunk_tp = sctp_assoc_lookup_paddr(asoc, &to); + if (!chunk_tp) { + err = -EINVAL; + goto out_free; + } + } else + chunk_tp = NULL; + + /* Auto-connect, if we aren't connected already. */ + if (sctp_state(asoc, CLOSED)) { + err = sctp_primitive_ASSOCIATE(net, asoc, NULL); + if (err < 0) + goto out_free; + + wait_connect = true; + pr_debug("%s: we associated primitively\n", __func__); + } + + /* Break the message into multiple chunks of maximum size. */ + datamsg = sctp_datamsg_from_user(asoc, sinfo, &msg->msg_iter); + if (IS_ERR(datamsg)) { + err = PTR_ERR(datamsg); + goto out_free; + } + + /* Now send the (possibly) fragmented message. */ + list_for_each_entry(chunk, &datamsg->chunks, frag_list) { + sctp_chunk_hold(chunk); + + /* Do accounting for the write space. */ + sctp_set_owner_w(chunk); + + chunk->transport = chunk_tp; + } + + /* Send it to the lower layers. 
Note: all chunks + * must either fail or succeed. The lower layer + * works that way today. Keep it that way or this + * breaks. + */ + err = sctp_primitive_SEND(net, asoc, datamsg); + /* Did the lower layer accept the chunk? */ + if (err) { + sctp_datamsg_free(datamsg); + goto out_free; + } + + pr_debug("%s: we sent primitively\n", __func__); + + sctp_datamsg_put(datamsg); + err = msg_len; + + if (unlikely(wait_connect)) { + timeo = sock_sndtimeo(sk, msg_flags & MSG_DONTWAIT); + sctp_wait_for_connect(asoc, &timeo); + } + + /* If we are already past ASSOCIATE, the lower + * layers are responsible for association cleanup. + */ + goto out_unlock; + +out_free: + if (new_asoc) { + sctp_unhash_established(asoc); + sctp_association_free(asoc); + } +out_unlock: + release_sock(sk); + +out_nounlock: + return sctp_error(sk, msg_flags, err); + +#if 0 +do_sock_err: + if (msg_len) + err = msg_len; + else + err = sock_error(sk); + goto out; + +do_interrupted: + if (msg_len) + err = msg_len; + goto out; +#endif /* 0 */ +} + +/* This is an extended version of skb_pull() that removes the data from the + * start of a skb even when data is spread across the list of skb's in the + * frag_list. len specifies the total amount of data that needs to be removed. + * when 'len' bytes could be removed from the skb, it returns 0. + * If 'len' exceeds the total skb length, it returns the no. of bytes that + * could not be removed. + */ +static int sctp_skb_pull(struct sk_buff *skb, int len) +{ + struct sk_buff *list; + int skb_len = skb_headlen(skb); + int rlen; + + if (len <= skb_len) { + __skb_pull(skb, len); + return 0; + } + len -= skb_len; + __skb_pull(skb, skb_len); + + skb_walk_frags(skb, list) { + rlen = sctp_skb_pull(list, len); + skb->len -= (len-rlen); + skb->data_len -= (len-rlen); + + if (!rlen) + return 0; + + len = rlen; + } + + return len; +} + +/* API 3.1.3 recvmsg() - UDP Style Syntax + * + * ssize_t recvmsg(int socket, struct msghdr *message, + * int flags); + * + * socket - the socket descriptor of the endpoint. + * message - pointer to the msghdr structure which contains a single + * user message and possibly some ancillary data. + * + * See Section 5 for complete description of the data + * structures. + * + * flags - flags sent or received with the user message, see Section + * 5 for complete description of the flags. + */ +static int sctp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, + int noblock, int flags, int *addr_len) +{ + struct sctp_ulpevent *event = NULL; + struct sctp_sock *sp = sctp_sk(sk); + struct sk_buff *skb; + int copied; + int err = 0; + int skb_len; + + pr_debug("%s: sk:%p, msghdr:%p, len:%zd, noblock:%d, flags:0x%x, " + "addr_len:%p)\n", __func__, sk, msg, len, noblock, flags, + addr_len); + + lock_sock(sk); + + if (sctp_style(sk, TCP) && !sctp_sstate(sk, ESTABLISHED)) { + err = -ENOTCONN; + goto out; + } + + skb = sctp_skb_recv_datagram(sk, flags, noblock, &err); + if (!skb) + goto out; + + /* Get the total length of the skb including any skb's in the + * frag_list. + */ + skb_len = skb->len; + + copied = skb_len; + if (copied > len) + copied = len; + + err = skb_copy_datagram_msg(skb, 0, msg, copied); + + event = sctp_skb2event(skb); + + if (err) + goto out_free; + + sock_recv_ts_and_drops(msg, sk, skb); + if (sctp_ulpevent_is_notification(event)) { + msg->msg_flags |= MSG_NOTIFICATION; + sp->pf->event_msgname(event, msg->msg_name, addr_len); + } else { + sp->pf->skb_msgname(skb, msg->msg_name, addr_len); + } + + /* Check if we allow SCTP_NXTINFO. 
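	 * Each flag tested below was armed by a socket option or event
	 * subscription: SCTP_RECVNXTINFO, SCTP_RECVRCVINFO, and the
	 * sctp_data_io_event bit of SCTP_EVENTS, respectively.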
*/ + if (sp->recvnxtinfo) + sctp_ulpevent_read_nxtinfo(event, msg, sk); + /* Check if we allow SCTP_RCVINFO. */ + if (sp->recvrcvinfo) + sctp_ulpevent_read_rcvinfo(event, msg); + /* Check if we allow SCTP_SNDRCVINFO. */ + if (sp->subscribe.sctp_data_io_event) + sctp_ulpevent_read_sndrcvinfo(event, msg); + +#if 0 + /* FIXME: we should be calling IP/IPv6 layers. */ + if (sk->sk_protinfo.af_inet.cmsg_flags) + ip_cmsg_recv(msg, skb); +#endif + + err = copied; + + /* If skb's length exceeds the user's buffer, update the skb and + * push it back to the receive_queue so that the next call to + * recvmsg() will return the remaining data. Don't set MSG_EOR. + */ + if (skb_len > copied) { + msg->msg_flags &= ~MSG_EOR; + if (flags & MSG_PEEK) + goto out_free; + sctp_skb_pull(skb, copied); + skb_queue_head(&sk->sk_receive_queue, skb); + + /* When only partial message is copied to the user, increase + * rwnd by that amount. If all the data in the skb is read, + * rwnd is updated when the event is freed. + */ + if (!sctp_ulpevent_is_notification(event)) + sctp_assoc_rwnd_increase(event->asoc, copied); + goto out; + } else if ((event->msg_flags & MSG_NOTIFICATION) || + (event->msg_flags & MSG_EOR)) + msg->msg_flags |= MSG_EOR; + else + msg->msg_flags &= ~MSG_EOR; + +out_free: + if (flags & MSG_PEEK) { + /* Release the skb reference acquired after peeking the skb in + * sctp_skb_recv_datagram(). + */ + kfree_skb(skb); + } else { + /* Free the event which includes releasing the reference to + * the owner of the skb, freeing the skb and updating the + * rwnd. + */ + sctp_ulpevent_free(event); + } +out: + release_sock(sk); + return err; +} + +/* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS) + * + * This option is a on/off flag. If enabled no SCTP message + * fragmentation will be performed. Instead if a message being sent + * exceeds the current PMTU size, the message will NOT be sent and + * instead a error will be indicated to the user. + */ +static int sctp_setsockopt_disable_fragments(struct sock *sk, + char __user *optval, + unsigned int optlen) +{ + int val; + + if (optlen < sizeof(int)) + return -EINVAL; + + if (get_user(val, (int __user *)optval)) + return -EFAULT; + + sctp_sk(sk)->disable_fragments = (val == 0) ? 0 : 1; + + return 0; +} + +static int sctp_setsockopt_events(struct sock *sk, char __user *optval, + unsigned int optlen) +{ + struct sctp_association *asoc; + struct sctp_ulpevent *event; + + if (optlen > sizeof(struct sctp_event_subscribe)) + return -EINVAL; + if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen)) + return -EFAULT; + + if (sctp_sk(sk)->subscribe.sctp_data_io_event) + pr_warn_ratelimited(DEPRECATED "%s (pid %d) " + "Requested SCTP_SNDRCVINFO event.\n" + "Use SCTP_RCVINFO through SCTP_RECVRCVINFO option instead.\n", + current->comm, task_pid_nr(current)); + + /* At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT, + * if there is no data to be sent or retransmit, the stack will + * immediately send up this notification. + */ + if (sctp_ulpevent_type_enabled(SCTP_SENDER_DRY_EVENT, + &sctp_sk(sk)->subscribe)) { + asoc = sctp_id2assoc(sk, 0); + + if (asoc && sctp_outq_is_empty(&asoc->outqueue)) { + event = sctp_ulpevent_make_sender_dry_event(asoc, + GFP_ATOMIC); + if (!event) + return -ENOMEM; + + sctp_ulpq_tail_event(&asoc->ulpq, event); + } + } + + return 0; +} + +/* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE) + * + * This socket option is applicable to the UDP-style socket only. 
When
+ * set it will cause associations that are idle for more than the
+ * specified number of seconds to automatically close. An association
+ * being idle is defined as an association that has NOT sent or received
+ * user data. The special value of '0' indicates that no automatic
+ * close of any associations should be performed. The option expects an
+ * integer defining the number of seconds of idle time before an
+ * association is closed.
+ */
+static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
+				     unsigned int optlen)
+{
+	struct sctp_sock *sp = sctp_sk(sk);
+	struct net *net = sock_net(sk);
+
+	/* Applicable to UDP-style socket only */
+	if (sctp_style(sk, TCP))
+		return -EOPNOTSUPP;
+	if (optlen != sizeof(int))
+		return -EINVAL;
+	if (copy_from_user(&sp->autoclose, optval, optlen))
+		return -EFAULT;
+
+	if (sp->autoclose > net->sctp.max_autoclose)
+		sp->autoclose = net->sctp.max_autoclose;
+
+	return 0;
+}
+
+/* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS)
+ *
+ * Applications can enable or disable heartbeats for any peer address of
+ * an association, modify an address's heartbeat interval, force a
+ * heartbeat to be sent immediately, and adjust the address's maximum
+ * number of retransmissions sent before an address is considered
+ * unreachable. The following structure is used to access and modify an
+ * address's parameters:
+ *
+ *  struct sctp_paddrparams {
+ *     sctp_assoc_t		spp_assoc_id;
+ *     struct sockaddr_storage	spp_address;
+ *     uint32_t			spp_hbinterval;
+ *     uint16_t			spp_pathmaxrxt;
+ *     uint32_t			spp_pathmtu;
+ *     uint32_t			spp_sackdelay;
+ *     uint32_t			spp_flags;
+ * };
+ *
+ *   spp_assoc_id    - (one-to-many style socket) This is filled in by
+ *                     the application, and identifies the association
+ *                     for this query.
+ *   spp_address     - This specifies which address is of interest.
+ *   spp_hbinterval  - This contains the value of the heartbeat interval,
+ *                     in milliseconds. If a value of zero
+ *                     is present in this field then no changes are to
+ *                     be made to this parameter.
+ *   spp_pathmaxrxt  - This contains the maximum number of
+ *                     retransmissions before this address shall be
+ *                     considered unreachable. If a value of zero
+ *                     is present in this field then no changes are to
+ *                     be made to this parameter.
+ *   spp_pathmtu     - When Path MTU discovery is disabled the value
+ *                     specified here will be the "fixed" path mtu.
+ *                     Note that if the spp_address field is empty
+ *                     then all associations on this address will
+ *                     have this fixed path mtu set upon them.
+ *
+ *   spp_sackdelay   - When delayed sack is enabled, this value specifies
+ *                     the number of milliseconds that sacks will be delayed
+ *                     for. This value will apply to all addresses of an
+ *                     association if the spp_address field is empty. Note
+ *                     also, that if delayed sack is enabled and this
+ *                     value is set to 0, no change is made to the last
+ *                     recorded delayed sack timer value.
+ *
+ *   spp_flags       - These flags are used to control various features
+ *                     on an association. The flag field may contain
+ *                     zero or more of the following options.
+ *
+ *                     SPP_HB_ENABLE  - Enable heartbeats on the
+ *                     specified address. Note that if the address
+ *                     field is empty all addresses for the association
+ *                     have heartbeats enabled upon them.
+ *
+ *                     SPP_HB_DISABLE - Disable heartbeats on the
+ *                     specified address. Note that if the address
+ *                     field is empty all addresses for the association
+ *                     will have their heartbeats disabled. Note also
+ *                     that SPP_HB_ENABLE and SPP_HB_DISABLE are
+ *                     mutually exclusive; only one of these two should
+ *                     be specified. Enabling both flags will have
+ *                     undetermined results.
+ *
+ *                     SPP_HB_DEMAND - Request a user initiated heartbeat
+ *                     to be made immediately.
+ *
+ *                     SPP_HB_TIME_IS_ZERO - Specifies that the time for
+ *                     heartbeat delay is to be set to the value of 0
+ *                     milliseconds.
+ *
+ *                     SPP_PMTUD_ENABLE - This field will enable PMTU
+ *                     discovery upon the specified address. Note that
+ *                     if the address field is empty then all addresses
+ *                     on the association are affected.
+ *
+ *                     SPP_PMTUD_DISABLE - This field will disable PMTU
+ *                     discovery upon the specified address. Note that
+ *                     if the address field is empty then all addresses
+ *                     on the association are affected. Note also that
+ *                     SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually
+ *                     exclusive. Enabling both will have undetermined
+ *                     results.
+ *
+ *                     SPP_SACKDELAY_ENABLE - Setting this flag turns
+ *                     on delayed sack. The time specified in spp_sackdelay
+ *                     is used to specify the sack delay for this address. Note
+ *                     that if spp_address is empty then all addresses will
+ *                     enable delayed sack and take on the sack delay
+ *                     value specified in spp_sackdelay.
+ *
+ *                     SPP_SACKDELAY_DISABLE - Setting this flag turns
+ *                     off delayed sack. If the spp_address field is blank then
+ *                     delayed sack is disabled for the entire association. Note
+ *                     also that this flag is mutually exclusive with
+ *                     SPP_SACKDELAY_ENABLE; setting both will have undefined
+ *                     results.
+ */
+static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
+				       struct sctp_transport *trans,
+				       struct sctp_association *asoc,
+				       struct sctp_sock *sp,
+				       int hb_change,
+				       int pmtud_change,
+				       int sackdelay_change)
+{
+	int error;
+
+	if (params->spp_flags & SPP_HB_DEMAND && trans) {
+		struct net *net = sock_net(trans->asoc->base.sk);
+
+		error = sctp_primitive_REQUESTHEARTBEAT(net, trans->asoc, trans);
+		if (error)
+			return error;
+	}
+
+	/* Note that unless the spp_flag is set to SPP_HB_ENABLE the value of
+	 * this field is ignored. Note also that a value of zero indicates
+	 * the current setting should be left unchanged.
+	 */
+	if (params->spp_flags & SPP_HB_ENABLE) {
+
+		/* Re-zero the interval if SPP_HB_TIME_IS_ZERO is set.
+		 * This lets us use a zero value when this flag is set.
+		 */
+		if (params->spp_flags & SPP_HB_TIME_IS_ZERO)
+			params->spp_hbinterval = 0;
+
+		if (params->spp_hbinterval ||
+		    (params->spp_flags & SPP_HB_TIME_IS_ZERO)) {
+			if (trans) {
+				trans->hbinterval =
+				    msecs_to_jiffies(params->spp_hbinterval);
+			} else if (asoc) {
+				asoc->hbinterval =
+				    msecs_to_jiffies(params->spp_hbinterval);
+			} else {
+				sp->hbinterval = params->spp_hbinterval;
+			}
+		}
+	}
+
+	if (hb_change) {
+		if (trans) {
+			trans->param_flags =
+				(trans->param_flags & ~SPP_HB) | hb_change;
+		} else if (asoc) {
+			asoc->param_flags =
+				(asoc->param_flags & ~SPP_HB) | hb_change;
+		} else {
+			sp->param_flags =
+				(sp->param_flags & ~SPP_HB) | hb_change;
+		}
+	}
+
+	/* When Path MTU discovery is disabled the value specified here will
+	 * be the "fixed" path mtu (i.e. the value of the spp_flags field must
+	 * include the flag SPP_PMTUD_DISABLE for this field to have any
+	 * effect).
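	 * For example (illustrative values), spp_flags = SPP_PMTUD_DISABLE
	 * with spp_pathmtu = 1400 pins the path MTU of the matched
	 * transport(s) at 1400 bytes; values below SCTP_DEFAULT_MINSEGMENT
	 * are rejected earlier in the caller with -EINVAL.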
+ */ + if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) { + if (trans) { + trans->pathmtu = params->spp_pathmtu; + sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc); + } else if (asoc) { + asoc->pathmtu = params->spp_pathmtu; + sctp_frag_point(asoc, params->spp_pathmtu); + } else { + sp->pathmtu = params->spp_pathmtu; + } + } + + if (pmtud_change) { + if (trans) { + int update = (trans->param_flags & SPP_PMTUD_DISABLE) && + (params->spp_flags & SPP_PMTUD_ENABLE); + trans->param_flags = + (trans->param_flags & ~SPP_PMTUD) | pmtud_change; + if (update) { + sctp_transport_pmtu(trans, sctp_opt2sk(sp)); + sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc); + } + } else if (asoc) { + asoc->param_flags = + (asoc->param_flags & ~SPP_PMTUD) | pmtud_change; + } else { + sp->param_flags = + (sp->param_flags & ~SPP_PMTUD) | pmtud_change; + } + } + + /* Note that unless the spp_flag is set to SPP_SACKDELAY_ENABLE the + * value of this field is ignored. Note also that a value of zero + * indicates the current setting should be left unchanged. + */ + if ((params->spp_flags & SPP_SACKDELAY_ENABLE) && params->spp_sackdelay) { + if (trans) { + trans->sackdelay = + msecs_to_jiffies(params->spp_sackdelay); + } else if (asoc) { + asoc->sackdelay = + msecs_to_jiffies(params->spp_sackdelay); + } else { + sp->sackdelay = params->spp_sackdelay; + } + } + + if (sackdelay_change) { + if (trans) { + trans->param_flags = + (trans->param_flags & ~SPP_SACKDELAY) | + sackdelay_change; + } else if (asoc) { + asoc->param_flags = + (asoc->param_flags & ~SPP_SACKDELAY) | + sackdelay_change; + } else { + sp->param_flags = + (sp->param_flags & ~SPP_SACKDELAY) | + sackdelay_change; + } + } + + /* Note that a value of zero indicates the current setting should be + left unchanged. + */ + if (params->spp_pathmaxrxt) { + if (trans) { + trans->pathmaxrxt = params->spp_pathmaxrxt; + } else if (asoc) { + asoc->pathmaxrxt = params->spp_pathmaxrxt; + } else { + sp->pathmaxrxt = params->spp_pathmaxrxt; + } + } + + return 0; +} + +static int sctp_setsockopt_peer_addr_params(struct sock *sk, + char __user *optval, + unsigned int optlen) +{ + struct sctp_paddrparams params; + struct sctp_transport *trans = NULL; + struct sctp_association *asoc = NULL; + struct sctp_sock *sp = sctp_sk(sk); + int error; + int hb_change, pmtud_change, sackdelay_change; + + if (optlen != sizeof(struct sctp_paddrparams)) + return -EINVAL; + + if (copy_from_user(¶ms, optval, optlen)) + return -EFAULT; + + /* Validate flags and value parameters. */ + hb_change = params.spp_flags & SPP_HB; + pmtud_change = params.spp_flags & SPP_PMTUD; + sackdelay_change = params.spp_flags & SPP_SACKDELAY; + + if (hb_change == SPP_HB || + pmtud_change == SPP_PMTUD || + sackdelay_change == SPP_SACKDELAY || + params.spp_sackdelay > 500 || + (params.spp_pathmtu && + params.spp_pathmtu < SCTP_DEFAULT_MINSEGMENT)) + return -EINVAL; + + /* If an address other than INADDR_ANY is specified, and + * no transport is found, then the request is invalid. + */ + if (!sctp_is_any(sk, (union sctp_addr *)¶ms.spp_address)) { + trans = sctp_addr_id2transport(sk, ¶ms.spp_address, + params.spp_assoc_id); + if (!trans) + return -EINVAL; + } + + /* Get association, if assoc_id != 0 and the socket is a one + * to many style socket, and an association was not found, then + * the id was invalid. 
+ */ + asoc = sctp_id2assoc(sk, params.spp_assoc_id); + if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP)) + return -EINVAL; + + /* Heartbeat demand can only be sent on a transport or + * association, but not a socket. + */ + if (params.spp_flags & SPP_HB_DEMAND && !trans && !asoc) + return -EINVAL; + + /* Process parameters. */ + error = sctp_apply_peer_addr_params(¶ms, trans, asoc, sp, + hb_change, pmtud_change, + sackdelay_change); + + if (error) + return error; + + /* If changes are for association, also apply parameters to each + * transport. + */ + if (!trans && asoc) { + list_for_each_entry(trans, &asoc->peer.transport_addr_list, + transports) { + sctp_apply_peer_addr_params(¶ms, trans, asoc, sp, + hb_change, pmtud_change, + sackdelay_change); + } + } + + return 0; +} + +static inline __u32 sctp_spp_sackdelay_enable(__u32 param_flags) +{ + return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_ENABLE; +} + +static inline __u32 sctp_spp_sackdelay_disable(__u32 param_flags) +{ + return (param_flags & ~SPP_SACKDELAY) | SPP_SACKDELAY_DISABLE; +} + +/* + * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK) + * + * This option will effect the way delayed acks are performed. This + * option allows you to get or set the delayed ack time, in + * milliseconds. It also allows changing the delayed ack frequency. + * Changing the frequency to 1 disables the delayed sack algorithm. If + * the assoc_id is 0, then this sets or gets the endpoints default + * values. If the assoc_id field is non-zero, then the set or get + * effects the specified association for the one to many model (the + * assoc_id field is ignored by the one to one model). Note that if + * sack_delay or sack_freq are 0 when setting this option, then the + * current values will remain unchanged. + * + * struct sctp_sack_info { + * sctp_assoc_t sack_assoc_id; + * uint32_t sack_delay; + * uint32_t sack_freq; + * }; + * + * sack_assoc_id - This parameter, indicates which association the user + * is performing an action upon. Note that if this field's value is + * zero then the endpoints default value is changed (effecting future + * associations only). + * + * sack_delay - This parameter contains the number of milliseconds that + * the user is requesting the delayed ACK timer be set to. Note that + * this value is defined in the standard to be between 200 and 500 + * milliseconds. + * + * sack_freq - This parameter contains the number of packets that must + * be received before a sack is sent without waiting for the delay + * timer to expire. The default value for this is 2, setting this + * value to 1 will disable the delayed sack algorithm. 
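+ *
+ * For example (user-space sketch, illustrative values), disabling
+ * delayed SACKs for every future association of an endpoint:
+ *
+ *	struct sctp_sack_info si = { .sack_assoc_id = 0,
+ *				     .sack_delay = 0,
+ *				     .sack_freq = 1 };
+ *
+ *	setsockopt(sd, IPPROTO_SCTP, SCTP_DELAYED_SACK, &si, sizeof(si));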
+ */ + +static int sctp_setsockopt_delayed_ack(struct sock *sk, + char __user *optval, unsigned int optlen) +{ + struct sctp_sack_info params; + struct sctp_transport *trans = NULL; + struct sctp_association *asoc = NULL; + struct sctp_sock *sp = sctp_sk(sk); + + if (optlen == sizeof(struct sctp_sack_info)) { + if (copy_from_user(¶ms, optval, optlen)) + return -EFAULT; + + if (params.sack_delay == 0 && params.sack_freq == 0) + return 0; + } else if (optlen == sizeof(struct sctp_assoc_value)) { + pr_warn_ratelimited(DEPRECATED + "%s (pid %d) " + "Use of struct sctp_assoc_value in delayed_ack socket option.\n" + "Use struct sctp_sack_info instead\n", + current->comm, task_pid_nr(current)); + if (copy_from_user(¶ms, optval, optlen)) + return -EFAULT; + + if (params.sack_delay == 0) + params.sack_freq = 1; + else + params.sack_freq = 0; + } else + return -EINVAL; + + /* Validate value parameter. */ + if (params.sack_delay > 500) + return -EINVAL; + + /* Get association, if sack_assoc_id != 0 and the socket is a one + * to many style socket, and an association was not found, then + * the id was invalid. + */ + asoc = sctp_id2assoc(sk, params.sack_assoc_id); + if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP)) + return -EINVAL; + + if (params.sack_delay) { + if (asoc) { + asoc->sackdelay = + msecs_to_jiffies(params.sack_delay); + asoc->param_flags = + sctp_spp_sackdelay_enable(asoc->param_flags); + } else { + sp->sackdelay = params.sack_delay; + sp->param_flags = + sctp_spp_sackdelay_enable(sp->param_flags); + } + } + + if (params.sack_freq == 1) { + if (asoc) { + asoc->param_flags = + sctp_spp_sackdelay_disable(asoc->param_flags); + } else { + sp->param_flags = + sctp_spp_sackdelay_disable(sp->param_flags); + } + } else if (params.sack_freq > 1) { + if (asoc) { + asoc->sackfreq = params.sack_freq; + asoc->param_flags = + sctp_spp_sackdelay_enable(asoc->param_flags); + } else { + sp->sackfreq = params.sack_freq; + sp->param_flags = + sctp_spp_sackdelay_enable(sp->param_flags); + } + } + + /* If change is for association, also apply to each transport. */ + if (asoc) { + list_for_each_entry(trans, &asoc->peer.transport_addr_list, + transports) { + if (params.sack_delay) { + trans->sackdelay = + msecs_to_jiffies(params.sack_delay); + trans->param_flags = + sctp_spp_sackdelay_enable(trans->param_flags); + } + if (params.sack_freq == 1) { + trans->param_flags = + sctp_spp_sackdelay_disable(trans->param_flags); + } else if (params.sack_freq > 1) { + trans->sackfreq = params.sack_freq; + trans->param_flags = + sctp_spp_sackdelay_enable(trans->param_flags); + } + } + } + + return 0; +} + +/* 7.1.3 Initialization Parameters (SCTP_INITMSG) + * + * Applications can specify protocol parameters for the default association + * initialization. The option name argument to setsockopt() and getsockopt() + * is SCTP_INITMSG. + * + * Setting initialization parameters is effective only on an unconnected + * socket (for UDP-style sockets only future associations are effected + * by the change). With TCP-style sockets, this option is inherited by + * sockets derived from a listener socket. 
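+ *
+ * For example (user-space sketch, illustrative values):
+ *
+ *	struct sctp_initmsg im = { .sinit_num_ostreams = 10,
+ *				   .sinit_max_instreams = 10,
+ *				   .sinit_max_attempts = 4 };
+ *
+ *	setsockopt(sd, IPPROTO_SCTP, SCTP_INITMSG, &im, sizeof(im));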
+ */ +static int sctp_setsockopt_initmsg(struct sock *sk, char __user *optval, unsigned int optlen) +{ + struct sctp_initmsg sinit; + struct sctp_sock *sp = sctp_sk(sk); + + if (optlen != sizeof(struct sctp_initmsg)) + return -EINVAL; + if (copy_from_user(&sinit, optval, optlen)) + return -EFAULT; + + if (sinit.sinit_num_ostreams) + sp->initmsg.sinit_num_ostreams = sinit.sinit_num_ostreams; + if (sinit.sinit_max_instreams) + sp->initmsg.sinit_max_instreams = sinit.sinit_max_instreams; + if (sinit.sinit_max_attempts) + sp->initmsg.sinit_max_attempts = sinit.sinit_max_attempts; + if (sinit.sinit_max_init_timeo) + sp->initmsg.sinit_max_init_timeo = sinit.sinit_max_init_timeo; + + return 0; +} + +/* + * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM) + * + * Applications that wish to use the sendto() system call may wish to + * specify a default set of parameters that would normally be supplied + * through the inclusion of ancillary data. This socket option allows + * such an application to set the default sctp_sndrcvinfo structure. + * The application that wishes to use this socket option simply passes + * in to this call the sctp_sndrcvinfo structure defined in Section + * 5.2.2) The input parameters accepted by this call include + * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context, + * sinfo_timetolive. The user must provide the sinfo_assoc_id field in + * to this call if the caller is using the UDP model. + */ +static int sctp_setsockopt_default_send_param(struct sock *sk, + char __user *optval, + unsigned int optlen) +{ + struct sctp_sock *sp = sctp_sk(sk); + struct sctp_association *asoc; + struct sctp_sndrcvinfo info; + + if (optlen != sizeof(info)) + return -EINVAL; + if (copy_from_user(&info, optval, optlen)) + return -EFAULT; + if (info.sinfo_flags & + ~(SCTP_UNORDERED | SCTP_ADDR_OVER | + SCTP_ABORT | SCTP_EOF)) + return -EINVAL; + + asoc = sctp_id2assoc(sk, info.sinfo_assoc_id); + if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP)) + return -EINVAL; + if (asoc) { + asoc->default_stream = info.sinfo_stream; + asoc->default_flags = info.sinfo_flags; + asoc->default_ppid = info.sinfo_ppid; + asoc->default_context = info.sinfo_context; + asoc->default_timetolive = info.sinfo_timetolive; + } else { + sp->default_stream = info.sinfo_stream; + sp->default_flags = info.sinfo_flags; + sp->default_ppid = info.sinfo_ppid; + sp->default_context = info.sinfo_context; + sp->default_timetolive = info.sinfo_timetolive; + } + + return 0; +} + +/* RFC6458, Section 8.1.31. 
Set/get Default Send Parameters + * (SCTP_DEFAULT_SNDINFO) + */ +static int sctp_setsockopt_default_sndinfo(struct sock *sk, + char __user *optval, + unsigned int optlen) +{ + struct sctp_sock *sp = sctp_sk(sk); + struct sctp_association *asoc; + struct sctp_sndinfo info; + + if (optlen != sizeof(info)) + return -EINVAL; + if (copy_from_user(&info, optval, optlen)) + return -EFAULT; + if (info.snd_flags & + ~(SCTP_UNORDERED | SCTP_ADDR_OVER | + SCTP_ABORT | SCTP_EOF)) + return -EINVAL; + + asoc = sctp_id2assoc(sk, info.snd_assoc_id); + if (!asoc && info.snd_assoc_id && sctp_style(sk, UDP)) + return -EINVAL; + if (asoc) { + asoc->default_stream = info.snd_sid; + asoc->default_flags = info.snd_flags; + asoc->default_ppid = info.snd_ppid; + asoc->default_context = info.snd_context; + } else { + sp->default_stream = info.snd_sid; + sp->default_flags = info.snd_flags; + sp->default_ppid = info.snd_ppid; + sp->default_context = info.snd_context; + } + + return 0; +} + +/* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR) + * + * Requests that the local SCTP stack use the enclosed peer address as + * the association primary. The enclosed address must be one of the + * association peer's addresses. + */ +static int sctp_setsockopt_primary_addr(struct sock *sk, char __user *optval, + unsigned int optlen) +{ + struct sctp_prim prim; + struct sctp_transport *trans; + + if (optlen != sizeof(struct sctp_prim)) + return -EINVAL; + + if (copy_from_user(&prim, optval, sizeof(struct sctp_prim))) + return -EFAULT; + + trans = sctp_addr_id2transport(sk, &prim.ssp_addr, prim.ssp_assoc_id); + if (!trans) + return -EINVAL; + + sctp_assoc_set_primary(trans->asoc, trans); + + return 0; +} + +/* + * 7.1.5 SCTP_NODELAY + * + * Turn on/off any Nagle-like algorithm. This means that packets are + * generally sent as soon as possible and no unnecessary delays are + * introduced, at the cost of more packets in the network. Expects an + * integer boolean flag. + */ +static int sctp_setsockopt_nodelay(struct sock *sk, char __user *optval, + unsigned int optlen) +{ + int val; + + if (optlen < sizeof(int)) + return -EINVAL; + if (get_user(val, (int __user *)optval)) + return -EFAULT; + + sctp_sk(sk)->nodelay = (val == 0) ? 0 : 1; + return 0; +} + +/* + * + * 7.1.1 SCTP_RTOINFO + * + * The protocol parameters used to initialize and bound retransmission + * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access + * and modify these parameters. + * All parameters are time values, in milliseconds. A value of 0, when + * modifying the parameters, indicates that the current value should not + * be changed. + * + */ +static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigned int optlen) +{ + struct sctp_rtoinfo rtoinfo; + struct sctp_association *asoc; + unsigned long rto_min, rto_max; + struct sctp_sock *sp = sctp_sk(sk); + + if (optlen != sizeof (struct sctp_rtoinfo)) + return -EINVAL; + + if (copy_from_user(&rtoinfo, optval, optlen)) + return -EFAULT; + + asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id); + + /* Set the values to the specific association */ + if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP)) + return -EINVAL; + + rto_max = rtoinfo.srto_max; + rto_min = rtoinfo.srto_min; + + if (rto_max) + rto_max = asoc ? msecs_to_jiffies(rto_max) : rto_max; + else + rto_max = asoc ? asoc->rto_max : sp->rtoinfo.srto_max; + + if (rto_min) + rto_min = asoc ? msecs_to_jiffies(rto_min) : rto_min; + else + rto_min = asoc ? 
asoc->rto_min : sp->rtoinfo.srto_min; + + if (rto_min > rto_max) + return -EINVAL; + + if (asoc) { + if (rtoinfo.srto_initial != 0) + asoc->rto_initial = + msecs_to_jiffies(rtoinfo.srto_initial); + asoc->rto_max = rto_max; + asoc->rto_min = rto_min; + } else { + /* If there is no association or the association-id = 0 + * set the values to the endpoint. + */ + if (rtoinfo.srto_initial != 0) + sp->rtoinfo.srto_initial = rtoinfo.srto_initial; + sp->rtoinfo.srto_max = rto_max; + sp->rtoinfo.srto_min = rto_min; + } + + return 0; +} + +/* + * + * 7.1.2 SCTP_ASSOCINFO + * + * This option is used to tune the maximum retransmission attempts + * of the association. + * Returns an error if the new association retransmission value is + * greater than the sum of the retransmission value of the peer. + * See [SCTP] for more information. + * + */ +static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, unsigned int optlen) +{ + + struct sctp_assocparams assocparams; + struct sctp_association *asoc; + + if (optlen != sizeof(struct sctp_assocparams)) + return -EINVAL; + if (copy_from_user(&assocparams, optval, optlen)) + return -EFAULT; + + asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id); + + if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP)) + return -EINVAL; + + /* Set the values to the specific association */ + if (asoc) { + if (assocparams.sasoc_asocmaxrxt != 0) { + __u32 path_sum = 0; + int paths = 0; + struct sctp_transport *peer_addr; + + list_for_each_entry(peer_addr, &asoc->peer.transport_addr_list, + transports) { + path_sum += peer_addr->pathmaxrxt; + paths++; + } + + /* Only validate asocmaxrxt if we have more than + * one path/transport. We do this because path + * retransmissions are only counted when we have more + * then one path. + */ + if (paths > 1 && + assocparams.sasoc_asocmaxrxt > path_sum) + return -EINVAL; + + asoc->max_retrans = assocparams.sasoc_asocmaxrxt; + } + + if (assocparams.sasoc_cookie_life != 0) + asoc->cookie_life = ms_to_ktime(assocparams.sasoc_cookie_life); + } else { + /* Set the values to the endpoint */ + struct sctp_sock *sp = sctp_sk(sk); + + if (assocparams.sasoc_asocmaxrxt != 0) + sp->assocparams.sasoc_asocmaxrxt = + assocparams.sasoc_asocmaxrxt; + if (assocparams.sasoc_cookie_life != 0) + sp->assocparams.sasoc_cookie_life = + assocparams.sasoc_cookie_life; + } + return 0; +} + +/* + * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR) + * + * This socket option is a boolean flag which turns on or off mapped V4 + * addresses. If this option is turned on and the socket is type + * PF_INET6, then IPv4 addresses will be mapped to V6 representation. + * If this option is turned off, then no mapping will be done of V4 + * addresses and a user will receive both PF_INET6 and PF_INET type + * addresses on the socket. + */ +static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval, unsigned int optlen) +{ + int val; + struct sctp_sock *sp = sctp_sk(sk); + + if (optlen < sizeof(int)) + return -EINVAL; + if (get_user(val, (int __user *)optval)) + return -EFAULT; + if (val) + sp->v4mapped = 1; + else + sp->v4mapped = 0; + + return 0; +} + +/* + * 8.1.16. Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG) + * This option will get or set the maximum size to put in any outgoing + * SCTP DATA chunk. If a message is larger than this size it will be + * fragmented by SCTP into the specified size. 
Note that the underlying + * SCTP implementation may fragment into smaller sized chunks when the + * PMTU of the underlying association is smaller than the value set by + * the user. The default value for this option is '0' which indicates + * the user is NOT limiting fragmentation and only the PMTU will effect + * SCTP's choice of DATA chunk size. Note also that values set larger + * than the maximum size of an IP datagram will effectively let SCTP + * control fragmentation (i.e. the same as setting this option to 0). + * + * The following structure is used to access and modify this parameter: + * + * struct sctp_assoc_value { + * sctp_assoc_t assoc_id; + * uint32_t assoc_value; + * }; + * + * assoc_id: This parameter is ignored for one-to-one style sockets. + * For one-to-many style sockets this parameter indicates which + * association the user is performing an action upon. Note that if + * this field's value is zero then the endpoints default value is + * changed (effecting future associations only). + * assoc_value: This parameter specifies the maximum size in bytes. + */ +static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned int optlen) +{ + struct sctp_assoc_value params; + struct sctp_association *asoc; + struct sctp_sock *sp = sctp_sk(sk); + int val; + + if (optlen == sizeof(int)) { + pr_warn_ratelimited(DEPRECATED + "%s (pid %d) " + "Use of int in maxseg socket option.\n" + "Use struct sctp_assoc_value instead\n", + current->comm, task_pid_nr(current)); + if (copy_from_user(&val, optval, optlen)) + return -EFAULT; + params.assoc_id = 0; + } else if (optlen == sizeof(struct sctp_assoc_value)) { + if (copy_from_user(¶ms, optval, optlen)) + return -EFAULT; + val = params.assoc_value; + } else + return -EINVAL; + + if ((val != 0) && ((val < 8) || (val > SCTP_MAX_CHUNK_LEN))) + return -EINVAL; + + asoc = sctp_id2assoc(sk, params.assoc_id); + if (!asoc && params.assoc_id && sctp_style(sk, UDP)) + return -EINVAL; + + if (asoc) { + if (val == 0) { + val = asoc->pathmtu; + val -= sp->pf->af->net_header_len; + val -= sizeof(struct sctphdr) + + sizeof(struct sctp_data_chunk); + } + asoc->user_frag = val; + asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu); + } else { + sp->user_frag = val; + } + + return 0; +} + + +/* + * 7.1.9 Set Peer Primary Address (SCTP_SET_PEER_PRIMARY_ADDR) + * + * Requests that the peer mark the enclosed address as the association + * primary. The enclosed address must be one of the association's + * locally bound addresses. 
The following structure is used to make a + * set primary request: + */ +static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optval, + unsigned int optlen) +{ + struct net *net = sock_net(sk); + struct sctp_sock *sp; + struct sctp_association *asoc = NULL; + struct sctp_setpeerprim prim; + struct sctp_chunk *chunk; + struct sctp_af *af; + int err; + + sp = sctp_sk(sk); + + if (!net->sctp.addip_enable) + return -EPERM; + + if (optlen != sizeof(struct sctp_setpeerprim)) + return -EINVAL; + + if (copy_from_user(&prim, optval, optlen)) + return -EFAULT; + + asoc = sctp_id2assoc(sk, prim.sspp_assoc_id); + if (!asoc) + return -EINVAL; + + if (!asoc->peer.asconf_capable) + return -EPERM; + + if (asoc->peer.addip_disabled_mask & SCTP_PARAM_SET_PRIMARY) + return -EPERM; + + if (!sctp_state(asoc, ESTABLISHED)) + return -ENOTCONN; + + af = sctp_get_af_specific(prim.sspp_addr.ss_family); + if (!af) + return -EINVAL; + + if (!af->addr_valid((union sctp_addr *)&prim.sspp_addr, sp, NULL)) + return -EADDRNOTAVAIL; + + if (!sctp_assoc_lookup_laddr(asoc, (union sctp_addr *)&prim.sspp_addr)) + return -EADDRNOTAVAIL; + + /* Create an ASCONF chunk with SET_PRIMARY parameter */ + chunk = sctp_make_asconf_set_prim(asoc, + (union sctp_addr *)&prim.sspp_addr); + if (!chunk) + return -ENOMEM; + + err = sctp_send_asconf(asoc, chunk); + + pr_debug("%s: we set peer primary addr primitively\n", __func__); + + return err; +} + +static int sctp_setsockopt_adaptation_layer(struct sock *sk, char __user *optval, + unsigned int optlen) +{ + struct sctp_setadaptation adaptation; + + if (optlen != sizeof(struct sctp_setadaptation)) + return -EINVAL; + if (copy_from_user(&adaptation, optval, optlen)) + return -EFAULT; + + sctp_sk(sk)->adaptation_ind = adaptation.ssb_adaptation_ind; + + return 0; +} + +/* + * 7.1.29. Set or Get the default context (SCTP_CONTEXT) + * + * The context field in the sctp_sndrcvinfo structure is normally only + * used when a failed message is retrieved holding the value that was + * sent down on the actual send call. This option allows the setting of + * a default context on an association basis that will be received on + * reading messages from the peer. This is especially helpful in the + * one-2-many model for an application to keep some reference to an + * internal state machine that is processing messages on the + * association. Note that the setting of this value only effects + * received messages from the peer and does not effect the value that is + * saved with outbound messages. + */ +static int sctp_setsockopt_context(struct sock *sk, char __user *optval, + unsigned int optlen) +{ + struct sctp_assoc_value params; + struct sctp_sock *sp; + struct sctp_association *asoc; + + if (optlen != sizeof(struct sctp_assoc_value)) + return -EINVAL; + if (copy_from_user(¶ms, optval, optlen)) + return -EFAULT; + + sp = sctp_sk(sk); + + if (params.assoc_id != 0) { + asoc = sctp_id2assoc(sk, params.assoc_id); + if (!asoc) + return -EINVAL; + asoc->default_rcv_context = params.assoc_value; + } else { + sp->default_rcv_context = params.assoc_value; + } + + return 0; +} + +/* + * 7.1.24. Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE) + * + * This options will at a minimum specify if the implementation is doing + * fragmented interleave. Fragmented interleave, for a one to many + * socket, is when subsequent calls to receive a message may return + * parts of messages from different associations. Some implementations + * may allow you to turn this value on or off. 
If so, when turned off, + * no fragment interleave will occur (which will cause a head of line + * blocking amongst multiple associations sharing the same one to many + * socket). When this option is turned on, then each receive call may + * come from a different association (thus the user must receive data + * with the extended calls (e.g. sctp_recvmsg) to keep track of which + * association each receive belongs to. + * + * This option takes a boolean value. A non-zero value indicates that + * fragmented interleave is on. A value of zero indicates that + * fragmented interleave is off. + * + * Note that it is important that an implementation that allows this + * option to be turned on, have it off by default. Otherwise an unaware + * application using the one to many model may become confused and act + * incorrectly. + */ +static int sctp_setsockopt_fragment_interleave(struct sock *sk, + char __user *optval, + unsigned int optlen) +{ + int val; + + if (optlen != sizeof(int)) + return -EINVAL; + if (get_user(val, (int __user *)optval)) + return -EFAULT; + + sctp_sk(sk)->frag_interleave = (val == 0) ? 0 : 1; + + return 0; +} + +/* + * 8.1.21. Set or Get the SCTP Partial Delivery Point + * (SCTP_PARTIAL_DELIVERY_POINT) + * + * This option will set or get the SCTP partial delivery point. This + * point is the size of a message where the partial delivery API will be + * invoked to help free up rwnd space for the peer. Setting this to a + * lower value will cause partial deliveries to happen more often. The + * calls argument is an integer that sets or gets the partial delivery + * point. Note also that the call will fail if the user attempts to set + * this value larger than the socket receive buffer size. + * + * Note that any single message having a length smaller than or equal to + * the SCTP partial delivery point will be delivered in one single read + * call as long as the user provided buffer is large enough to hold the + * message. + */ +static int sctp_setsockopt_partial_delivery_point(struct sock *sk, + char __user *optval, + unsigned int optlen) +{ + u32 val; + + if (optlen != sizeof(u32)) + return -EINVAL; + if (get_user(val, (int __user *)optval)) + return -EFAULT; + + /* Note: We double the receive buffer from what the user sets + * it to be, also initial rwnd is based on rcvbuf/2. + */ + if (val > (sk->sk_rcvbuf >> 1)) + return -EINVAL; + + sctp_sk(sk)->pd_point = val; + + return 0; /* is this the right error code? */ +} + +/* + * 7.1.28. Set or Get the maximum burst (SCTP_MAX_BURST) + * + * This option will allow a user to change the maximum burst of packets + * that can be emitted by this association. Note that the default value + * is 4, and some implementations may restrict this setting so that it + * can only be lowered. + * + * NOTE: This text doesn't seem right. Do this on a socket basis with + * future associations inheriting the socket value. 
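+ *
+ * Illustrative user-space sketch (editor's example, not part of this
+ * file; 'fd' is an assumed SCTP socket and the value 8 is arbitrary).
+ * The burst limit can be set per endpoint (assoc_id == 0) or per
+ * association through struct sctp_assoc_value:
+ *
+ *	struct sctp_assoc_value av = { .assoc_id = 0, .assoc_value = 8 };
+ *
+ *	if (setsockopt(fd, IPPROTO_SCTP, SCTP_MAX_BURST,
+ *		       &av, sizeof(av)) < 0)
+ *		perror("SCTP_MAX_BURST");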
+ */ +static int sctp_setsockopt_maxburst(struct sock *sk, + char __user *optval, + unsigned int optlen) +{ + struct sctp_assoc_value params; + struct sctp_sock *sp; + struct sctp_association *asoc; + int val; + int assoc_id = 0; + + if (optlen == sizeof(int)) { + pr_warn_ratelimited(DEPRECATED + "%s (pid %d) " + "Use of int in max_burst socket option deprecated.\n" + "Use struct sctp_assoc_value instead\n", + current->comm, task_pid_nr(current)); + if (copy_from_user(&val, optval, optlen)) + return -EFAULT; + } else if (optlen == sizeof(struct sctp_assoc_value)) { + if (copy_from_user(¶ms, optval, optlen)) + return -EFAULT; + val = params.assoc_value; + assoc_id = params.assoc_id; + } else + return -EINVAL; + + sp = sctp_sk(sk); + + if (assoc_id != 0) { + asoc = sctp_id2assoc(sk, assoc_id); + if (!asoc) + return -EINVAL; + asoc->max_burst = val; + } else + sp->max_burst = val; + + return 0; +} + +/* + * 7.1.18. Add a chunk that must be authenticated (SCTP_AUTH_CHUNK) + * + * This set option adds a chunk type that the user is requesting to be + * received only in an authenticated way. Changes to the list of chunks + * will only effect future associations on the socket. + */ +static int sctp_setsockopt_auth_chunk(struct sock *sk, + char __user *optval, + unsigned int optlen) +{ + struct sctp_endpoint *ep = sctp_sk(sk)->ep; + struct sctp_authchunk val; + + if (!ep->auth_enable) + return -EACCES; + + if (optlen != sizeof(struct sctp_authchunk)) + return -EINVAL; + if (copy_from_user(&val, optval, optlen)) + return -EFAULT; + + switch (val.sauth_chunk) { + case SCTP_CID_INIT: + case SCTP_CID_INIT_ACK: + case SCTP_CID_SHUTDOWN_COMPLETE: + case SCTP_CID_AUTH: + return -EINVAL; + } + + /* add this chunk id to the endpoint */ + return sctp_auth_ep_add_chunkid(ep, val.sauth_chunk); +} + +/* + * 7.1.19. Get or set the list of supported HMAC Identifiers (SCTP_HMAC_IDENT) + * + * This option gets or sets the list of HMAC algorithms that the local + * endpoint requires the peer to use. + */ +static int sctp_setsockopt_hmac_ident(struct sock *sk, + char __user *optval, + unsigned int optlen) +{ + struct sctp_endpoint *ep = sctp_sk(sk)->ep; + struct sctp_hmacalgo *hmacs; + u32 idents; + int err; + + if (!ep->auth_enable) + return -EACCES; + + if (optlen < sizeof(struct sctp_hmacalgo)) + return -EINVAL; + + hmacs = memdup_user(optval, optlen); + if (IS_ERR(hmacs)) + return PTR_ERR(hmacs); + + idents = hmacs->shmac_num_idents; + if (idents == 0 || idents > SCTP_AUTH_NUM_HMACS || + (idents * sizeof(u16)) > (optlen - sizeof(struct sctp_hmacalgo))) { + err = -EINVAL; + goto out; + } + + err = sctp_auth_ep_set_hmacs(ep, hmacs); +out: + kfree(hmacs); + return err; +} + +/* + * 7.1.20. Set a shared key (SCTP_AUTH_KEY) + * + * This option will set a shared secret key which is used to build an + * association shared key. 
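+ *
+ * Illustrative user-space sketch (editor's example; the key number,
+ * length and the assumed 'key_bytes' buffer are arbitrary).  The
+ * structure carries a variable-length key, so optlen must also cover
+ * the trailing key bytes:
+ *
+ *	size_t klen = 16;
+ *	struct sctp_authkey *ak = calloc(1, sizeof(*ak) + klen);
+ *
+ *	ak->sca_assoc_id  = 0;		// endpoint-wide key
+ *	ak->sca_keynumber = 1;
+ *	ak->sca_keylength = klen;
+ *	memcpy(ak->sca_key, key_bytes, klen);
+ *	setsockopt(fd, IPPROTO_SCTP, SCTP_AUTH_KEY, ak,
+ *		   sizeof(*ak) + klen);
+ *	free(ak);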
+ */ +static int sctp_setsockopt_auth_key(struct sock *sk, + char __user *optval, + unsigned int optlen) +{ + struct sctp_endpoint *ep = sctp_sk(sk)->ep; + struct sctp_authkey *authkey; + struct sctp_association *asoc; + int ret; + + if (!ep->auth_enable) + return -EACCES; + + if (optlen <= sizeof(struct sctp_authkey)) + return -EINVAL; + + authkey = memdup_user(optval, optlen); + if (IS_ERR(authkey)) + return PTR_ERR(authkey); + + if (authkey->sca_keylength > optlen - sizeof(struct sctp_authkey)) { + ret = -EINVAL; + goto out; + } + + asoc = sctp_id2assoc(sk, authkey->sca_assoc_id); + if (!asoc && authkey->sca_assoc_id && sctp_style(sk, UDP)) { + ret = -EINVAL; + goto out; + } + + ret = sctp_auth_set_key(ep, asoc, authkey); +out: + kzfree(authkey); + return ret; +} + +/* + * 7.1.21. Get or set the active shared key (SCTP_AUTH_ACTIVE_KEY) + * + * This option will get or set the active shared key to be used to build + * the association shared key. + */ +static int sctp_setsockopt_active_key(struct sock *sk, + char __user *optval, + unsigned int optlen) +{ + struct sctp_endpoint *ep = sctp_sk(sk)->ep; + struct sctp_authkeyid val; + struct sctp_association *asoc; + + if (!ep->auth_enable) + return -EACCES; + + if (optlen != sizeof(struct sctp_authkeyid)) + return -EINVAL; + if (copy_from_user(&val, optval, optlen)) + return -EFAULT; + + asoc = sctp_id2assoc(sk, val.scact_assoc_id); + if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) + return -EINVAL; + + return sctp_auth_set_active_key(ep, asoc, val.scact_keynumber); +} + +/* + * 7.1.22. Delete a shared key (SCTP_AUTH_DELETE_KEY) + * + * This set option will delete a shared secret key from use. + */ +static int sctp_setsockopt_del_key(struct sock *sk, + char __user *optval, + unsigned int optlen) +{ + struct sctp_endpoint *ep = sctp_sk(sk)->ep; + struct sctp_authkeyid val; + struct sctp_association *asoc; + + if (!ep->auth_enable) + return -EACCES; + + if (optlen != sizeof(struct sctp_authkeyid)) + return -EINVAL; + if (copy_from_user(&val, optval, optlen)) + return -EFAULT; + + asoc = sctp_id2assoc(sk, val.scact_assoc_id); + if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP)) + return -EINVAL; + + return sctp_auth_del_key_id(ep, asoc, val.scact_keynumber); + +} + +/* + * 8.1.23 SCTP_AUTO_ASCONF + * + * This option will enable or disable the use of the automatic generation of + * ASCONF chunks to add and delete addresses to an existing association. Note + * that this option has two caveats namely: a) it only affects sockets that + * are bound to all addresses available to the SCTP stack, and b) the system + * administrator may have an overriding control that turns the ASCONF feature + * off no matter what setting the socket option may have. + * This option expects an integer boolean flag, where a non-zero value turns on + * the option, and a zero value turns off the option. + * Note. 
In this implementation, socket operation overrides default parameter + * being set by sysctl as well as FreeBSD implementation + */ +static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval, + unsigned int optlen) +{ + int val; + struct sctp_sock *sp = sctp_sk(sk); + + if (optlen < sizeof(int)) + return -EINVAL; + if (get_user(val, (int __user *)optval)) + return -EFAULT; + if (!sctp_is_ep_boundall(sk) && val) + return -EINVAL; + if ((val && sp->do_auto_asconf) || (!val && !sp->do_auto_asconf)) + return 0; + + spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock); + if (val == 0 && sp->do_auto_asconf) { + list_del(&sp->auto_asconf_list); + sp->do_auto_asconf = 0; + } else if (val && !sp->do_auto_asconf) { + list_add_tail(&sp->auto_asconf_list, + &sock_net(sk)->sctp.auto_asconf_splist); + sp->do_auto_asconf = 1; + } + spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock); + return 0; +} + +/* + * SCTP_PEER_ADDR_THLDS + * + * This option allows us to alter the partially failed threshold for one or all + * transports in an association. See Section 6.1 of: + * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt + */ +static int sctp_setsockopt_paddr_thresholds(struct sock *sk, + char __user *optval, + unsigned int optlen) +{ + struct sctp_paddrthlds val; + struct sctp_transport *trans; + struct sctp_association *asoc; + + if (optlen < sizeof(struct sctp_paddrthlds)) + return -EINVAL; + if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, + sizeof(struct sctp_paddrthlds))) + return -EFAULT; + + + if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) { + asoc = sctp_id2assoc(sk, val.spt_assoc_id); + if (!asoc) + return -ENOENT; + list_for_each_entry(trans, &asoc->peer.transport_addr_list, + transports) { + if (val.spt_pathmaxrxt) + trans->pathmaxrxt = val.spt_pathmaxrxt; + trans->pf_retrans = val.spt_pathpfthld; + } + + if (val.spt_pathmaxrxt) + asoc->pathmaxrxt = val.spt_pathmaxrxt; + asoc->pf_retrans = val.spt_pathpfthld; + } else { + trans = sctp_addr_id2transport(sk, &val.spt_address, + val.spt_assoc_id); + if (!trans) + return -ENOENT; + + if (val.spt_pathmaxrxt) + trans->pathmaxrxt = val.spt_pathmaxrxt; + trans->pf_retrans = val.spt_pathpfthld; + } + + return 0; +} + +static int sctp_setsockopt_recvrcvinfo(struct sock *sk, + char __user *optval, + unsigned int optlen) +{ + int val; + + if (optlen < sizeof(int)) + return -EINVAL; + if (get_user(val, (int __user *) optval)) + return -EFAULT; + + sctp_sk(sk)->recvrcvinfo = (val == 0) ? 0 : 1; + + return 0; +} + +static int sctp_setsockopt_recvnxtinfo(struct sock *sk, + char __user *optval, + unsigned int optlen) +{ + int val; + + if (optlen < sizeof(int)) + return -EINVAL; + if (get_user(val, (int __user *) optval)) + return -EFAULT; + + sctp_sk(sk)->recvnxtinfo = (val == 0) ? 0 : 1; + + return 0; +} + +/* API 6.2 setsockopt(), getsockopt() + * + * Applications use setsockopt() and getsockopt() to set or retrieve + * socket options. Socket options are used to change the default + * behavior of sockets calls. They are described in Section 7. + * + * The syntax is: + * + * ret = getsockopt(int sd, int level, int optname, void __user *optval, + * int __user *optlen); + * ret = setsockopt(int sd, int level, int optname, const void __user *optval, + * int optlen); + * + * sd - the socket descript. + * level - set to IPPROTO_SCTP for all SCTP options. + * optname - the option name. + * optval - the buffer to store the value of the option. + * optlen - the size of the buffer. 
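+ *
+ * Illustrative user-space sketch (editor's example; 'sd' is an assumed
+ * SCTP socket): setting and reading back SCTP_NODELAY, which takes an
+ * integer boolean:
+ *
+ *	int on = 1;
+ *	socklen_t optlen = sizeof(on);
+ *
+ *	setsockopt(sd, IPPROTO_SCTP, SCTP_NODELAY, &on, sizeof(on));
+ *	getsockopt(sd, IPPROTO_SCTP, SCTP_NODELAY, &on, &optlen);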
+ */ +static int sctp_setsockopt(struct sock *sk, int level, int optname, + char __user *optval, unsigned int optlen) +{ + int retval = 0; + + pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname); + + /* I can hardly begin to describe how wrong this is. This is + * so broken as to be worse than useless. The API draft + * REALLY is NOT helpful here... I am not convinced that the + * semantics of setsockopt() with a level OTHER THAN SOL_SCTP + * are at all well-founded. + */ + if (level != SOL_SCTP) { + struct sctp_af *af = sctp_sk(sk)->pf->af; + retval = af->setsockopt(sk, level, optname, optval, optlen); + goto out_nounlock; + } + + lock_sock(sk); + + switch (optname) { + case SCTP_SOCKOPT_BINDX_ADD: + /* 'optlen' is the size of the addresses buffer. */ + retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval, + optlen, SCTP_BINDX_ADD_ADDR); + break; + + case SCTP_SOCKOPT_BINDX_REM: + /* 'optlen' is the size of the addresses buffer. */ + retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval, + optlen, SCTP_BINDX_REM_ADDR); + break; + + case SCTP_SOCKOPT_CONNECTX_OLD: + /* 'optlen' is the size of the addresses buffer. */ + retval = sctp_setsockopt_connectx_old(sk, + (struct sockaddr __user *)optval, + optlen); + break; + + case SCTP_SOCKOPT_CONNECTX: + /* 'optlen' is the size of the addresses buffer. */ + retval = sctp_setsockopt_connectx(sk, + (struct sockaddr __user *)optval, + optlen); + break; + + case SCTP_DISABLE_FRAGMENTS: + retval = sctp_setsockopt_disable_fragments(sk, optval, optlen); + break; + + case SCTP_EVENTS: + retval = sctp_setsockopt_events(sk, optval, optlen); + break; + + case SCTP_AUTOCLOSE: + retval = sctp_setsockopt_autoclose(sk, optval, optlen); + break; + + case SCTP_PEER_ADDR_PARAMS: + retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen); + break; + + case SCTP_DELAYED_SACK: + retval = sctp_setsockopt_delayed_ack(sk, optval, optlen); + break; + case SCTP_PARTIAL_DELIVERY_POINT: + retval = sctp_setsockopt_partial_delivery_point(sk, optval, optlen); + break; + + case SCTP_INITMSG: + retval = sctp_setsockopt_initmsg(sk, optval, optlen); + break; + case SCTP_DEFAULT_SEND_PARAM: + retval = sctp_setsockopt_default_send_param(sk, optval, + optlen); + break; + case SCTP_DEFAULT_SNDINFO: + retval = sctp_setsockopt_default_sndinfo(sk, optval, optlen); + break; + case SCTP_PRIMARY_ADDR: + retval = sctp_setsockopt_primary_addr(sk, optval, optlen); + break; + case SCTP_SET_PEER_PRIMARY_ADDR: + retval = sctp_setsockopt_peer_primary_addr(sk, optval, optlen); + break; + case SCTP_NODELAY: + retval = sctp_setsockopt_nodelay(sk, optval, optlen); + break; + case SCTP_RTOINFO: + retval = sctp_setsockopt_rtoinfo(sk, optval, optlen); + break; + case SCTP_ASSOCINFO: + retval = sctp_setsockopt_associnfo(sk, optval, optlen); + break; + case SCTP_I_WANT_MAPPED_V4_ADDR: + retval = sctp_setsockopt_mappedv4(sk, optval, optlen); + break; + case SCTP_MAXSEG: + retval = sctp_setsockopt_maxseg(sk, optval, optlen); + break; + case SCTP_ADAPTATION_LAYER: + retval = sctp_setsockopt_adaptation_layer(sk, optval, optlen); + break; + case SCTP_CONTEXT: + retval = sctp_setsockopt_context(sk, optval, optlen); + break; + case SCTP_FRAGMENT_INTERLEAVE: + retval = sctp_setsockopt_fragment_interleave(sk, optval, optlen); + break; + case SCTP_MAX_BURST: + retval = sctp_setsockopt_maxburst(sk, optval, optlen); + break; + case SCTP_AUTH_CHUNK: + retval = sctp_setsockopt_auth_chunk(sk, optval, optlen); + break; + case SCTP_HMAC_IDENT: + retval = 
sctp_setsockopt_hmac_ident(sk, optval, optlen); + break; + case SCTP_AUTH_KEY: + retval = sctp_setsockopt_auth_key(sk, optval, optlen); + break; + case SCTP_AUTH_ACTIVE_KEY: + retval = sctp_setsockopt_active_key(sk, optval, optlen); + break; + case SCTP_AUTH_DELETE_KEY: + retval = sctp_setsockopt_del_key(sk, optval, optlen); + break; + case SCTP_AUTO_ASCONF: + retval = sctp_setsockopt_auto_asconf(sk, optval, optlen); + break; + case SCTP_PEER_ADDR_THLDS: + retval = sctp_setsockopt_paddr_thresholds(sk, optval, optlen); + break; + case SCTP_RECVRCVINFO: + retval = sctp_setsockopt_recvrcvinfo(sk, optval, optlen); + break; + case SCTP_RECVNXTINFO: + retval = sctp_setsockopt_recvnxtinfo(sk, optval, optlen); + break; + default: + retval = -ENOPROTOOPT; + break; + } + + release_sock(sk); + +out_nounlock: + return retval; +} + +/* API 3.1.6 connect() - UDP Style Syntax + * + * An application may use the connect() call in the UDP model to initiate an + * association without sending data. + * + * The syntax is: + * + * ret = connect(int sd, const struct sockaddr *nam, socklen_t len); + * + * sd: the socket descriptor to have a new association added to. + * + * nam: the address structure (either struct sockaddr_in or struct + * sockaddr_in6 defined in RFC2553 [7]). + * + * len: the size of the address. + */ +static int sctp_connect(struct sock *sk, struct sockaddr *addr, + int addr_len) +{ + int err = 0; + struct sctp_af *af; + + lock_sock(sk); + + pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk, + addr, addr_len); + + /* Validate addr_len before calling common connect/connectx routine. */ + af = sctp_get_af_specific(addr->sa_family); + if (!af || addr_len < af->sockaddr_len) { + err = -EINVAL; + } else { + /* Pass correct addr len to common routine (so it knows there + * is only one address being passed. + */ + err = __sctp_connect(sk, addr, af->sockaddr_len, NULL); + } + + release_sock(sk); + return err; +} + +/* FIXME: Write comments. */ +static int sctp_disconnect(struct sock *sk, int flags) +{ + return -EOPNOTSUPP; /* STUB */ +} + +/* 4.1.4 accept() - TCP Style Syntax + * + * Applications use accept() call to remove an established SCTP + * association from the accept queue of the endpoint. A new socket + * descriptor will be returned from accept() to represent the newly + * formed association. + */ +static struct sock *sctp_accept(struct sock *sk, int flags, int *err) +{ + struct sctp_sock *sp; + struct sctp_endpoint *ep; + struct sock *newsk = NULL; + struct sctp_association *asoc; + long timeo; + int error = 0; + + lock_sock(sk); + + sp = sctp_sk(sk); + ep = sp->ep; + + if (!sctp_style(sk, TCP)) { + error = -EOPNOTSUPP; + goto out; + } + + if (!sctp_sstate(sk, LISTENING)) { + error = -EINVAL; + goto out; + } + + timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); + + error = sctp_wait_for_accept(sk, timeo); + if (error) + goto out; + + /* We treat the list of associations on the endpoint as the accept + * queue and pick the first association on the list. + */ + asoc = list_entry(ep->asocs.next, struct sctp_association, asocs); + + newsk = sp->pf->create_accept_sk(sk, asoc); + if (!newsk) { + error = -ENOMEM; + goto out; + } + + /* Populate the fields of the newsk from the oldsk and migrate the + * asoc to the newsk. + */ + sctp_sock_migrate(sk, newsk, asoc, SCTP_SOCKET_TCP); + +out: + release_sock(sk); + *err = error; + return newsk; +} + +/* The SCTP ioctl handler. 
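+ *
+ * Only SIOCINQ is implemented; it reports the size of the message at
+ * the head of the receive queue.  Illustrative user-space sketch
+ * (editor's example; SIOCINQ is defined in <linux/sockios.h>):
+ *
+ *	int pending = 0;
+ *
+ *	if (ioctl(fd, SIOCINQ, &pending) == 0)
+ *		printf("next message holds %d bytes\n", pending);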
*/ +static int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg) +{ + int rc = -ENOTCONN; + + lock_sock(sk); + + /* + * SEQPACKET-style sockets in LISTENING state are valid, for + * SCTP, so only discard TCP-style sockets in LISTENING state. + */ + if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) + goto out; + + switch (cmd) { + case SIOCINQ: { + struct sk_buff *skb; + unsigned int amount = 0; + + skb = skb_peek(&sk->sk_receive_queue); + if (skb != NULL) { + /* + * We will only return the amount of this packet since + * that is all that will be read. + */ + amount = skb->len; + } + rc = put_user(amount, (int __user *)arg); + break; + } + default: + rc = -ENOIOCTLCMD; + break; + } +out: + release_sock(sk); + return rc; +} + +/* This is the function which gets called during socket creation to + * initialized the SCTP-specific portion of the sock. + * The sock structure should already be zero-filled memory. + */ +static int sctp_init_sock(struct sock *sk) +{ + struct net *net = sock_net(sk); + struct sctp_sock *sp; + + pr_debug("%s: sk:%p\n", __func__, sk); + + sp = sctp_sk(sk); + + /* Initialize the SCTP per socket area. */ + switch (sk->sk_type) { + case SOCK_SEQPACKET: + sp->type = SCTP_SOCKET_UDP; + break; + case SOCK_STREAM: + sp->type = SCTP_SOCKET_TCP; + break; + default: + return -ESOCKTNOSUPPORT; + } + + /* Initialize default send parameters. These parameters can be + * modified with the SCTP_DEFAULT_SEND_PARAM socket option. + */ + sp->default_stream = 0; + sp->default_ppid = 0; + sp->default_flags = 0; + sp->default_context = 0; + sp->default_timetolive = 0; + + sp->default_rcv_context = 0; + sp->max_burst = net->sctp.max_burst; + + sp->sctp_hmac_alg = net->sctp.sctp_hmac_alg; + + /* Initialize default setup parameters. These parameters + * can be modified with the SCTP_INITMSG socket option or + * overridden by the SCTP_INIT CMSG. + */ + sp->initmsg.sinit_num_ostreams = sctp_max_outstreams; + sp->initmsg.sinit_max_instreams = sctp_max_instreams; + sp->initmsg.sinit_max_attempts = net->sctp.max_retrans_init; + sp->initmsg.sinit_max_init_timeo = net->sctp.rto_max; + + /* Initialize default RTO related parameters. These parameters can + * be modified for with the SCTP_RTOINFO socket option. + */ + sp->rtoinfo.srto_initial = net->sctp.rto_initial; + sp->rtoinfo.srto_max = net->sctp.rto_max; + sp->rtoinfo.srto_min = net->sctp.rto_min; + + /* Initialize default association related parameters. These parameters + * can be modified with the SCTP_ASSOCINFO socket option. + */ + sp->assocparams.sasoc_asocmaxrxt = net->sctp.max_retrans_association; + sp->assocparams.sasoc_number_peer_destinations = 0; + sp->assocparams.sasoc_peer_rwnd = 0; + sp->assocparams.sasoc_local_rwnd = 0; + sp->assocparams.sasoc_cookie_life = net->sctp.valid_cookie_life; + + /* Initialize default event subscriptions. By default, all the + * options are off. + */ + memset(&sp->subscribe, 0, sizeof(struct sctp_event_subscribe)); + + /* Default Peer Address Parameters. These defaults can + * be modified via SCTP_PEER_ADDR_PARAMS + */ + sp->hbinterval = net->sctp.hb_interval; + sp->pathmaxrxt = net->sctp.max_retrans_path; + sp->pathmtu = 0; /* allow default discovery */ + sp->sackdelay = net->sctp.sack_timeout; + sp->sackfreq = 2; + sp->param_flags = SPP_HB_ENABLE | + SPP_PMTUD_ENABLE | + SPP_SACKDELAY_ENABLE; + + /* If enabled no SCTP message fragmentation will be performed. + * Configure through SCTP_DISABLE_FRAGMENTS socket option. + */ + sp->disable_fragments = 0; + + /* Enable Nagle algorithm by default. 
*/ + sp->nodelay = 0; + + sp->recvrcvinfo = 0; + sp->recvnxtinfo = 0; + + /* Enable by default. */ + sp->v4mapped = 1; + + /* Auto-close idle associations after the configured + * number of seconds. A value of 0 disables this + * feature. Configure through the SCTP_AUTOCLOSE socket option, + * for UDP-style sockets only. + */ + sp->autoclose = 0; + + /* User specified fragmentation limit. */ + sp->user_frag = 0; + + sp->adaptation_ind = 0; + + sp->pf = sctp_get_pf_specific(sk->sk_family); + + /* Control variables for partial data delivery. */ + atomic_set(&sp->pd_mode, 0); + skb_queue_head_init(&sp->pd_lobby); + sp->frag_interleave = 0; + + /* Create a per socket endpoint structure. Even if we + * change the data structure relationships, this may still + * be useful for storing pre-connect address information. + */ + sp->ep = sctp_endpoint_new(sk, GFP_KERNEL); + if (!sp->ep) + return -ENOMEM; + + sp->hmac = NULL; + + sk->sk_destruct = sctp_destruct_sock; + + SCTP_DBG_OBJCNT_INC(sock); + + local_bh_disable(); + percpu_counter_inc(&sctp_sockets_allocated); + sock_prot_inuse_add(net, sk->sk_prot, 1); + + /* Nothing can fail after this block, otherwise + * sctp_destroy_sock() will be called without addr_wq_lock held + */ + if (net->sctp.default_auto_asconf) { + spin_lock(&sock_net(sk)->sctp.addr_wq_lock); + list_add_tail(&sp->auto_asconf_list, + &net->sctp.auto_asconf_splist); + sp->do_auto_asconf = 1; + spin_unlock(&sock_net(sk)->sctp.addr_wq_lock); + } else { + sp->do_auto_asconf = 0; + } + + local_bh_enable(); + + return 0; +} + +/* Cleanup any SCTP per socket resources. Must be called with + * sock_net(sk)->sctp.addr_wq_lock held if sp->do_auto_asconf is true + */ +static void sctp_destroy_sock(struct sock *sk) +{ + struct sctp_sock *sp; + + pr_debug("%s: sk:%p\n", __func__, sk); + + /* Release our hold on the endpoint. */ + sp = sctp_sk(sk); + /* This could happen during socket init, thus we bail out + * early, since the rest of the below is not setup either. + */ + if (sp->ep == NULL) + return; + + if (sp->do_auto_asconf) { + sp->do_auto_asconf = 0; + list_del(&sp->auto_asconf_list); + } + sctp_endpoint_free(sp->ep); + local_bh_disable(); + percpu_counter_dec(&sctp_sockets_allocated); + sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); + local_bh_enable(); +} + +/* Triggered when there are no references on the socket anymore */ +static void sctp_destruct_sock(struct sock *sk) +{ + struct sctp_sock *sp = sctp_sk(sk); + + /* Free up the HMAC transform. */ + crypto_free_hash(sp->hmac); + + inet_sock_destruct(sk); +} + +/* API 4.1.7 shutdown() - TCP Style Syntax + * int shutdown(int socket, int how); + * + * sd - the socket descriptor of the association to be closed. + * how - Specifies the type of shutdown. The values are + * as follows: + * SHUT_RD + * Disables further receive operations. No SCTP + * protocol action is taken. + * SHUT_WR + * Disables further send operations, and initiates + * the SCTP shutdown sequence. + * SHUT_RDWR + * Disables further send and receive operations + * and initiates the SCTP shutdown sequence. 
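+ *
+ * Illustrative user-space sketch (editor's example): a graceful close
+ * of a TCP-style socket that still drains queued inbound data:
+ *
+ *	char buf[1024];
+ *	ssize_t n;
+ *
+ *	shutdown(sd, SHUT_WR);			// start SHUTDOWN sequence
+ *	while ((n = recv(sd, buf, sizeof(buf), 0)) > 0)
+ *		;				// drain until EOF
+ *	close(sd);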
+ */ +static void sctp_shutdown(struct sock *sk, int how) +{ + struct net *net = sock_net(sk); + struct sctp_endpoint *ep; + struct sctp_association *asoc; + + if (!sctp_style(sk, TCP)) + return; + + if (how & SEND_SHUTDOWN) { + ep = sctp_sk(sk)->ep; + if (!list_empty(&ep->asocs)) { + asoc = list_entry(ep->asocs.next, + struct sctp_association, asocs); + sctp_primitive_SHUTDOWN(net, asoc, NULL); + } + } +} + +/* 7.2.1 Association Status (SCTP_STATUS) + + * Applications can retrieve current status information about an + * association, including association state, peer receiver window size, + * number of unacked data chunks, and number of data chunks pending + * receipt. This information is read-only. + */ +static int sctp_getsockopt_sctp_status(struct sock *sk, int len, + char __user *optval, + int __user *optlen) +{ + struct sctp_status status; + struct sctp_association *asoc = NULL; + struct sctp_transport *transport; + sctp_assoc_t associd; + int retval = 0; + + if (len < sizeof(status)) { + retval = -EINVAL; + goto out; + } + + len = sizeof(status); + if (copy_from_user(&status, optval, len)) { + retval = -EFAULT; + goto out; + } + + associd = status.sstat_assoc_id; + asoc = sctp_id2assoc(sk, associd); + if (!asoc) { + retval = -EINVAL; + goto out; + } + + transport = asoc->peer.primary_path; + + status.sstat_assoc_id = sctp_assoc2id(asoc); + status.sstat_state = sctp_assoc_to_state(asoc); + status.sstat_rwnd = asoc->peer.rwnd; + status.sstat_unackdata = asoc->unack_data; + + status.sstat_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map); + status.sstat_instrms = asoc->c.sinit_max_instreams; + status.sstat_outstrms = asoc->c.sinit_num_ostreams; + status.sstat_fragmentation_point = asoc->frag_point; + status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc); + memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr, + transport->af_specific->sockaddr_len); + /* Map ipv4 address into v4-mapped-on-v6 address. */ + sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk), + (union sctp_addr *)&status.sstat_primary.spinfo_address); + status.sstat_primary.spinfo_state = transport->state; + status.sstat_primary.spinfo_cwnd = transport->cwnd; + status.sstat_primary.spinfo_srtt = transport->srtt; + status.sstat_primary.spinfo_rto = jiffies_to_msecs(transport->rto); + status.sstat_primary.spinfo_mtu = transport->pathmtu; + + if (status.sstat_primary.spinfo_state == SCTP_UNKNOWN) + status.sstat_primary.spinfo_state = SCTP_ACTIVE; + + if (put_user(len, optlen)) { + retval = -EFAULT; + goto out; + } + + pr_debug("%s: len:%d, state:%d, rwnd:%d, assoc_id:%d\n", + __func__, len, status.sstat_state, status.sstat_rwnd, + status.sstat_assoc_id); + + if (copy_to_user(optval, &status, len)) { + retval = -EFAULT; + goto out; + } + +out: + return retval; +} + + +/* 7.2.2 Peer Address Information (SCTP_GET_PEER_ADDR_INFO) + * + * Applications can retrieve information about a specific peer address + * of an association, including its reachability state, congestion + * window, and retransmission timer values. This information is + * read-only. 
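+ *
+ * Illustrative user-space sketch (editor's example; 'peer' is an
+ * assumed, already-filled sockaddr_storage for one peer address and
+ * 'assoc_id' is assumed).  spinfo_address and spinfo_assoc_id select
+ * the transport, the remaining fields are output:
+ *
+ *	struct sctp_paddrinfo pi;
+ *	socklen_t len = sizeof(pi);
+ *
+ *	memset(&pi, 0, sizeof(pi));
+ *	pi.spinfo_assoc_id = assoc_id;
+ *	memcpy(&pi.spinfo_address, &peer, sizeof(peer));
+ *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_GET_PEER_ADDR_INFO,
+ *		       &pi, &len) == 0)
+ *		printf("srtt=%u rto=%u cwnd=%u\n",
+ *		       pi.spinfo_srtt, pi.spinfo_rto, pi.spinfo_cwnd);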
+ */ +static int sctp_getsockopt_peer_addr_info(struct sock *sk, int len, + char __user *optval, + int __user *optlen) +{ + struct sctp_paddrinfo pinfo; + struct sctp_transport *transport; + int retval = 0; + + if (len < sizeof(pinfo)) { + retval = -EINVAL; + goto out; + } + + len = sizeof(pinfo); + if (copy_from_user(&pinfo, optval, len)) { + retval = -EFAULT; + goto out; + } + + transport = sctp_addr_id2transport(sk, &pinfo.spinfo_address, + pinfo.spinfo_assoc_id); + if (!transport) + return -EINVAL; + + pinfo.spinfo_assoc_id = sctp_assoc2id(transport->asoc); + pinfo.spinfo_state = transport->state; + pinfo.spinfo_cwnd = transport->cwnd; + pinfo.spinfo_srtt = transport->srtt; + pinfo.spinfo_rto = jiffies_to_msecs(transport->rto); + pinfo.spinfo_mtu = transport->pathmtu; + + if (pinfo.spinfo_state == SCTP_UNKNOWN) + pinfo.spinfo_state = SCTP_ACTIVE; + + if (put_user(len, optlen)) { + retval = -EFAULT; + goto out; + } + + if (copy_to_user(optval, &pinfo, len)) { + retval = -EFAULT; + goto out; + } + +out: + return retval; +} + +/* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS) + * + * This option is a on/off flag. If enabled no SCTP message + * fragmentation will be performed. Instead if a message being sent + * exceeds the current PMTU size, the message will NOT be sent and + * instead a error will be indicated to the user. + */ +static int sctp_getsockopt_disable_fragments(struct sock *sk, int len, + char __user *optval, int __user *optlen) +{ + int val; + + if (len < sizeof(int)) + return -EINVAL; + + len = sizeof(int); + val = (sctp_sk(sk)->disable_fragments == 1); + if (put_user(len, optlen)) + return -EFAULT; + if (copy_to_user(optval, &val, len)) + return -EFAULT; + return 0; +} + +/* 7.1.15 Set notification and ancillary events (SCTP_EVENTS) + * + * This socket option is used to specify various notifications and + * ancillary data the user wishes to receive. + */ +static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval, + int __user *optlen) +{ + if (len <= 0) + return -EINVAL; + if (len > sizeof(struct sctp_event_subscribe)) + len = sizeof(struct sctp_event_subscribe); + if (put_user(len, optlen)) + return -EFAULT; + if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len)) + return -EFAULT; + return 0; +} + +/* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE) + * + * This socket option is applicable to the UDP-style socket only. When + * set it will cause associations that are idle for more than the + * specified number of seconds to automatically close. An association + * being idle is defined an association that has NOT sent or received + * user data. The special value of '0' indicates that no automatic + * close of any associations should be performed. The option expects an + * integer defining the number of seconds of idle time before an + * association is closed. + */ +static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optval, int __user *optlen) +{ + /* Applicable to UDP-style socket only */ + if (sctp_style(sk, TCP)) + return -EOPNOTSUPP; + if (len < sizeof(int)) + return -EINVAL; + len = sizeof(int); + if (put_user(len, optlen)) + return -EFAULT; + if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int))) + return -EFAULT; + return 0; +} + +/* Helper routine to branch off an association to a new socket. 
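+ *
+ * User space reaches this via the SCTP_SOCKOPT_PEELOFF getsockopt,
+ * typically through the lksctp-tools wrapper.  Illustrative sketch
+ * (editor's example; 'sd' and 'assoc_id' are assumed):
+ *
+ *	int peeled = sctp_peeloff(sd, assoc_id);
+ *
+ *	if (peeled < 0)
+ *		perror("sctp_peeloff");
+ *	// on success 'peeled' behaves like a one-to-one style socket
+ *	// for that association, while 'sd' keeps serving the rest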
*/ +int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp) +{ + struct sctp_association *asoc = sctp_id2assoc(sk, id); + struct sctp_sock *sp = sctp_sk(sk); + struct socket *sock; + int err = 0; + + if (!asoc) + return -EINVAL; + + /* An association cannot be branched off from an already peeled-off + * socket, nor is this supported for tcp style sockets. + */ + if (!sctp_style(sk, UDP)) + return -EINVAL; + + /* Create a new socket. */ + err = sock_create(sk->sk_family, SOCK_SEQPACKET, IPPROTO_SCTP, &sock); + if (err < 0) + return err; + + sctp_copy_sock(sock->sk, sk, asoc); + + /* Make peeled-off sockets more like 1-1 accepted sockets. + * Set the daddr and initialize id to something more random + */ + sp->pf->to_sk_daddr(&asoc->peer.primary_addr, sk); + + /* Populate the fields of the newsk from the oldsk and migrate the + * asoc to the newsk. + */ + sctp_sock_migrate(sk, sock->sk, asoc, SCTP_SOCKET_UDP_HIGH_BANDWIDTH); + + *sockp = sock; + + return err; +} +EXPORT_SYMBOL(sctp_do_peeloff); + +static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval, int __user *optlen) +{ + sctp_peeloff_arg_t peeloff; + struct socket *newsock; + struct file *newfile; + int retval = 0; + + if (len < sizeof(sctp_peeloff_arg_t)) + return -EINVAL; + len = sizeof(sctp_peeloff_arg_t); + if (copy_from_user(&peeloff, optval, len)) + return -EFAULT; + + retval = sctp_do_peeloff(sk, peeloff.associd, &newsock); + if (retval < 0) + goto out; + + /* Map the socket to an unused fd that can be returned to the user. */ + retval = get_unused_fd_flags(0); + if (retval < 0) { + sock_release(newsock); + goto out; + } + + newfile = sock_alloc_file(newsock, 0, NULL); + if (unlikely(IS_ERR(newfile))) { + put_unused_fd(retval); + sock_release(newsock); + return PTR_ERR(newfile); + } + + pr_debug("%s: sk:%p, newsk:%p, sd:%d\n", __func__, sk, newsock->sk, + retval); + + /* Return the fd mapped to the new socket. */ + if (put_user(len, optlen)) { + fput(newfile); + put_unused_fd(retval); + return -EFAULT; + } + peeloff.sd = retval; + if (copy_to_user(optval, &peeloff, len)) { + fput(newfile); + put_unused_fd(retval); + return -EFAULT; + } + fd_install(retval, newfile); +out: + return retval; +} + +/* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS) + * + * Applications can enable or disable heartbeats for any peer address of + * an association, modify an address's heartbeat interval, force a + * heartbeat to be sent immediately, and adjust the address's maximum + * number of retransmissions sent before an address is considered + * unreachable. The following structure is used to access and modify an + * address's parameters: + * + * struct sctp_paddrparams { + * sctp_assoc_t spp_assoc_id; + * struct sockaddr_storage spp_address; + * uint32_t spp_hbinterval; + * uint16_t spp_pathmaxrxt; + * uint32_t spp_pathmtu; + * uint32_t spp_sackdelay; + * uint32_t spp_flags; + * }; + * + * spp_assoc_id - (one-to-many style socket) This is filled in the + * application, and identifies the association for + * this query. + * spp_address - This specifies which address is of interest. + * spp_hbinterval - This contains the value of the heartbeat interval, + * in milliseconds. If a value of zero + * is present in this field then no changes are to + * be made to this parameter. + * spp_pathmaxrxt - This contains the maximum number of + * retransmissions before this address shall be + * considered unreachable. 
If a value of zero
+ *                     is present in this field then no changes are to
+ *                     be made to this parameter.
+ * spp_pathmtu       - When Path MTU discovery is disabled the value
+ *                     specified here will be the "fixed" path mtu.
+ *                     Note that if the spp_address field is empty
+ *                     then all associations on this address will
+ *                     have this fixed path mtu set upon them.
+ *
+ * spp_sackdelay     - When delayed sack is enabled, this value specifies
+ *                     the number of milliseconds that sacks will be delayed
+ *                     for. This value will apply to all addresses of an
+ *                     association if the spp_address field is empty. Note
+ *                     also, that if delayed sack is enabled and this
+ *                     value is set to 0, no change is made to the last
+ *                     recorded delayed sack timer value.
+ *
+ * spp_flags         - These flags are used to control various features
+ *                     on an association. The flag field may contain
+ *                     zero or more of the following options.
+ *
+ *                     SPP_HB_ENABLE - Enable heartbeats on the
+ *                     specified address. Note that if the address
+ *                     field is empty all addresses for the association
+ *                     have heartbeats enabled upon them.
+ *
+ *                     SPP_HB_DISABLE - Disable heartbeats on the
+ *                     specified address. Note that if the address
+ *                     field is empty all addresses for the association
+ *                     will have their heartbeats disabled. Note also
+ *                     that SPP_HB_ENABLE and SPP_HB_DISABLE are
+ *                     mutually exclusive, only one of these two should
+ *                     be specified. Enabling both fields will have
+ *                     undetermined results.
+ *
+ *                     SPP_HB_DEMAND - Request a user initiated heartbeat
+ *                     to be made immediately.
+ *
+ *                     SPP_PMTUD_ENABLE - This field will enable PMTU
+ *                     discovery upon the specified address. Note that
+ *                     if the address field is empty then all addresses
+ *                     on the association are affected.
+ *
+ *                     SPP_PMTUD_DISABLE - This field will disable PMTU
+ *                     discovery upon the specified address. Note that
+ *                     if the address field is empty then all addresses
+ *                     on the association are affected. Note also that
+ *                     SPP_PMTUD_ENABLE and SPP_PMTUD_DISABLE are mutually
+ *                     exclusive. Enabling both will have undetermined
+ *                     results.
+ *
+ *                     SPP_SACKDELAY_ENABLE - Setting this flag turns
+ *                     on delayed sack. The time specified in spp_sackdelay
+ *                     is used to specify the sack delay for this address. Note
+ *                     that if spp_address is empty then all addresses will
+ *                     enable delayed sack and take on the sack delay
+ *                     value specified in spp_sackdelay.
+ *                     SPP_SACKDELAY_DISABLE - Setting this flag turns
+ *                     off delayed sack. If the spp_address field is blank then
+ *                     delayed sack is disabled for the entire association. Note
+ *                     also that this field is mutually exclusive to
+ *                     SPP_SACKDELAY_ENABLE, setting both will have undefined
+ *                     results.
+ */
+static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len,
+					    char __user *optval, int __user *optlen)
+{
+	struct sctp_paddrparams params;
+	struct sctp_transport *trans = NULL;
+	struct sctp_association *asoc = NULL;
+	struct sctp_sock *sp = sctp_sk(sk);
+
+	if (len < sizeof(struct sctp_paddrparams))
+		return -EINVAL;
+	len = sizeof(struct sctp_paddrparams);
+	if (copy_from_user(&params, optval, len))
+		return -EFAULT;
+
+	/* If an address other than INADDR_ANY is specified, and
+	 * no transport is found, then the request is invalid.
+ */ + if (!sctp_is_any(sk, (union sctp_addr *)¶ms.spp_address)) { + trans = sctp_addr_id2transport(sk, ¶ms.spp_address, + params.spp_assoc_id); + if (!trans) { + pr_debug("%s: failed no transport\n", __func__); + return -EINVAL; + } + } + + /* Get association, if assoc_id != 0 and the socket is a one + * to many style socket, and an association was not found, then + * the id was invalid. + */ + asoc = sctp_id2assoc(sk, params.spp_assoc_id); + if (!asoc && params.spp_assoc_id && sctp_style(sk, UDP)) { + pr_debug("%s: failed no association\n", __func__); + return -EINVAL; + } + + if (trans) { + /* Fetch transport values. */ + params.spp_hbinterval = jiffies_to_msecs(trans->hbinterval); + params.spp_pathmtu = trans->pathmtu; + params.spp_pathmaxrxt = trans->pathmaxrxt; + params.spp_sackdelay = jiffies_to_msecs(trans->sackdelay); + + /*draft-11 doesn't say what to return in spp_flags*/ + params.spp_flags = trans->param_flags; + } else if (asoc) { + /* Fetch association values. */ + params.spp_hbinterval = jiffies_to_msecs(asoc->hbinterval); + params.spp_pathmtu = asoc->pathmtu; + params.spp_pathmaxrxt = asoc->pathmaxrxt; + params.spp_sackdelay = jiffies_to_msecs(asoc->sackdelay); + + /*draft-11 doesn't say what to return in spp_flags*/ + params.spp_flags = asoc->param_flags; + } else { + /* Fetch socket values. */ + params.spp_hbinterval = sp->hbinterval; + params.spp_pathmtu = sp->pathmtu; + params.spp_sackdelay = sp->sackdelay; + params.spp_pathmaxrxt = sp->pathmaxrxt; + + /*draft-11 doesn't say what to return in spp_flags*/ + params.spp_flags = sp->param_flags; + } + + if (copy_to_user(optval, ¶ms, len)) + return -EFAULT; + + if (put_user(len, optlen)) + return -EFAULT; + + return 0; +} + +/* + * 7.1.23. Get or set delayed ack timer (SCTP_DELAYED_SACK) + * + * This option will effect the way delayed acks are performed. This + * option allows you to get or set the delayed ack time, in + * milliseconds. It also allows changing the delayed ack frequency. + * Changing the frequency to 1 disables the delayed sack algorithm. If + * the assoc_id is 0, then this sets or gets the endpoints default + * values. If the assoc_id field is non-zero, then the set or get + * effects the specified association for the one to many model (the + * assoc_id field is ignored by the one to one model). Note that if + * sack_delay or sack_freq are 0 when setting this option, then the + * current values will remain unchanged. + * + * struct sctp_sack_info { + * sctp_assoc_t sack_assoc_id; + * uint32_t sack_delay; + * uint32_t sack_freq; + * }; + * + * sack_assoc_id - This parameter, indicates which association the user + * is performing an action upon. Note that if this field's value is + * zero then the endpoints default value is changed (effecting future + * associations only). + * + * sack_delay - This parameter contains the number of milliseconds that + * the user is requesting the delayed ACK timer be set to. Note that + * this value is defined in the standard to be between 200 and 500 + * milliseconds. + * + * sack_freq - This parameter contains the number of packets that must + * be received before a sack is sent without waiting for the delay + * timer to expire. The default value for this is 2, setting this + * value to 1 will disable the delayed sack algorithm. 
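+ *
+ * Illustrative user-space sketch (editor's example; 200 ms is an
+ * arbitrary value within the range described above):
+ *
+ *	struct sctp_sack_info si;
+ *
+ *	memset(&si, 0, sizeof(si));
+ *	si.sack_assoc_id = 0;	// endpoint default
+ *	si.sack_delay    = 200;	// milliseconds
+ *	si.sack_freq     = 2;	// packets before an immediate SACK
+ *	setsockopt(fd, IPPROTO_SCTP, SCTP_DELAYED_SACK, &si, sizeof(si));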
+ */ +static int sctp_getsockopt_delayed_ack(struct sock *sk, int len, + char __user *optval, + int __user *optlen) +{ + struct sctp_sack_info params; + struct sctp_association *asoc = NULL; + struct sctp_sock *sp = sctp_sk(sk); + + if (len >= sizeof(struct sctp_sack_info)) { + len = sizeof(struct sctp_sack_info); + + if (copy_from_user(¶ms, optval, len)) + return -EFAULT; + } else if (len == sizeof(struct sctp_assoc_value)) { + pr_warn_ratelimited(DEPRECATED + "%s (pid %d) " + "Use of struct sctp_assoc_value in delayed_ack socket option.\n" + "Use struct sctp_sack_info instead\n", + current->comm, task_pid_nr(current)); + if (copy_from_user(¶ms, optval, len)) + return -EFAULT; + } else + return -EINVAL; + + /* Get association, if sack_assoc_id != 0 and the socket is a one + * to many style socket, and an association was not found, then + * the id was invalid. + */ + asoc = sctp_id2assoc(sk, params.sack_assoc_id); + if (!asoc && params.sack_assoc_id && sctp_style(sk, UDP)) + return -EINVAL; + + if (asoc) { + /* Fetch association values. */ + if (asoc->param_flags & SPP_SACKDELAY_ENABLE) { + params.sack_delay = jiffies_to_msecs( + asoc->sackdelay); + params.sack_freq = asoc->sackfreq; + + } else { + params.sack_delay = 0; + params.sack_freq = 1; + } + } else { + /* Fetch socket values. */ + if (sp->param_flags & SPP_SACKDELAY_ENABLE) { + params.sack_delay = sp->sackdelay; + params.sack_freq = sp->sackfreq; + } else { + params.sack_delay = 0; + params.sack_freq = 1; + } + } + + if (copy_to_user(optval, ¶ms, len)) + return -EFAULT; + + if (put_user(len, optlen)) + return -EFAULT; + + return 0; +} + +/* 7.1.3 Initialization Parameters (SCTP_INITMSG) + * + * Applications can specify protocol parameters for the default association + * initialization. The option name argument to setsockopt() and getsockopt() + * is SCTP_INITMSG. + * + * Setting initialization parameters is effective only on an unconnected + * socket (for UDP-style sockets only future associations are effected + * by the change). With TCP-style sockets, this option is inherited by + * sockets derived from a listener socket. + */ +static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen) +{ + if (len < sizeof(struct sctp_initmsg)) + return -EINVAL; + len = sizeof(struct sctp_initmsg); + if (put_user(len, optlen)) + return -EFAULT; + if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len)) + return -EFAULT; + return 0; +} + + +static int sctp_getsockopt_peer_addrs(struct sock *sk, int len, + char __user *optval, int __user *optlen) +{ + struct sctp_association *asoc; + int cnt = 0; + struct sctp_getaddrs getaddrs; + struct sctp_transport *from; + void __user *to; + union sctp_addr temp; + struct sctp_sock *sp = sctp_sk(sk); + int addrlen; + size_t space_left; + int bytes_copied; + + if (len < sizeof(struct sctp_getaddrs)) + return -EINVAL; + + if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs))) + return -EFAULT; + + /* For UDP-style sockets, id specifies the association to query. 
*/ + asoc = sctp_id2assoc(sk, getaddrs.assoc_id); + if (!asoc) + return -EINVAL; + + to = optval + offsetof(struct sctp_getaddrs, addrs); + space_left = len - offsetof(struct sctp_getaddrs, addrs); + + list_for_each_entry(from, &asoc->peer.transport_addr_list, + transports) { + memcpy(&temp, &from->ipaddr, sizeof(temp)); + addrlen = sctp_get_pf_specific(sk->sk_family) + ->addr_to_user(sp, &temp); + if (space_left < addrlen) + return -ENOMEM; + if (copy_to_user(to, &temp, addrlen)) + return -EFAULT; + to += addrlen; + cnt++; + space_left -= addrlen; + } + + if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) + return -EFAULT; + bytes_copied = ((char __user *)to) - optval; + if (put_user(bytes_copied, optlen)) + return -EFAULT; + + return 0; +} + +static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to, + size_t space_left, int *bytes_copied) +{ + struct sctp_sockaddr_entry *addr; + union sctp_addr temp; + int cnt = 0; + int addrlen; + struct net *net = sock_net(sk); + + rcu_read_lock(); + list_for_each_entry_rcu(addr, &net->sctp.local_addr_list, list) { + if (!addr->valid) + continue; + + if ((PF_INET == sk->sk_family) && + (AF_INET6 == addr->a.sa.sa_family)) + continue; + if ((PF_INET6 == sk->sk_family) && + inet_v6_ipv6only(sk) && + (AF_INET == addr->a.sa.sa_family)) + continue; + memcpy(&temp, &addr->a, sizeof(temp)); + if (!temp.v4.sin_port) + temp.v4.sin_port = htons(port); + + addrlen = sctp_get_pf_specific(sk->sk_family) + ->addr_to_user(sctp_sk(sk), &temp); + + if (space_left < addrlen) { + cnt = -ENOMEM; + break; + } + memcpy(to, &temp, addrlen); + + to += addrlen; + cnt++; + space_left -= addrlen; + *bytes_copied += addrlen; + } + rcu_read_unlock(); + + return cnt; +} + + +static int sctp_getsockopt_local_addrs(struct sock *sk, int len, + char __user *optval, int __user *optlen) +{ + struct sctp_bind_addr *bp; + struct sctp_association *asoc; + int cnt = 0; + struct sctp_getaddrs getaddrs; + struct sctp_sockaddr_entry *addr; + void __user *to; + union sctp_addr temp; + struct sctp_sock *sp = sctp_sk(sk); + int addrlen; + int err = 0; + size_t space_left; + int bytes_copied = 0; + void *addrs; + void *buf; + + if (len < sizeof(struct sctp_getaddrs)) + return -EINVAL; + + if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs))) + return -EFAULT; + + /* + * For UDP-style sockets, id specifies the association to query. + * If the id field is set to the value '0' then the locally bound + * addresses are returned without regard to any particular + * association. + */ + if (0 == getaddrs.assoc_id) { + bp = &sctp_sk(sk)->ep->base.bind_addr; + } else { + asoc = sctp_id2assoc(sk, getaddrs.assoc_id); + if (!asoc) + return -EINVAL; + bp = &asoc->base.bind_addr; + } + + to = optval + offsetof(struct sctp_getaddrs, addrs); + space_left = len - offsetof(struct sctp_getaddrs, addrs); + + addrs = kmalloc(space_left, GFP_KERNEL); + if (!addrs) + return -ENOMEM; + + /* If the endpoint is bound to 0.0.0.0 or ::0, get the valid + * addresses from the global local address list. 
+ */ + if (sctp_list_single_entry(&bp->address_list)) { + addr = list_entry(bp->address_list.next, + struct sctp_sockaddr_entry, list); + if (sctp_is_any(sk, &addr->a)) { + cnt = sctp_copy_laddrs(sk, bp->port, addrs, + space_left, &bytes_copied); + if (cnt < 0) { + err = cnt; + goto out; + } + goto copy_getaddrs; + } + } + + buf = addrs; + /* Protection on the bound address list is not needed since + * in the socket option context we hold a socket lock and + * thus the bound address list can't change. + */ + list_for_each_entry(addr, &bp->address_list, list) { + memcpy(&temp, &addr->a, sizeof(temp)); + addrlen = sctp_get_pf_specific(sk->sk_family) + ->addr_to_user(sp, &temp); + if (space_left < addrlen) { + err = -ENOMEM; /*fixme: right error?*/ + goto out; + } + memcpy(buf, &temp, addrlen); + buf += addrlen; + bytes_copied += addrlen; + cnt++; + space_left -= addrlen; + } + +copy_getaddrs: + if (copy_to_user(to, addrs, bytes_copied)) { + err = -EFAULT; + goto out; + } + if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num)) { + err = -EFAULT; + goto out; + } + if (put_user(bytes_copied, optlen)) + err = -EFAULT; +out: + kfree(addrs); + return err; +} + +/* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR) + * + * Requests that the local SCTP stack use the enclosed peer address as + * the association primary. The enclosed address must be one of the + * association peer's addresses. + */ +static int sctp_getsockopt_primary_addr(struct sock *sk, int len, + char __user *optval, int __user *optlen) +{ + struct sctp_prim prim; + struct sctp_association *asoc; + struct sctp_sock *sp = sctp_sk(sk); + + if (len < sizeof(struct sctp_prim)) + return -EINVAL; + + len = sizeof(struct sctp_prim); + + if (copy_from_user(&prim, optval, len)) + return -EFAULT; + + asoc = sctp_id2assoc(sk, prim.ssp_assoc_id); + if (!asoc) + return -EINVAL; + + if (!asoc->peer.primary_path) + return -ENOTCONN; + + memcpy(&prim.ssp_addr, &asoc->peer.primary_path->ipaddr, + asoc->peer.primary_path->af_specific->sockaddr_len); + + sctp_get_pf_specific(sk->sk_family)->addr_to_user(sp, + (union sctp_addr *)&prim.ssp_addr); + + if (put_user(len, optlen)) + return -EFAULT; + if (copy_to_user(optval, &prim, len)) + return -EFAULT; + + return 0; +} + +/* + * 7.1.11 Set Adaptation Layer Indicator (SCTP_ADAPTATION_LAYER) + * + * Requests that the local endpoint set the specified Adaptation Layer + * Indication parameter for all future INIT and INIT-ACK exchanges. + */ +static int sctp_getsockopt_adaptation_layer(struct sock *sk, int len, + char __user *optval, int __user *optlen) +{ + struct sctp_setadaptation adaptation; + + if (len < sizeof(struct sctp_setadaptation)) + return -EINVAL; + + len = sizeof(struct sctp_setadaptation); + + adaptation.ssb_adaptation_ind = sctp_sk(sk)->adaptation_ind; + + if (put_user(len, optlen)) + return -EFAULT; + if (copy_to_user(optval, &adaptation, len)) + return -EFAULT; + + return 0; +} + +/* + * + * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM) + * + * Applications that wish to use the sendto() system call may wish to + * specify a default set of parameters that would normally be supplied + * through the inclusion of ancillary data. This socket option allows + * such an application to set the default sctp_sndrcvinfo structure. 
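+ *
+ * For illustration (editor's sketch; the stream number, ppid and the
+ * assumed 'assoc_id' are arbitrary), defaults for all messages on an
+ * association could be set as follows, using the structure described
+ * in the text below:
+ *
+ *	struct sctp_sndrcvinfo di;
+ *
+ *	memset(&di, 0, sizeof(di));
+ *	di.sinfo_stream   = 1;
+ *	di.sinfo_ppid     = htonl(39);
+ *	di.sinfo_assoc_id = assoc_id;
+ *	setsockopt(fd, IPPROTO_SCTP, SCTP_DEFAULT_SEND_PARAM,
+ *		   &di, sizeof(di));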
+
+
+ * The application that wishes to use this socket option simply passes
+ * into this call the sctp_sndrcvinfo structure defined in Section
+ * 5.2.2.  The input parameters accepted by this call include
+ * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context,
+ * sinfo_timetolive.  The user must provide the sinfo_assoc_id field in
+ * this call if the caller is using the UDP model.
+ *
+ * For getsockopt, it gets the default sctp_sndrcvinfo structure.
+ */
+static int sctp_getsockopt_default_send_param(struct sock *sk,
+					int len, char __user *optval,
+					int __user *optlen)
+{
+	struct sctp_sock *sp = sctp_sk(sk);
+	struct sctp_association *asoc;
+	struct sctp_sndrcvinfo info;
+
+	if (len < sizeof(info))
+		return -EINVAL;
+
+	len = sizeof(info);
+
+	if (copy_from_user(&info, optval, len))
+		return -EFAULT;
+
+	asoc = sctp_id2assoc(sk, info.sinfo_assoc_id);
+	if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP))
+		return -EINVAL;
+	if (asoc) {
+		info.sinfo_stream = asoc->default_stream;
+		info.sinfo_flags = asoc->default_flags;
+		info.sinfo_ppid = asoc->default_ppid;
+		info.sinfo_context = asoc->default_context;
+		info.sinfo_timetolive = asoc->default_timetolive;
+	} else {
+		info.sinfo_stream = sp->default_stream;
+		info.sinfo_flags = sp->default_flags;
+		info.sinfo_ppid = sp->default_ppid;
+		info.sinfo_context = sp->default_context;
+		info.sinfo_timetolive = sp->default_timetolive;
+	}
+
+	if (put_user(len, optlen))
+		return -EFAULT;
+	if (copy_to_user(optval, &info, len))
+		return -EFAULT;
+
+	return 0;
+}
+
+/* RFC6458, Section 8.1.31. Set/get Default Send Parameters
+ * (SCTP_DEFAULT_SNDINFO)
+ */
+static int sctp_getsockopt_default_sndinfo(struct sock *sk, int len,
+					   char __user *optval,
+					   int __user *optlen)
+{
+	struct sctp_sock *sp = sctp_sk(sk);
+	struct sctp_association *asoc;
+	struct sctp_sndinfo info;
+
+	if (len < sizeof(info))
+		return -EINVAL;
+
+	len = sizeof(info);
+
+	if (copy_from_user(&info, optval, len))
+		return -EFAULT;
+
+	asoc = sctp_id2assoc(sk, info.snd_assoc_id);
+	if (!asoc && info.snd_assoc_id && sctp_style(sk, UDP))
+		return -EINVAL;
+	if (asoc) {
+		info.snd_sid = asoc->default_stream;
+		info.snd_flags = asoc->default_flags;
+		info.snd_ppid = asoc->default_ppid;
+		info.snd_context = asoc->default_context;
+	} else {
+		info.snd_sid = sp->default_stream;
+		info.snd_flags = sp->default_flags;
+		info.snd_ppid = sp->default_ppid;
+		info.snd_context = sp->default_context;
+	}
+
+	if (put_user(len, optlen))
+		return -EFAULT;
+	if (copy_to_user(optval, &info, len))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ *
+ * 7.1.5 SCTP_NODELAY
+ *
+ * Turn on/off any Nagle-like algorithm.  This means that packets are
+ * generally sent as soon as possible and no unnecessary delays are
+ * introduced, at the cost of more packets in the network.  Expects an
+ * integer boolean flag.
+ */
+
+static int sctp_getsockopt_nodelay(struct sock *sk, int len,
+				   char __user *optval, int __user *optlen)
+{
+	int val;
+
+	if (len < sizeof(int))
+		return -EINVAL;
+
+	len = sizeof(int);
+	val = (sctp_sk(sk)->nodelay == 1);
+	if (put_user(len, optlen))
+		return -EFAULT;
+	if (copy_to_user(optval, &val, len))
+		return -EFAULT;
+	return 0;
+}
+
+/*
+ *
+ * 7.1.1 SCTP_RTOINFO
+ *
+ * The protocol parameters used to initialize and bound retransmission
+ * timeout (RTO) are tunable.  sctp_rtoinfo structure is used to access
+ * and modify these parameters.
+ * All parameters are time values, in milliseconds.
+ *   A value of 0, when modifying the parameters, indicates that the
+ *   current value should not be changed.
+ *
+ */
+static int sctp_getsockopt_rtoinfo(struct sock *sk, int len,
+				   char __user *optval,
+				   int __user *optlen)
+{
+	struct sctp_rtoinfo rtoinfo;
+	struct sctp_association *asoc;
+
+	if (len < sizeof(struct sctp_rtoinfo))
+		return -EINVAL;
+
+	len = sizeof(struct sctp_rtoinfo);
+
+	if (copy_from_user(&rtoinfo, optval, len))
+		return -EFAULT;
+
+	asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id);
+
+	if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP))
+		return -EINVAL;
+
+	/* Values corresponding to the specific association. */
+	if (asoc) {
+		rtoinfo.srto_initial = jiffies_to_msecs(asoc->rto_initial);
+		rtoinfo.srto_max = jiffies_to_msecs(asoc->rto_max);
+		rtoinfo.srto_min = jiffies_to_msecs(asoc->rto_min);
+	} else {
+		/* Values corresponding to the endpoint. */
+		struct sctp_sock *sp = sctp_sk(sk);
+
+		rtoinfo.srto_initial = sp->rtoinfo.srto_initial;
+		rtoinfo.srto_max = sp->rtoinfo.srto_max;
+		rtoinfo.srto_min = sp->rtoinfo.srto_min;
+	}
+
+	if (put_user(len, optlen))
+		return -EFAULT;
+
+	if (copy_to_user(optval, &rtoinfo, len))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ *
+ * 7.1.2 SCTP_ASSOCINFO
+ *
+ *   This option is used to tune the maximum retransmission attempts
+ *   of the association.
+ *   Returns an error if the new association retransmission value is
+ *   greater than the sum of the retransmission value of the peer.
+ *   See [SCTP] for more information.
+ *
+ */
+static int sctp_getsockopt_associnfo(struct sock *sk, int len,
+				     char __user *optval,
+				     int __user *optlen)
+{
+	struct sctp_assocparams assocparams;
+	struct sctp_association *asoc;
+	struct list_head *pos;
+	int cnt = 0;
+
+	if (len < sizeof(struct sctp_assocparams))
+		return -EINVAL;
+
+	len = sizeof(struct sctp_assocparams);
+
+	if (copy_from_user(&assocparams, optval, len))
+		return -EFAULT;
+
+	asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id);
+
+	if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP))
+		return -EINVAL;
+
+	/* Values corresponding to the specific association */
+	if (asoc) {
+		assocparams.sasoc_asocmaxrxt = asoc->max_retrans;
+		assocparams.sasoc_peer_rwnd = asoc->peer.rwnd;
+		assocparams.sasoc_local_rwnd = asoc->a_rwnd;
+		assocparams.sasoc_cookie_life = ktime_to_ms(asoc->cookie_life);
+
+		list_for_each(pos, &asoc->peer.transport_addr_list) {
+			cnt++;
+		}
+
+		assocparams.sasoc_number_peer_destinations = cnt;
+	} else {
+		/* Values corresponding to the endpoint */
+		struct sctp_sock *sp = sctp_sk(sk);
+
+		assocparams.sasoc_asocmaxrxt = sp->assocparams.sasoc_asocmaxrxt;
+		assocparams.sasoc_peer_rwnd = sp->assocparams.sasoc_peer_rwnd;
+		assocparams.sasoc_local_rwnd = sp->assocparams.sasoc_local_rwnd;
+		assocparams.sasoc_cookie_life =
+					sp->assocparams.sasoc_cookie_life;
+		assocparams.sasoc_number_peer_destinations =
+					sp->assocparams.sasoc_number_peer_destinations;
+	}
+
+	if (put_user(len, optlen))
+		return -EFAULT;
+
+	if (copy_to_user(optval, &assocparams, len))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR)
+ *
+ * This socket option is a boolean flag which turns on or off mapped V4
+ * addresses.  If this option is turned on and the socket is type
+ * PF_INET6, then IPv4 addresses will be mapped to V6 representation.
+ * If this option is turned off, then no mapping will be done of V4
+ * addresses and a user will receive both PF_INET6 and PF_INET type
+ * addresses on the socket.
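+ *
+ * A minimal userspace sketch (illustrative only, assuming an AF_INET6
+ * SCTP socket called fd; not part of this source):
+ *
+ *   int on = 1;
+ *   setsockopt(fd, IPPROTO_SCTP, SCTP_I_WANT_MAPPED_V4_ADDR,
+ *              &on, sizeof(on));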
+ */
+static int sctp_getsockopt_mappedv4(struct sock *sk, int len,
+				    char __user *optval, int __user *optlen)
+{
+	int val;
+	struct sctp_sock *sp = sctp_sk(sk);
+
+	if (len < sizeof(int))
+		return -EINVAL;
+
+	len = sizeof(int);
+	val = sp->v4mapped;
+	if (put_user(len, optlen))
+		return -EFAULT;
+	if (copy_to_user(optval, &val, len))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * 7.1.29.  Set or Get the default context (SCTP_CONTEXT)
+ * (chapter and verse is quoted at sctp_setsockopt_context())
+ */
+static int sctp_getsockopt_context(struct sock *sk, int len,
+				   char __user *optval, int __user *optlen)
+{
+	struct sctp_assoc_value params;
+	struct sctp_sock *sp;
+	struct sctp_association *asoc;
+
+	if (len < sizeof(struct sctp_assoc_value))
+		return -EINVAL;
+
+	len = sizeof(struct sctp_assoc_value);
+
+	if (copy_from_user(&params, optval, len))
+		return -EFAULT;
+
+	sp = sctp_sk(sk);
+
+	if (params.assoc_id != 0) {
+		asoc = sctp_id2assoc(sk, params.assoc_id);
+		if (!asoc)
+			return -EINVAL;
+		params.assoc_value = asoc->default_rcv_context;
+	} else {
+		params.assoc_value = sp->default_rcv_context;
+	}
+
+	if (put_user(len, optlen))
+		return -EFAULT;
+	if (copy_to_user(optval, &params, len))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * 8.1.16.  Get or Set the Maximum Fragmentation Size (SCTP_MAXSEG)
+ * This option will get or set the maximum size to put in any outgoing
+ * SCTP DATA chunk.  If a message is larger than this size it will be
+ * fragmented by SCTP into the specified size.  Note that the underlying
+ * SCTP implementation may fragment into smaller sized chunks when the
+ * PMTU of the underlying association is smaller than the value set by
+ * the user.  The default value for this option is '0' which indicates
+ * the user is NOT limiting fragmentation and only the PMTU will affect
+ * SCTP's choice of DATA chunk size.  Note also that values set larger
+ * than the maximum size of an IP datagram will effectively let SCTP
+ * control fragmentation (i.e. the same as setting this option to 0).
+ *
+ * The following structure is used to access and modify this parameter:
+ *
+ * struct sctp_assoc_value {
+ *   sctp_assoc_t assoc_id;
+ *   uint32_t assoc_value;
+ * };
+ *
+ * assoc_id:  This parameter is ignored for one-to-one style sockets.
+ *    For one-to-many style sockets this parameter indicates which
+ *    association the user is performing an action upon.  Note that if
+ *    this field's value is zero then the endpoint's default value is
+ *    changed (affecting future associations only).
+ * assoc_value:  This parameter specifies the maximum size in bytes.
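+ *
+ * For illustration (hypothetical descriptor fd; not part of this
+ * source), querying the endpoint default with the preferred struct
+ * form rather than the deprecated plain int:
+ *
+ *   struct sctp_assoc_value av = { .assoc_id = 0 };
+ *   socklen_t optlen = sizeof(av);
+ *   if (getsockopt(fd, IPPROTO_SCTP, SCTP_MAXSEG, &av, &optlen) == 0)
+ *           printf("maxseg: %u bytes\n", av.assoc_value);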
+ */
+static int sctp_getsockopt_maxseg(struct sock *sk, int len,
+				  char __user *optval, int __user *optlen)
+{
+	struct sctp_assoc_value params;
+	struct sctp_association *asoc;
+
+	if (len == sizeof(int)) {
+		pr_warn_ratelimited(DEPRECATED
+				    "%s (pid %d) "
+				    "Use of int in maxseg socket option.\n"
+				    "Use struct sctp_assoc_value instead\n",
+				    current->comm, task_pid_nr(current));
+		params.assoc_id = 0;
+	} else if (len >= sizeof(struct sctp_assoc_value)) {
+		len = sizeof(struct sctp_assoc_value);
+		if (copy_from_user(&params, optval, sizeof(params)))
+			return -EFAULT;
+	} else
+		return -EINVAL;
+
+	asoc = sctp_id2assoc(sk, params.assoc_id);
+	if (!asoc && params.assoc_id && sctp_style(sk, UDP))
+		return -EINVAL;
+
+	if (asoc)
+		params.assoc_value = asoc->frag_point;
+	else
+		params.assoc_value = sctp_sk(sk)->user_frag;
+
+	if (put_user(len, optlen))
+		return -EFAULT;
+	if (len == sizeof(int)) {
+		if (copy_to_user(optval, &params.assoc_value, len))
+			return -EFAULT;
+	} else {
+		if (copy_to_user(optval, &params, len))
+			return -EFAULT;
+	}
+
+	return 0;
+}
+
+/*
+ * 7.1.24.  Get or set fragmented interleave (SCTP_FRAGMENT_INTERLEAVE)
+ * (chapter and verse is quoted at sctp_setsockopt_fragment_interleave())
+ */
+static int sctp_getsockopt_fragment_interleave(struct sock *sk, int len,
+					       char __user *optval, int __user *optlen)
+{
+	int val;
+
+	if (len < sizeof(int))
+		return -EINVAL;
+
+	len = sizeof(int);
+
+	val = sctp_sk(sk)->frag_interleave;
+	if (put_user(len, optlen))
+		return -EFAULT;
+	if (copy_to_user(optval, &val, len))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * 7.1.25.  Set or Get the sctp partial delivery point
+ * (chapter and verse is quoted at sctp_setsockopt_partial_delivery_point())
+ */
+static int sctp_getsockopt_partial_delivery_point(struct sock *sk, int len,
+						  char __user *optval,
+						  int __user *optlen)
+{
+	u32 val;
+
+	if (len < sizeof(u32))
+		return -EINVAL;
+
+	len = sizeof(u32);
+
+	val = sctp_sk(sk)->pd_point;
+	if (put_user(len, optlen))
+		return -EFAULT;
+	if (copy_to_user(optval, &val, len))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * 7.1.28.  Set or Get the maximum burst (SCTP_MAX_BURST)
+ * (chapter and verse is quoted at sctp_setsockopt_maxburst())
+ */
+static int sctp_getsockopt_maxburst(struct sock *sk, int len,
+				    char __user *optval,
+				    int __user *optlen)
+{
+	struct sctp_assoc_value params;
+	struct sctp_sock *sp;
+	struct sctp_association *asoc;
+
+	if (len == sizeof(int)) {
+		pr_warn_ratelimited(DEPRECATED
+				    "%s (pid %d) "
+				    "Use of int in max_burst socket option.\n"
+				    "Use struct sctp_assoc_value instead\n",
+				    current->comm, task_pid_nr(current));
+		params.assoc_id = 0;
+	} else if (len >= sizeof(struct sctp_assoc_value)) {
+		len = sizeof(struct sctp_assoc_value);
+		if (copy_from_user(&params, optval, len))
+			return -EFAULT;
+	} else
+		return -EINVAL;
+
+	sp = sctp_sk(sk);
+
+	if (params.assoc_id != 0) {
+		asoc = sctp_id2assoc(sk, params.assoc_id);
+		if (!asoc)
+			return -EINVAL;
+		params.assoc_value = asoc->max_burst;
+	} else
+		params.assoc_value = sp->max_burst;
+
+	if (len == sizeof(int)) {
+		if (copy_to_user(optval, &params.assoc_value, len))
+			return -EFAULT;
+	} else {
+		if (copy_to_user(optval, &params, len))
+			return -EFAULT;
+	}
+
+	return 0;
+
+}
+
+static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
+				      char __user *optval, int __user *optlen)
+{
+	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
+	struct sctp_hmacalgo __user *p = (void __user *)optval;
+	struct sctp_hmac_algo_param *hmacs;
+	__u16 data_len = 0;
+	u32 num_idents;
+
+	if (!ep->auth_enable)
+		return -EACCES;
+
+	hmacs = ep->auth_hmacs_list;
+	data_len = ntohs(hmacs->param_hdr.length) - sizeof(sctp_paramhdr_t);
+
+	if (len < sizeof(struct sctp_hmacalgo) + data_len)
+		return -EINVAL;
+
+	len = sizeof(struct sctp_hmacalgo) + data_len;
+	num_idents = data_len / sizeof(u16);
+
+	if (put_user(len, optlen))
+		return -EFAULT;
+	if (put_user(num_idents, &p->shmac_num_idents))
+		return -EFAULT;
+	if (copy_to_user(p->shmac_idents, hmacs->hmac_ids, data_len))
+		return -EFAULT;
+	return 0;
+}
+
+static int sctp_getsockopt_active_key(struct sock *sk, int len,
+				      char __user *optval, int __user *optlen)
+{
+	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
+	struct sctp_authkeyid val;
+	struct sctp_association *asoc;
+
+	if (!ep->auth_enable)
+		return -EACCES;
+
+	if (len < sizeof(struct sctp_authkeyid))
+		return -EINVAL;
+	if (copy_from_user(&val, optval, sizeof(struct sctp_authkeyid)))
+		return -EFAULT;
+
+	asoc = sctp_id2assoc(sk, val.scact_assoc_id);
+	if (!asoc && val.scact_assoc_id && sctp_style(sk, UDP))
+		return -EINVAL;
+
+	if (asoc)
+		val.scact_keynumber = asoc->active_key_id;
+	else
+		val.scact_keynumber = ep->active_key_id;
+
+	len = sizeof(struct sctp_authkeyid);
+	if (put_user(len, optlen))
+		return -EFAULT;
+	if (copy_to_user(optval, &val, len))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
+				    char __user *optval, int __user *optlen)
+{
+	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
+	struct sctp_authchunks __user *p = (void __user *)optval;
+	struct sctp_authchunks val;
+	struct sctp_association *asoc;
+	struct sctp_chunks_param *ch;
+	u32    num_chunks = 0;
+	char __user *to;
+
+	if (!ep->auth_enable)
+		return -EACCES;
+
+	if (len < sizeof(struct sctp_authchunks))
+		return -EINVAL;
+
+	if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
+		return -EFAULT;
+
+	to = p->gauth_chunks;
+	asoc = sctp_id2assoc(sk, val.gauth_assoc_id);
+	if (!asoc)
+		return -EINVAL;
+
+	ch = asoc->peer.peer_chunks;
+	if (!ch)
+		goto num;
+
+	/* See if the user provided enough room for all the data */
+	num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t);
+	if (len < num_chunks)
+		return -EINVAL;
+
+	if (copy_to_user(to, ch->chunks, num_chunks))
+		return -EFAULT;
+num:
+	len = sizeof(struct sctp_authchunks) + num_chunks;
+	if (put_user(len, optlen))
+		return -EFAULT;
+	if (put_user(num_chunks, &p->gauth_number_of_chunks))
+		return -EFAULT;
+	return 0;
+}
+
+static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
+				    char __user *optval, int __user *optlen)
+{
+	struct sctp_endpoint *ep = sctp_sk(sk)->ep;
+	struct sctp_authchunks __user *p = (void __user *)optval;
+	struct sctp_authchunks val;
+	struct sctp_association *asoc;
+	struct sctp_chunks_param *ch;
+	u32    num_chunks = 0;
+	char __user *to;
+
+	if (!ep->auth_enable)
+		return -EACCES;
+
+	if (len < sizeof(struct sctp_authchunks))
+		return -EINVAL;
+
+	if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
+		return -EFAULT;
+
+	to = p->gauth_chunks;
+	asoc = sctp_id2assoc(sk, val.gauth_assoc_id);
+	if (!asoc && val.gauth_assoc_id && sctp_style(sk, UDP))
+		return -EINVAL;
+
+	if (asoc)
+		ch = (struct sctp_chunks_param *)asoc->c.auth_chunks;
+	else
+		ch = ep->auth_chunk_list;
+
+	if (!ch)
+		goto num;
+
+	num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t);
+	if (len < sizeof(struct sctp_authchunks) + num_chunks)
+		return -EINVAL;
+
+	if (copy_to_user(to, ch->chunks, num_chunks))
+		return -EFAULT;
+num:
+	len = sizeof(struct sctp_authchunks) + num_chunks;
+	if (put_user(len, optlen))
+		return -EFAULT;
+	if (put_user(num_chunks, &p->gauth_number_of_chunks))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * 8.2.5.  Get the Current Number of Associations (SCTP_GET_ASSOC_NUMBER)
+ * This option gets the current number of associations that are attached
+ * to a one-to-many style socket.  The option value is a uint32_t.
+ */
+static int sctp_getsockopt_assoc_number(struct sock *sk, int len,
+				    char __user *optval, int __user *optlen)
+{
+	struct sctp_sock *sp = sctp_sk(sk);
+	struct sctp_association *asoc;
+	u32 val = 0;
+
+	if (sctp_style(sk, TCP))
+		return -EOPNOTSUPP;
+
+	if (len < sizeof(u32))
+		return -EINVAL;
+
+	len = sizeof(u32);
+
+	list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
+		val++;
+	}
+
+	if (put_user(len, optlen))
+		return -EFAULT;
+	if (copy_to_user(optval, &val, len))
+		return -EFAULT;
+
+	return 0;
+}
+
+/*
+ * 8.1.23 SCTP_AUTO_ASCONF
+ * See the corresponding setsockopt entry for a description.
+ */
+static int sctp_getsockopt_auto_asconf(struct sock *sk, int len,
+				   char __user *optval, int __user *optlen)
+{
+	int val = 0;
+
+	if (len < sizeof(int))
+		return -EINVAL;
+
+	len = sizeof(int);
+	if (sctp_sk(sk)->do_auto_asconf && sctp_is_ep_boundall(sk))
+		val = 1;
+	if (put_user(len, optlen))
+		return -EFAULT;
+	if (copy_to_user(optval, &val, len))
+		return -EFAULT;
+	return 0;
+}
+
+/*
+ * 8.2.6.  Get the Current Identifiers of Associations
+ *         (SCTP_GET_ASSOC_ID_LIST)
+ *
+ * This option gets the current list of SCTP association identifiers of
+ * the SCTP associations handled by a one-to-many style socket.
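+ *
+ * Illustrative two-step query (hypothetical fd; not part of this
+ * source): the buffer must have room for every id or the call fails
+ * with EINVAL, so size it from SCTP_GET_ASSOC_NUMBER first:
+ *
+ *   uint32_t n = 0;
+ *   socklen_t len = sizeof(n);
+ *   getsockopt(fd, IPPROTO_SCTP, SCTP_GET_ASSOC_NUMBER, &n, &len);
+ *   len = sizeof(struct sctp_assoc_ids) + n * sizeof(sctp_assoc_t);
+ *   struct sctp_assoc_ids *ids = malloc(len);
+ *   getsockopt(fd, IPPROTO_SCTP, SCTP_GET_ASSOC_ID_LIST, ids, &len);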
+ */ +static int sctp_getsockopt_assoc_ids(struct sock *sk, int len, + char __user *optval, int __user *optlen) +{ + struct sctp_sock *sp = sctp_sk(sk); + struct sctp_association *asoc; + struct sctp_assoc_ids *ids; + u32 num = 0; + + if (sctp_style(sk, TCP)) + return -EOPNOTSUPP; + + if (len < sizeof(struct sctp_assoc_ids)) + return -EINVAL; + + list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { + num++; + } + + if (len < sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num) + return -EINVAL; + + len = sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num; + + ids = kmalloc(len, GFP_KERNEL); + if (unlikely(!ids)) + return -ENOMEM; + + ids->gaids_number_of_ids = num; + num = 0; + list_for_each_entry(asoc, &(sp->ep->asocs), asocs) { + ids->gaids_assoc_id[num++] = asoc->assoc_id; + } + + if (put_user(len, optlen) || copy_to_user(optval, ids, len)) { + kfree(ids); + return -EFAULT; + } + + kfree(ids); + return 0; +} + +/* + * SCTP_PEER_ADDR_THLDS + * + * This option allows us to fetch the partially failed threshold for one or all + * transports in an association. See Section 6.1 of: + * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt + */ +static int sctp_getsockopt_paddr_thresholds(struct sock *sk, + char __user *optval, + int len, + int __user *optlen) +{ + struct sctp_paddrthlds val; + struct sctp_transport *trans; + struct sctp_association *asoc; + + if (len < sizeof(struct sctp_paddrthlds)) + return -EINVAL; + len = sizeof(struct sctp_paddrthlds); + if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, len)) + return -EFAULT; + + if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) { + asoc = sctp_id2assoc(sk, val.spt_assoc_id); + if (!asoc) + return -ENOENT; + + val.spt_pathpfthld = asoc->pf_retrans; + val.spt_pathmaxrxt = asoc->pathmaxrxt; + } else { + trans = sctp_addr_id2transport(sk, &val.spt_address, + val.spt_assoc_id); + if (!trans) + return -ENOENT; + + val.spt_pathmaxrxt = trans->pathmaxrxt; + val.spt_pathpfthld = trans->pf_retrans; + } + + if (put_user(len, optlen) || copy_to_user(optval, &val, len)) + return -EFAULT; + + return 0; +} + +/* + * SCTP_GET_ASSOC_STATS + * + * This option retrieves local per endpoint statistics. It is modeled + * after OpenSolaris' implementation + */ +static int sctp_getsockopt_assoc_stats(struct sock *sk, int len, + char __user *optval, + int __user *optlen) +{ + struct sctp_assoc_stats sas; + struct sctp_association *asoc = NULL; + + /* User must provide at least the assoc id */ + if (len < sizeof(sctp_assoc_t)) + return -EINVAL; + + /* Allow the struct to grow and fill in as much as possible */ + len = min_t(size_t, len, sizeof(sas)); + + if (copy_from_user(&sas, optval, len)) + return -EFAULT; + + asoc = sctp_id2assoc(sk, sas.sas_assoc_id); + if (!asoc) + return -EINVAL; + + sas.sas_rtxchunks = asoc->stats.rtxchunks; + sas.sas_gapcnt = asoc->stats.gapcnt; + sas.sas_outofseqtsns = asoc->stats.outofseqtsns; + sas.sas_osacks = asoc->stats.osacks; + sas.sas_isacks = asoc->stats.isacks; + sas.sas_octrlchunks = asoc->stats.octrlchunks; + sas.sas_ictrlchunks = asoc->stats.ictrlchunks; + sas.sas_oodchunks = asoc->stats.oodchunks; + sas.sas_iodchunks = asoc->stats.iodchunks; + sas.sas_ouodchunks = asoc->stats.ouodchunks; + sas.sas_iuodchunks = asoc->stats.iuodchunks; + sas.sas_idupchunks = asoc->stats.idupchunks; + sas.sas_opackets = asoc->stats.opackets; + sas.sas_ipackets = asoc->stats.ipackets; + + /* New high max rto observed, will return 0 if not a single + * RTO update took place. 
obs_rto_ipaddr will be bogus + * in such a case + */ + sas.sas_maxrto = asoc->stats.max_obs_rto; + memcpy(&sas.sas_obs_rto_ipaddr, &asoc->stats.obs_rto_ipaddr, + sizeof(struct sockaddr_storage)); + + /* Mark beginning of a new observation period */ + asoc->stats.max_obs_rto = asoc->rto_min; + + if (put_user(len, optlen)) + return -EFAULT; + + pr_debug("%s: len:%d, assoc_id:%d\n", __func__, len, sas.sas_assoc_id); + + if (copy_to_user(optval, &sas, len)) + return -EFAULT; + + return 0; +} + +static int sctp_getsockopt_recvrcvinfo(struct sock *sk, int len, + char __user *optval, + int __user *optlen) +{ + int val = 0; + + if (len < sizeof(int)) + return -EINVAL; + + len = sizeof(int); + if (sctp_sk(sk)->recvrcvinfo) + val = 1; + if (put_user(len, optlen)) + return -EFAULT; + if (copy_to_user(optval, &val, len)) + return -EFAULT; + + return 0; +} + +static int sctp_getsockopt_recvnxtinfo(struct sock *sk, int len, + char __user *optval, + int __user *optlen) +{ + int val = 0; + + if (len < sizeof(int)) + return -EINVAL; + + len = sizeof(int); + if (sctp_sk(sk)->recvnxtinfo) + val = 1; + if (put_user(len, optlen)) + return -EFAULT; + if (copy_to_user(optval, &val, len)) + return -EFAULT; + + return 0; +} + +static int sctp_getsockopt(struct sock *sk, int level, int optname, + char __user *optval, int __user *optlen) +{ + int retval = 0; + int len; + + pr_debug("%s: sk:%p, optname:%d\n", __func__, sk, optname); + + /* I can hardly begin to describe how wrong this is. This is + * so broken as to be worse than useless. The API draft + * REALLY is NOT helpful here... I am not convinced that the + * semantics of getsockopt() with a level OTHER THAN SOL_SCTP + * are at all well-founded. + */ + if (level != SOL_SCTP) { + struct sctp_af *af = sctp_sk(sk)->pf->af; + + retval = af->getsockopt(sk, level, optname, optval, optlen); + return retval; + } + + if (get_user(len, optlen)) + return -EFAULT; + + lock_sock(sk); + + switch (optname) { + case SCTP_STATUS: + retval = sctp_getsockopt_sctp_status(sk, len, optval, optlen); + break; + case SCTP_DISABLE_FRAGMENTS: + retval = sctp_getsockopt_disable_fragments(sk, len, optval, + optlen); + break; + case SCTP_EVENTS: + retval = sctp_getsockopt_events(sk, len, optval, optlen); + break; + case SCTP_AUTOCLOSE: + retval = sctp_getsockopt_autoclose(sk, len, optval, optlen); + break; + case SCTP_SOCKOPT_PEELOFF: + retval = sctp_getsockopt_peeloff(sk, len, optval, optlen); + break; + case SCTP_PEER_ADDR_PARAMS: + retval = sctp_getsockopt_peer_addr_params(sk, len, optval, + optlen); + break; + case SCTP_DELAYED_SACK: + retval = sctp_getsockopt_delayed_ack(sk, len, optval, + optlen); + break; + case SCTP_INITMSG: + retval = sctp_getsockopt_initmsg(sk, len, optval, optlen); + break; + case SCTP_GET_PEER_ADDRS: + retval = sctp_getsockopt_peer_addrs(sk, len, optval, + optlen); + break; + case SCTP_GET_LOCAL_ADDRS: + retval = sctp_getsockopt_local_addrs(sk, len, optval, + optlen); + break; + case SCTP_SOCKOPT_CONNECTX3: + retval = sctp_getsockopt_connectx3(sk, len, optval, optlen); + break; + case SCTP_DEFAULT_SEND_PARAM: + retval = sctp_getsockopt_default_send_param(sk, len, + optval, optlen); + break; + case SCTP_DEFAULT_SNDINFO: + retval = sctp_getsockopt_default_sndinfo(sk, len, + optval, optlen); + break; + case SCTP_PRIMARY_ADDR: + retval = sctp_getsockopt_primary_addr(sk, len, optval, optlen); + break; + case SCTP_NODELAY: + retval = sctp_getsockopt_nodelay(sk, len, optval, optlen); + break; + case SCTP_RTOINFO: + retval = sctp_getsockopt_rtoinfo(sk, len, optval, 
optlen); + break; + case SCTP_ASSOCINFO: + retval = sctp_getsockopt_associnfo(sk, len, optval, optlen); + break; + case SCTP_I_WANT_MAPPED_V4_ADDR: + retval = sctp_getsockopt_mappedv4(sk, len, optval, optlen); + break; + case SCTP_MAXSEG: + retval = sctp_getsockopt_maxseg(sk, len, optval, optlen); + break; + case SCTP_GET_PEER_ADDR_INFO: + retval = sctp_getsockopt_peer_addr_info(sk, len, optval, + optlen); + break; + case SCTP_ADAPTATION_LAYER: + retval = sctp_getsockopt_adaptation_layer(sk, len, optval, + optlen); + break; + case SCTP_CONTEXT: + retval = sctp_getsockopt_context(sk, len, optval, optlen); + break; + case SCTP_FRAGMENT_INTERLEAVE: + retval = sctp_getsockopt_fragment_interleave(sk, len, optval, + optlen); + break; + case SCTP_PARTIAL_DELIVERY_POINT: + retval = sctp_getsockopt_partial_delivery_point(sk, len, optval, + optlen); + break; + case SCTP_MAX_BURST: + retval = sctp_getsockopt_maxburst(sk, len, optval, optlen); + break; + case SCTP_AUTH_KEY: + case SCTP_AUTH_CHUNK: + case SCTP_AUTH_DELETE_KEY: + retval = -EOPNOTSUPP; + break; + case SCTP_HMAC_IDENT: + retval = sctp_getsockopt_hmac_ident(sk, len, optval, optlen); + break; + case SCTP_AUTH_ACTIVE_KEY: + retval = sctp_getsockopt_active_key(sk, len, optval, optlen); + break; + case SCTP_PEER_AUTH_CHUNKS: + retval = sctp_getsockopt_peer_auth_chunks(sk, len, optval, + optlen); + break; + case SCTP_LOCAL_AUTH_CHUNKS: + retval = sctp_getsockopt_local_auth_chunks(sk, len, optval, + optlen); + break; + case SCTP_GET_ASSOC_NUMBER: + retval = sctp_getsockopt_assoc_number(sk, len, optval, optlen); + break; + case SCTP_GET_ASSOC_ID_LIST: + retval = sctp_getsockopt_assoc_ids(sk, len, optval, optlen); + break; + case SCTP_AUTO_ASCONF: + retval = sctp_getsockopt_auto_asconf(sk, len, optval, optlen); + break; + case SCTP_PEER_ADDR_THLDS: + retval = sctp_getsockopt_paddr_thresholds(sk, optval, len, optlen); + break; + case SCTP_GET_ASSOC_STATS: + retval = sctp_getsockopt_assoc_stats(sk, len, optval, optlen); + break; + case SCTP_RECVRCVINFO: + retval = sctp_getsockopt_recvrcvinfo(sk, len, optval, optlen); + break; + case SCTP_RECVNXTINFO: + retval = sctp_getsockopt_recvnxtinfo(sk, len, optval, optlen); + break; + default: + retval = -ENOPROTOOPT; + break; + } + + release_sock(sk); + return retval; +} + +static void sctp_hash(struct sock *sk) +{ + /* STUB */ +} + +static void sctp_unhash(struct sock *sk) +{ + /* STUB */ +} + +/* Check if port is acceptable. Possibly find first available port. + * + * The port hash table (contained in the 'global' SCTP protocol storage + * returned by struct sctp_protocol *sctp_get_protocol()). The hash + * table is an array of 4096 lists (sctp_bind_hashbucket). Each + * list (the list number is the port number hashed out, so as you + * would expect from a hash function, all the ports in a given list have + * such a number that hashes out to the same list number; you were + * expecting that, right?); so each list has a set of ports, with a + * link to the socket (struct sock) that uses it, the port number and + * a fastreuse flag (FIXME: NPI ipg). 
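+ *
+ * As a sketch of the lookup just described (mirroring the code below;
+ * for illustration only):
+ *
+ *   head = &sctp_port_hashtable[sctp_phashfn(net, port)];
+ *   spin_lock(&head->lock);
+ *   sctp_for_each_hentry(pp, &head->chain)
+ *           if (pp->port == port && net_eq(pp->net, net))
+ *                   break;  /* found the bucket for this port */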
+ */
+static struct sctp_bind_bucket *sctp_bucket_create(
+	struct sctp_bind_hashbucket *head, struct net *, unsigned short snum);
+
+static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
+{
+	struct sctp_bind_hashbucket *head; /* hash list */
+	struct sctp_bind_bucket *pp;
+	unsigned short snum;
+	int ret;
+
+	snum = ntohs(addr->v4.sin_port);
+
+	pr_debug("%s: begins, snum:%d\n", __func__, snum);
+
+	local_bh_disable();
+
+	if (snum == 0) {
+		/* Search for an available port. */
+		int low, high, remaining, index;
+		unsigned int rover;
+		struct net *net = sock_net(sk);
+
+		inet_get_local_port_range(net, &low, &high);
+		remaining = (high - low) + 1;
+		rover = prandom_u32() % remaining + low;
+
+		do {
+			rover++;
+			if ((rover < low) || (rover > high))
+				rover = low;
+			if (inet_is_local_reserved_port(net, rover))
+				continue;
+			index = sctp_phashfn(sock_net(sk), rover);
+			head = &sctp_port_hashtable[index];
+			spin_lock(&head->lock);
+			sctp_for_each_hentry(pp, &head->chain)
+				if ((pp->port == rover) &&
+				    net_eq(sock_net(sk), pp->net))
+					goto next;
+			break;
+		next:
+			spin_unlock(&head->lock);
+		} while (--remaining > 0);
+
+		/* Exhausted local port range during search? */
+		ret = 1;
+		if (remaining <= 0)
+			goto fail;
+
+		/* OK, here is the one we will use.  HEAD (the port
+		 * hash table list entry) is non-NULL and we hold its
+		 * mutex.
+		 */
+		snum = rover;
+	} else {
+		/* We are given a specific port number; we verify
+		 * that it is not being used.  If it is used, we will
+		 * exhaust the search in the hash list corresponding
+		 * to the port number (snum) - we detect that with the
+		 * port iterator, pp being NULL.
+		 */
+		head = &sctp_port_hashtable[sctp_phashfn(sock_net(sk), snum)];
+		spin_lock(&head->lock);
+		sctp_for_each_hentry(pp, &head->chain) {
+			if ((pp->port == snum) && net_eq(pp->net, sock_net(sk)))
+				goto pp_found;
+		}
+	}
+	pp = NULL;
+	goto pp_not_found;
+pp_found:
+	if (!hlist_empty(&pp->owner)) {
+		/* We had a port hash table hit - there is an
+		 * available port (pp != NULL) and it is being
+		 * used by another socket (pp->owner not empty); that other
+		 * socket is going to be sk2.
+		 */
+		int reuse = sk->sk_reuse;
+		struct sock *sk2;
+
+		pr_debug("%s: found a possible match\n", __func__);
+
+		if (pp->fastreuse && sk->sk_reuse &&
+		    sk->sk_state != SCTP_SS_LISTENING)
+			goto success;
+
+		/* Run through the list of sockets bound to the port
+		 * (pp->port) [via the pointers bind_next and
+		 * bind_pprev in the struct sock *sk2 (pp->sk)].  On each one,
+		 * we get the endpoint they describe and run through
+		 * the endpoint's list of IP (v4 or v6) addresses,
+		 * comparing each of the addresses with the address of
+		 * the socket sk.  If we find a match, then that means
+		 * that this port/socket (sk) combination is already
+		 * in an endpoint.
+		 */
+		sk_for_each_bound(sk2, &pp->owner) {
+			struct sctp_endpoint *ep2;
+			ep2 = sctp_sk(sk2)->ep;
+
+			if (sk == sk2 ||
+			    (reuse && sk2->sk_reuse &&
+			     sk2->sk_state != SCTP_SS_LISTENING))
+				continue;
+
+			if (sctp_bind_addr_conflict(&ep2->base.bind_addr, addr,
+						    sctp_sk(sk2), sctp_sk(sk))) {
+				ret = (long)sk2;
+				goto fail_unlock;
+			}
+		}
+
+		pr_debug("%s: found a match\n", __func__);
+	}
+pp_not_found:
+	/* If there was a hash table miss, create a new port.  */
+	ret = 1;
+	if (!pp && !(pp = sctp_bucket_create(head, sock_net(sk), snum)))
+		goto fail_unlock;
+
+	/* In either case (hit or miss), make sure fastreuse is 1 only
+	 * if sk->sk_reuse is too (that is, if the caller requested
+	 * SO_REUSEADDR on this socket -sk-).
+ */ + if (hlist_empty(&pp->owner)) { + if (sk->sk_reuse && sk->sk_state != SCTP_SS_LISTENING) + pp->fastreuse = 1; + else + pp->fastreuse = 0; + } else if (pp->fastreuse && + (!sk->sk_reuse || sk->sk_state == SCTP_SS_LISTENING)) + pp->fastreuse = 0; + + /* We are set, so fill up all the data in the hash table + * entry, tie the socket list information with the rest of the + * sockets FIXME: Blurry, NPI (ipg). + */ +success: + if (!sctp_sk(sk)->bind_hash) { + inet_sk(sk)->inet_num = snum; + sk_add_bind_node(sk, &pp->owner); + sctp_sk(sk)->bind_hash = pp; + } + ret = 0; + +fail_unlock: + spin_unlock(&head->lock); + +fail: + local_bh_enable(); + return ret; +} + +/* Assign a 'snum' port to the socket. If snum == 0, an ephemeral + * port is requested. + */ +static int sctp_get_port(struct sock *sk, unsigned short snum) +{ + union sctp_addr addr; + struct sctp_af *af = sctp_sk(sk)->pf->af; + + /* Set up a dummy address struct from the sk. */ + af->from_sk(&addr, sk); + addr.v4.sin_port = htons(snum); + + /* Note: sk->sk_num gets filled in if ephemeral port request. */ + return !!sctp_get_port_local(sk, &addr); +} + +/* + * Move a socket to LISTENING state. + */ +static int sctp_listen_start(struct sock *sk, int backlog) +{ + struct sctp_sock *sp = sctp_sk(sk); + struct sctp_endpoint *ep = sp->ep; + struct crypto_hash *tfm = NULL; + char alg[32]; + + /* Allocate HMAC for generating cookie. */ + if (!sp->hmac && sp->sctp_hmac_alg) { + sprintf(alg, "hmac(%s)", sp->sctp_hmac_alg); + tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(tfm)) { + net_info_ratelimited("failed to load transform for %s: %ld\n", + sp->sctp_hmac_alg, PTR_ERR(tfm)); + return -ENOSYS; + } + sctp_sk(sk)->hmac = tfm; + } + + /* + * If a bind() or sctp_bindx() is not called prior to a listen() + * call that allows new associations to be accepted, the system + * picks an ephemeral port and will choose an address set equivalent + * to binding with a wildcard address. + * + * This is not currently spelled out in the SCTP sockets + * extensions draft, but follows the practice as seen in TCP + * sockets. + * + */ + sk->sk_state = SCTP_SS_LISTENING; + if (!ep->base.bind_addr.port) { + if (sctp_autobind(sk)) + return -EAGAIN; + } else { + if (sctp_get_port(sk, inet_sk(sk)->inet_num)) { + sk->sk_state = SCTP_SS_CLOSED; + return -EADDRINUSE; + } + } + + sk->sk_max_ack_backlog = backlog; + sctp_hash_endpoint(ep); + return 0; +} + +/* + * 4.1.3 / 5.1.3 listen() + * + * By default, new associations are not accepted for UDP style sockets. + * An application uses listen() to mark a socket as being able to + * accept new associations. + * + * On TCP style sockets, applications use listen() to ready the SCTP + * endpoint for accepting inbound associations. + * + * On both types of endpoints a backlog of '0' disables listening. + * + * Move a socket to LISTENING state. + */ +int sctp_inet_listen(struct socket *sock, int backlog) +{ + struct sock *sk = sock->sk; + struct sctp_endpoint *ep = sctp_sk(sk)->ep; + int err = -EINVAL; + + if (unlikely(backlog < 0)) + return err; + + lock_sock(sk); + + /* Peeled-off sockets are not allowed to listen(). */ + if (sctp_style(sk, UDP_HIGH_BANDWIDTH)) + goto out; + + if (sock->state != SS_UNCONNECTED) + goto out; + + /* If backlog is zero, disable listening. 
*/
+	if (!backlog) {
+		if (sctp_sstate(sk, CLOSED))
+			goto out;
+
+		err = 0;
+		sctp_unhash_endpoint(ep);
+		sk->sk_state = SCTP_SS_CLOSED;
+		if (sk->sk_reuse)
+			sctp_sk(sk)->bind_hash->fastreuse = 1;
+		goto out;
+	}
+
+	/* If we are already listening, just update the backlog */
+	if (sctp_sstate(sk, LISTENING))
+		sk->sk_max_ack_backlog = backlog;
+	else {
+		err = sctp_listen_start(sk, backlog);
+		if (err)
+			goto out;
+	}
+
+	err = 0;
+out:
+	release_sock(sk);
+	return err;
+}
+
+/*
+ * This function is modeled on the current datagram_poll() and the
+ * tcp_poll().  Note that, based on these implementations, we don't
+ * lock the socket in this function, even though it seems that,
+ * ideally, locking or some other mechanisms can be used to ensure
+ * the integrity of the counters (sndbuf and wmem_alloc) used
+ * in this place.  We assume that we don't need locks either until proven
+ * otherwise.
+ *
+ * Another thing to note is that we include the Async I/O support
+ * here, again, by modeling the current TCP/UDP code.  We don't have
+ * a good way to test with it yet.
+ */
+unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
+{
+	struct sock *sk = sock->sk;
+	struct sctp_sock *sp = sctp_sk(sk);
+	unsigned int mask;
+
+	poll_wait(file, sk_sleep(sk), wait);
+
+	/* A TCP-style listening socket becomes readable when the accept queue
+	 * is not empty.
+	 */
+	if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
+		return (!list_empty(&sp->ep->asocs)) ?
+			(POLLIN | POLLRDNORM) : 0;
+
+	mask = 0;
+
+	/* Are there any exceptional events? */
+	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
+		mask |= POLLERR |
+			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
+	if (sk->sk_shutdown & RCV_SHUTDOWN)
+		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
+	if (sk->sk_shutdown == SHUTDOWN_MASK)
+		mask |= POLLHUP;
+
+	/* Is it readable?  Reconsider this code with TCP-style support. */
+	if (!skb_queue_empty(&sk->sk_receive_queue))
+		mask |= POLLIN | POLLRDNORM;
+
+	/* The association is either gone or not ready. */
+	if (!sctp_style(sk, UDP) && sctp_sstate(sk, CLOSED))
+		return mask;
+
+	/* Is it writable? */
+	if (sctp_writeable(sk)) {
+		mask |= POLLOUT | POLLWRNORM;
+	} else {
+		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+		/*
+		 * Since the socket is not locked, the buffer
+		 * might be made available after the writeable check and
+		 * before the bit is set.  This could cause a lost I/O
+		 * signal.  tcp_poll() has a race breaker for this race
+		 * condition.  Based on their implementation, we put
+		 * in the following code to cover it as well.
+		 */
+		if (sctp_writeable(sk))
+			mask |= POLLOUT | POLLWRNORM;
+	}
+	return mask;
+}
+
+/********************************************************************
+ * 2nd Level Abstractions
+ ********************************************************************/
+
+static struct sctp_bind_bucket *sctp_bucket_create(
+	struct sctp_bind_hashbucket *head, struct net *net, unsigned short snum)
+{
+	struct sctp_bind_bucket *pp;
+
+	pp = kmem_cache_alloc(sctp_bucket_cachep, GFP_ATOMIC);
+	if (pp) {
+		SCTP_DBG_OBJCNT_INC(bind_bucket);
+		pp->port = snum;
+		pp->fastreuse = 0;
+		INIT_HLIST_HEAD(&pp->owner);
+		pp->net = net;
+		hlist_add_head(&pp->node, &head->chain);
+	}
+	return pp;
+}
+
+/* Caller must hold hashbucket lock for this tb with local BH disabled */
+static void sctp_bucket_destroy(struct sctp_bind_bucket *pp)
+{
+	if (pp && hlist_empty(&pp->owner)) {
+		__hlist_del(&pp->node);
+		kmem_cache_free(sctp_bucket_cachep, pp);
+		SCTP_DBG_OBJCNT_DEC(bind_bucket);
+	}
+}
+
+/* Release this socket's reference to a local port. */
+static inline void __sctp_put_port(struct sock *sk)
+{
+	struct sctp_bind_hashbucket *head =
+		&sctp_port_hashtable[sctp_phashfn(sock_net(sk),
+						  inet_sk(sk)->inet_num)];
+	struct sctp_bind_bucket *pp;
+
+	spin_lock(&head->lock);
+	pp = sctp_sk(sk)->bind_hash;
+	__sk_del_bind_node(sk);
+	sctp_sk(sk)->bind_hash = NULL;
+	inet_sk(sk)->inet_num = 0;
+	sctp_bucket_destroy(pp);
+	spin_unlock(&head->lock);
+}
+
+void sctp_put_port(struct sock *sk)
+{
+	local_bh_disable();
+	__sctp_put_port(sk);
+	local_bh_enable();
+}
+
+/*
+ * The system picks an ephemeral port and chooses an address set equivalent
+ * to binding with a wildcard address.
+ * One of those addresses will be the primary address for the association.
+ * This automatically enables the multihoming capability of SCTP.
+ */
+static int sctp_autobind(struct sock *sk)
+{
+	union sctp_addr autoaddr;
+	struct sctp_af *af;
+	__be16 port;
+
+	/* Initialize a local sockaddr structure to INADDR_ANY. */
+	af = sctp_sk(sk)->pf->af;
+
+	port = htons(inet_sk(sk)->inet_num);
+	af->inaddr_any(&autoaddr, port);
+
+	return sctp_do_bind(sk, &autoaddr, af->sockaddr_len);
+}
+
+/* Parse out IPPROTO_SCTP CMSG headers.  Perform only minimal validation.
+ *
+ * From RFC 2292
+ * 4.2 The cmsghdr Structure
+ *
+ * When ancillary data is sent or received, any number of ancillary data
+ * objects can be specified by the msg_control and msg_controllen members of
+ * the msghdr structure, because each object is preceded by
+ * a cmsghdr structure defining the object's length (the cmsg_len member).
+ * Historically Berkeley-derived implementations have passed only one object
+ * at a time, but this API allows multiple objects to be
+ * passed in a single call to sendmsg() or recvmsg().  The following example
+ * shows two ancillary data objects in a control buffer.
+ *
+ * |<--------------------------- msg_controllen -------------------------->|
+ * |                                                                       |
+ * |<----- ancillary data object ----->|<----- ancillary data object ----->|
+ * |<---------- CMSG_SPACE() --------->|<---------- CMSG_SPACE() --------->|
+ * |                                   |                                   |
+ * |<---------- cmsg_len ---------->|  |<--------- cmsg_len ----------->|  |
+ * |<--------- CMSG_LEN() --------->|  |<-------- CMSG_LEN() ---------->|  |
+ * |                                |  |                                |  |
+ * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+
+ * |cmsg_|cmsg_|cmsg_|XX|           |XX|cmsg_|cmsg_|cmsg_|XX|           |XX|
+ * |len  |level|type |XX|cmsg_data[]|XX|len  |level|type |XX|cmsg_data[]|XX|
+ * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+
+ *  ^
+ *  |
+ * msg_control
+ * points here
+ */
+static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs)
+{
+	struct cmsghdr *cmsg;
+	struct msghdr *my_msg = (struct msghdr *)msg;
+
+	for_each_cmsghdr(cmsg, my_msg) {
+		if (!CMSG_OK(my_msg, cmsg))
+			return -EINVAL;
+
+		/* Should we parse this header or ignore? */
+		if (cmsg->cmsg_level != IPPROTO_SCTP)
+			continue;
+
+		/* Strictly check lengths following example in SCM code. */
+		switch (cmsg->cmsg_type) {
+		case SCTP_INIT:
+			/* SCTP Socket API Extension
+			 * 5.3.1 SCTP Initiation Structure (SCTP_INIT)
+			 *
+			 * This cmsghdr structure provides information for
+			 * initializing new SCTP associations with sendmsg().
+			 * The SCTP_INITMSG socket option uses this same data
+			 * structure.  This structure is not used for
+			 * recvmsg().
+			 *
+			 * cmsg_level    cmsg_type      cmsg_data[]
+			 * ------------  ------------   ----------------------
+			 * IPPROTO_SCTP  SCTP_INIT      struct sctp_initmsg
+			 */
+			if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_initmsg)))
+				return -EINVAL;
+
+			cmsgs->init = CMSG_DATA(cmsg);
+			break;
+
+		case SCTP_SNDRCV:
+			/* SCTP Socket API Extension
+			 * 5.3.2 SCTP Header Information Structure(SCTP_SNDRCV)
+			 *
+			 * This cmsghdr structure specifies SCTP options for
+			 * sendmsg() and describes SCTP header information
+			 * about a received message through recvmsg().
+			 *
+			 * cmsg_level    cmsg_type      cmsg_data[]
+			 * ------------  ------------   ----------------------
+			 * IPPROTO_SCTP  SCTP_SNDRCV    struct sctp_sndrcvinfo
+			 */
+			if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_sndrcvinfo)))
+				return -EINVAL;
+
+			cmsgs->srinfo = CMSG_DATA(cmsg);
+
+			if (cmsgs->srinfo->sinfo_flags &
+			    ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
+			      SCTP_ABORT | SCTP_EOF))
+				return -EINVAL;
+			break;
+
+		case SCTP_SNDINFO:
+			/* SCTP Socket API Extension
+			 * 5.3.4 SCTP Send Information Structure (SCTP_SNDINFO)
+			 *
+			 * This cmsghdr structure specifies SCTP options for
+			 * sendmsg().  This structure and SCTP_RCVINFO replace
+			 * SCTP_SNDRCV, which has been deprecated.
+			 *
+			 * cmsg_level    cmsg_type      cmsg_data[]
+			 * ------------  ------------   ---------------------
+			 * IPPROTO_SCTP  SCTP_SNDINFO   struct sctp_sndinfo
+			 */
+			if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_sndinfo)))
+				return -EINVAL;
+
+			cmsgs->sinfo = CMSG_DATA(cmsg);
+
+			if (cmsgs->sinfo->snd_flags &
+			    ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
+			      SCTP_ABORT | SCTP_EOF))
+				return -EINVAL;
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Wait for a packet.
+ * Note: This function is the same function as in core/datagram.c
+ * with a few modifications to make lksctp work.
+ */ +static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p) +{ + int error; + DEFINE_WAIT(wait); + + prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); + + /* Socket errors? */ + error = sock_error(sk); + if (error) + goto out; + + if (!skb_queue_empty(&sk->sk_receive_queue)) + goto ready; + + /* Socket shut down? */ + if (sk->sk_shutdown & RCV_SHUTDOWN) + goto out; + + /* Sequenced packets can come disconnected. If so we report the + * problem. + */ + error = -ENOTCONN; + + /* Is there a good reason to think that we may receive some data? */ + if (list_empty(&sctp_sk(sk)->ep->asocs) && !sctp_sstate(sk, LISTENING)) + goto out; + + /* Handle signals. */ + if (signal_pending(current)) + goto interrupted; + + /* Let another process have a go. Since we are going to sleep + * anyway. Note: This may cause odd behaviors if the message + * does not fit in the user's buffer, but this seems to be the + * only way to honor MSG_DONTWAIT realistically. + */ + release_sock(sk); + *timeo_p = schedule_timeout(*timeo_p); + lock_sock(sk); + +ready: + finish_wait(sk_sleep(sk), &wait); + return 0; + +interrupted: + error = sock_intr_errno(*timeo_p); + +out: + finish_wait(sk_sleep(sk), &wait); + *err = error; + return error; +} + +/* Receive a datagram. + * Note: This is pretty much the same routine as in core/datagram.c + * with a few changes to make lksctp work. + */ +struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags, + int noblock, int *err) +{ + int error; + struct sk_buff *skb; + long timeo; + + timeo = sock_rcvtimeo(sk, noblock); + + pr_debug("%s: timeo:%ld, max:%ld\n", __func__, timeo, + MAX_SCHEDULE_TIMEOUT); + + do { + /* Again only user level code calls this function, + * so nothing interrupt level + * will suddenly eat the receive_queue. + * + * Look at current nfs client by the way... + * However, this function was correct in any case. 8) + */ + if (flags & MSG_PEEK) { + spin_lock_bh(&sk->sk_receive_queue.lock); + skb = skb_peek(&sk->sk_receive_queue); + if (skb) + atomic_inc(&skb->users); + spin_unlock_bh(&sk->sk_receive_queue.lock); + } else { + skb = skb_dequeue(&sk->sk_receive_queue); + } + + if (skb) + return skb; + + /* Caller is allowed not to check sk->sk_err before calling. */ + error = sock_error(sk); + if (error) + goto no_packet; + + if (sk->sk_shutdown & RCV_SHUTDOWN) + break; + + if (sk_can_busy_loop(sk) && + sk_busy_loop(sk, noblock)) + continue; + + /* User doesn't want to wait. */ + error = -EAGAIN; + if (!timeo) + goto no_packet; + } while (sctp_wait_for_packet(sk, err, &timeo) == 0); + + return NULL; + +no_packet: + *err = error; + return NULL; +} + +/* If sndbuf has changed, wake up per association sndbuf waiters. */ +static void __sctp_write_space(struct sctp_association *asoc) +{ + struct sock *sk = asoc->base.sk; + struct socket *sock = sk->sk_socket; + + if ((sctp_wspace(asoc) > 0) && sock) { + if (waitqueue_active(&asoc->wait)) + wake_up_interruptible(&asoc->wait); + + if (sctp_writeable(sk)) { + wait_queue_head_t *wq = sk_sleep(sk); + + if (wq && waitqueue_active(wq)) + wake_up_interruptible(wq); + + /* Note that we try to include the Async I/O support + * here by modeling from the current TCP/UDP code. + * We have not tested with it yet. 
+ */ + if (!(sk->sk_shutdown & SEND_SHUTDOWN)) + sock_wake_async(sock, + SOCK_WAKE_SPACE, POLL_OUT); + } + } +} + +static void sctp_wake_up_waiters(struct sock *sk, + struct sctp_association *asoc) +{ + struct sctp_association *tmp = asoc; + + /* We do accounting for the sndbuf space per association, + * so we only need to wake our own association. + */ + if (asoc->ep->sndbuf_policy) + return __sctp_write_space(asoc); + + /* If association goes down and is just flushing its + * outq, then just normally notify others. + */ + if (asoc->base.dead) + return sctp_write_space(sk); + + /* Accounting for the sndbuf space is per socket, so we + * need to wake up others, try to be fair and in case of + * other associations, let them have a go first instead + * of just doing a sctp_write_space() call. + * + * Note that we reach sctp_wake_up_waiters() only when + * associations free up queued chunks, thus we are under + * lock and the list of associations on a socket is + * guaranteed not to change. + */ + for (tmp = list_next_entry(tmp, asocs); 1; + tmp = list_next_entry(tmp, asocs)) { + /* Manually skip the head element. */ + if (&tmp->asocs == &((sctp_sk(sk))->ep->asocs)) + continue; + /* Wake up association. */ + __sctp_write_space(tmp); + /* We've reached the end. */ + if (tmp == asoc) + break; + } +} + +/* Do accounting for the sndbuf space. + * Decrement the used sndbuf space of the corresponding association by the + * data size which was just transmitted(freed). + */ +static void sctp_wfree(struct sk_buff *skb) +{ + struct sctp_chunk *chunk = skb_shinfo(skb)->destructor_arg; + struct sctp_association *asoc = chunk->asoc; + struct sock *sk = asoc->base.sk; + + asoc->sndbuf_used -= SCTP_DATA_SNDSIZE(chunk) + + sizeof(struct sk_buff) + + sizeof(struct sctp_chunk); + + atomic_sub(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc); + + /* + * This undoes what is done via sctp_set_owner_w and sk_mem_charge + */ + sk->sk_wmem_queued -= skb->truesize; + sk_mem_uncharge(sk, skb->truesize); + + sock_wfree(skb); + sctp_wake_up_waiters(sk, asoc); + + sctp_association_put(asoc); +} + +/* Do accounting for the receive space on the socket. + * Accounting for the association is done in ulpevent.c + * We set this as a destructor for the cloned data skbs so that + * accounting is done at the correct time. + */ +void sctp_sock_rfree(struct sk_buff *skb) +{ + struct sock *sk = skb->sk; + struct sctp_ulpevent *event = sctp_skb2event(skb); + + atomic_sub(event->rmem_len, &sk->sk_rmem_alloc); + + /* + * Mimic the behavior of sock_rfree + */ + sk_mem_uncharge(sk, event->rmem_len); +} + + +/* Helper function to wait for space in the sndbuf. */ +static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, + size_t msg_len) +{ + struct sock *sk = asoc->base.sk; + int err = 0; + long current_timeo = *timeo_p; + DEFINE_WAIT(wait); + + pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc, + *timeo_p, msg_len); + + /* Increment the association's refcnt. */ + sctp_association_hold(asoc); + + /* Wait on the association specific sndbuf space. */ + for (;;) { + prepare_to_wait_exclusive(&asoc->wait, &wait, + TASK_INTERRUPTIBLE); + if (!*timeo_p) + goto do_nonblock; + if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING || + asoc->base.dead) + goto do_error; + if (signal_pending(current)) + goto do_interrupted; + if (msg_len <= sctp_wspace(asoc)) + break; + + /* Let another process have a go. Since we are going + * to sleep anyway. 
+ */ + release_sock(sk); + current_timeo = schedule_timeout(current_timeo); + BUG_ON(sk != asoc->base.sk); + lock_sock(sk); + + *timeo_p = current_timeo; + } + +out: + finish_wait(&asoc->wait, &wait); + + /* Release the association's refcnt. */ + sctp_association_put(asoc); + + return err; + +do_error: + err = -EPIPE; + goto out; + +do_interrupted: + err = sock_intr_errno(*timeo_p); + goto out; + +do_nonblock: + err = -EAGAIN; + goto out; +} + +void sctp_data_ready(struct sock *sk) +{ + struct socket_wq *wq; + + rcu_read_lock(); + wq = rcu_dereference(sk->sk_wq); + if (wq_has_sleeper(wq)) + wake_up_interruptible_sync_poll(&wq->wait, POLLIN | + POLLRDNORM | POLLRDBAND); + sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); + rcu_read_unlock(); +} + +/* If socket sndbuf has changed, wake up all per association waiters. */ +void sctp_write_space(struct sock *sk) +{ + struct sctp_association *asoc; + + /* Wake up the tasks in each wait queue. */ + list_for_each_entry(asoc, &((sctp_sk(sk))->ep->asocs), asocs) { + __sctp_write_space(asoc); + } +} + +/* Is there any sndbuf space available on the socket? + * + * Note that sk_wmem_alloc is the sum of the send buffers on all of the + * associations on the same socket. For a UDP-style socket with + * multiple associations, it is possible for it to be "unwriteable" + * prematurely. I assume that this is acceptable because + * a premature "unwriteable" is better than an accidental "writeable" which + * would cause an unwanted block under certain circumstances. For the 1-1 + * UDP-style sockets or TCP-style sockets, this code should work. + * - Daisy + */ +static int sctp_writeable(struct sock *sk) +{ + int amt = 0; + + amt = sk->sk_sndbuf - sk_wmem_alloc_get(sk); + if (amt < 0) + amt = 0; + return amt; +} + +/* Wait for an association to go into ESTABLISHED state. If timeout is 0, + * returns immediately with EINPROGRESS. + */ +static int sctp_wait_for_connect(struct sctp_association *asoc, long *timeo_p) +{ + struct sock *sk = asoc->base.sk; + int err = 0; + long current_timeo = *timeo_p; + DEFINE_WAIT(wait); + + pr_debug("%s: asoc:%p, timeo:%ld\n", __func__, asoc, *timeo_p); + + /* Increment the association's refcnt. */ + sctp_association_hold(asoc); + + for (;;) { + prepare_to_wait_exclusive(&asoc->wait, &wait, + TASK_INTERRUPTIBLE); + if (!*timeo_p) + goto do_nonblock; + if (sk->sk_shutdown & RCV_SHUTDOWN) + break; + if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING || + asoc->base.dead) + goto do_error; + if (signal_pending(current)) + goto do_interrupted; + + if (sctp_state(asoc, ESTABLISHED)) + break; + + /* Let another process have a go. Since we are going + * to sleep anyway. + */ + release_sock(sk); + current_timeo = schedule_timeout(current_timeo); + lock_sock(sk); + + *timeo_p = current_timeo; + } + +out: + finish_wait(&asoc->wait, &wait); + + /* Release the association's refcnt. 
*/ + sctp_association_put(asoc); + + return err; + +do_error: + if (asoc->init_err_counter + 1 > asoc->max_init_attempts) + err = -ETIMEDOUT; + else + err = -ECONNREFUSED; + goto out; + +do_interrupted: + err = sock_intr_errno(*timeo_p); + goto out; + +do_nonblock: + err = -EINPROGRESS; + goto out; +} + +static int sctp_wait_for_accept(struct sock *sk, long timeo) +{ + struct sctp_endpoint *ep; + int err = 0; + DEFINE_WAIT(wait); + + ep = sctp_sk(sk)->ep; + + + for (;;) { + prepare_to_wait_exclusive(sk_sleep(sk), &wait, + TASK_INTERRUPTIBLE); + + if (list_empty(&ep->asocs)) { + release_sock(sk); + timeo = schedule_timeout(timeo); + lock_sock(sk); + } + + err = -EINVAL; + if (!sctp_sstate(sk, LISTENING)) + break; + + err = 0; + if (!list_empty(&ep->asocs)) + break; + + err = sock_intr_errno(timeo); + if (signal_pending(current)) + break; + + err = -EAGAIN; + if (!timeo) + break; + } + + finish_wait(sk_sleep(sk), &wait); + + return err; +} + +static void sctp_wait_for_close(struct sock *sk, long timeout) +{ + DEFINE_WAIT(wait); + + do { + prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); + if (list_empty(&sctp_sk(sk)->ep->asocs)) + break; + release_sock(sk); + timeout = schedule_timeout(timeout); + lock_sock(sk); + } while (!signal_pending(current) && timeout); + + finish_wait(sk_sleep(sk), &wait); +} + +static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk) +{ + struct sk_buff *frag; + + if (!skb->data_len) + goto done; + + /* Don't forget the fragments. */ + skb_walk_frags(skb, frag) + sctp_skb_set_owner_r_frag(frag, sk); + +done: + sctp_skb_set_owner_r(skb, sk); +} + +void sctp_copy_sock(struct sock *newsk, struct sock *sk, + struct sctp_association *asoc) +{ + struct inet_sock *inet = inet_sk(sk); + struct inet_sock *newinet; + + newsk->sk_type = sk->sk_type; + newsk->sk_bound_dev_if = sk->sk_bound_dev_if; + newsk->sk_flags = sk->sk_flags; + newsk->sk_no_check_tx = sk->sk_no_check_tx; + newsk->sk_no_check_rx = sk->sk_no_check_rx; + newsk->sk_reuse = sk->sk_reuse; + + newsk->sk_shutdown = sk->sk_shutdown; + newsk->sk_destruct = sctp_destruct_sock; + newsk->sk_family = sk->sk_family; + newsk->sk_protocol = IPPROTO_SCTP; + newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv; + newsk->sk_sndbuf = sk->sk_sndbuf; + newsk->sk_rcvbuf = sk->sk_rcvbuf; + newsk->sk_lingertime = sk->sk_lingertime; + newsk->sk_rcvtimeo = sk->sk_rcvtimeo; + newsk->sk_sndtimeo = sk->sk_sndtimeo; + + newinet = inet_sk(newsk); + + /* Initialize sk's sport, dport, rcv_saddr and daddr for + * getsockname() and getpeername() + */ + newinet->inet_sport = inet->inet_sport; + newinet->inet_saddr = inet->inet_saddr; + newinet->inet_rcv_saddr = inet->inet_rcv_saddr; + newinet->inet_dport = htons(asoc->peer.port); + newinet->pmtudisc = inet->pmtudisc; + newinet->inet_id = asoc->next_tsn ^ jiffies; + + newinet->uc_ttl = inet->uc_ttl; + newinet->mc_loop = 1; + newinet->mc_ttl = 1; + newinet->mc_index = 0; + newinet->mc_list = NULL; +} + +static inline void sctp_copy_descendant(struct sock *sk_to, + const struct sock *sk_from) +{ + int ancestor_size = sizeof(struct inet_sock) + + sizeof(struct sctp_sock) - + offsetof(struct sctp_sock, auto_asconf_list); + + if (sk_from->sk_family == PF_INET6) + ancestor_size += sizeof(struct ipv6_pinfo); + + __inet_sk_copy_descendant(sk_to, sk_from, ancestor_size); +} + +/* Populate the fields of the newsk from the oldsk and migrate the assoc + * and its messages to the newsk. 
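+ *
+ * For context (illustrative only; sctp_peeloff() is the lksctp-tools
+ * wrapper, not part of this file): this path runs when userspace peels
+ * off an association, e.g.
+ *
+ *   int newfd = sctp_peeloff(fd, assoc_id);
+ *
+ * after which events for that association arrive on newfd only.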
+ */
+static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
+			      struct sctp_association *assoc,
+			      sctp_socket_type_t type)
+{
+	struct sctp_sock *oldsp = sctp_sk(oldsk);
+	struct sctp_sock *newsp = sctp_sk(newsk);
+	struct sctp_bind_bucket *pp; /* hash list port iterator */
+	struct sctp_endpoint *newep = newsp->ep;
+	struct sk_buff *skb, *tmp;
+	struct sctp_ulpevent *event;
+	struct sctp_bind_hashbucket *head;
+
+	/* Migrate socket buffer sizes and all the socket level options to the
+	 * new socket.
+	 */
+	newsk->sk_sndbuf = oldsk->sk_sndbuf;
+	newsk->sk_rcvbuf = oldsk->sk_rcvbuf;
+	/* Brute force copy old sctp opt. */
+	sctp_copy_descendant(newsk, oldsk);
+
+	/* Restore the ep value that was overwritten with the above structure
+	 * copy.
+	 */
+	newsp->ep = newep;
+	newsp->hmac = NULL;
+
+	/* Hook this new socket in to the bind_hash list. */
+	head = &sctp_port_hashtable[sctp_phashfn(sock_net(oldsk),
+						 inet_sk(oldsk)->inet_num)];
+	local_bh_disable();
+	spin_lock(&head->lock);
+	pp = sctp_sk(oldsk)->bind_hash;
+	sk_add_bind_node(newsk, &pp->owner);
+	sctp_sk(newsk)->bind_hash = pp;
+	inet_sk(newsk)->inet_num = inet_sk(oldsk)->inet_num;
+	spin_unlock(&head->lock);
+	local_bh_enable();
+
+	/* Copy the bind_addr list from the original endpoint to the new
+	 * endpoint so that we can handle restarts properly
+	 */
+	sctp_bind_addr_dup(&newsp->ep->base.bind_addr,
+			   &oldsp->ep->base.bind_addr, GFP_KERNEL);
+
+	/* Move any messages in the old socket's receive queue that are for the
+	 * peeled off association to the new socket's receive queue.
+	 */
+	sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) {
+		event = sctp_skb2event(skb);
+		if (event->asoc == assoc) {
+			__skb_unlink(skb, &oldsk->sk_receive_queue);
+			__skb_queue_tail(&newsk->sk_receive_queue, skb);
+			sctp_skb_set_owner_r_frag(skb, newsk);
+		}
+	}
+
+	/* Clean up any messages pending delivery due to partial
+	 * delivery.  Three cases:
+	 * 1) No partial delivery; no work.
+	 * 2) Peeling off partial delivery; keep pd_lobby in new pd_lobby.
+	 * 3) Peeling off non-partial delivery; move pd_lobby to receive_queue.
+	 */
+	skb_queue_head_init(&newsp->pd_lobby);
+	atomic_set(&sctp_sk(newsk)->pd_mode, assoc->ulpq.pd_mode);
+
+	if (atomic_read(&sctp_sk(oldsk)->pd_mode)) {
+		struct sk_buff_head *queue;
+
+		/* Decide which queue to move pd_lobby skbs to. */
+		if (assoc->ulpq.pd_mode)
+			queue = &newsp->pd_lobby;
+		else
+			queue = &newsk->sk_receive_queue;
+
+		/* Walk through the pd_lobby, looking for skbs that
+		 * need to be moved to the new socket.
+		 */
+		sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) {
+			event = sctp_skb2event(skb);
+			if (event->asoc == assoc) {
+				__skb_unlink(skb, &oldsp->pd_lobby);
+				__skb_queue_tail(queue, skb);
+				sctp_skb_set_owner_r_frag(skb, newsk);
+			}
+		}
+
+		/* Clear up any skbs waiting for the partial
+		 * delivery to finish.
+		 */
+		if (assoc->ulpq.pd_mode)
+			sctp_clear_pd(oldsk, NULL);
+
+	}
+
+	sctp_skb_for_each(skb, &assoc->ulpq.reasm, tmp)
+		sctp_skb_set_owner_r_frag(skb, newsk);
+
+	sctp_skb_for_each(skb, &assoc->ulpq.lobby, tmp)
+		sctp_skb_set_owner_r_frag(skb, newsk);
+
+	/* Set the type of socket to indicate that it is peeled off from the
+	 * original UDP-style socket or created with the accept() call on a
+	 * TCP-style socket.
+	 */
+	newsp->type = type;
+
+	/* Mark the new socket "in-use" by the user so that any packets
+	 * that may arrive on the association after we've moved it are
+	 * queued to the backlog.
This prevents a potential race between + * backlog processing on the old socket and new-packet processing + * on the new socket. + * + * The caller has just allocated newsk so we can guarantee that other + * paths won't try to lock it and then oldsk. + */ + lock_sock_nested(newsk, SINGLE_DEPTH_NESTING); + sctp_assoc_migrate(assoc, newsk); + + /* If the association on the newsk is already closed before accept() + * is called, set RCV_SHUTDOWN flag. + */ + if (sctp_state(assoc, CLOSED) && sctp_style(newsk, TCP)) + newsk->sk_shutdown |= RCV_SHUTDOWN; + + newsk->sk_state = SCTP_SS_ESTABLISHED; + release_sock(newsk); +} + + +/* This proto struct describes the ULP interface for SCTP. */ +struct proto sctp_prot = { + .name = "SCTP", + .owner = THIS_MODULE, + .close = sctp_close, + .connect = sctp_connect, + .disconnect = sctp_disconnect, + .accept = sctp_accept, + .ioctl = sctp_ioctl, + .init = sctp_init_sock, + .destroy = sctp_destroy_sock, + .shutdown = sctp_shutdown, + .setsockopt = sctp_setsockopt, + .getsockopt = sctp_getsockopt, + .sendmsg = sctp_sendmsg, + .recvmsg = sctp_recvmsg, + .bind = sctp_bind, + .backlog_rcv = sctp_backlog_rcv, + .hash = sctp_hash, + .unhash = sctp_unhash, + .get_port = sctp_get_port, + .obj_size = sizeof(struct sctp_sock), + .sysctl_mem = sysctl_sctp_mem, + .sysctl_rmem = sysctl_sctp_rmem, + .sysctl_wmem = sysctl_sctp_wmem, + .memory_pressure = &sctp_memory_pressure, + .enter_memory_pressure = sctp_enter_memory_pressure, + .memory_allocated = &sctp_memory_allocated, + .sockets_allocated = &sctp_sockets_allocated, +}; + +#if IS_ENABLED(CONFIG_IPV6) + +struct proto sctpv6_prot = { + .name = "SCTPv6", + .owner = THIS_MODULE, + .close = sctp_close, + .connect = sctp_connect, + .disconnect = sctp_disconnect, + .accept = sctp_accept, + .ioctl = sctp_ioctl, + .init = sctp_init_sock, + .destroy = sctp_destroy_sock, + .shutdown = sctp_shutdown, + .setsockopt = sctp_setsockopt, + .getsockopt = sctp_getsockopt, + .sendmsg = sctp_sendmsg, + .recvmsg = sctp_recvmsg, + .bind = sctp_bind, + .backlog_rcv = sctp_backlog_rcv, + .hash = sctp_hash, + .unhash = sctp_unhash, + .get_port = sctp_get_port, + .obj_size = sizeof(struct sctp6_sock), + .sysctl_mem = sysctl_sctp_mem, + .sysctl_rmem = sysctl_sctp_rmem, + .sysctl_wmem = sysctl_sctp_wmem, + .memory_pressure = &sctp_memory_pressure, + .enter_memory_pressure = sctp_enter_memory_pressure, + .memory_allocated = &sctp_memory_allocated, + .sockets_allocated = &sctp_sockets_allocated, +}; +#endif /* IS_ENABLED(CONFIG_IPV6) */ diff --git a/net/sctp/ssnmap.c b/net/sctp/ssnmap.c new file mode 100644 index 000000000..b9c8521c1 --- /dev/null +++ b/net/sctp/ssnmap.c @@ -0,0 +1,125 @@ +/* SCTP kernel implementation + * Copyright (c) 2003 International Business Machines, Corp. + * + * This file is part of the SCTP kernel implementation + * + * These functions manipulate sctp SSN tracker. + * + * This SCTP implementation is free software; + * you can redistribute it and/or modify it under the terms of + * the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This SCTP implementation is distributed in the hope that it + * will be useful, but WITHOUT ANY WARRANTY; without even the implied + * ************************ + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with GNU CC; see the file COPYING. If not, see + * <http://www.gnu.org/licenses/>. + * + * Please send any bug reports or fixes you make to the + * email address(es): + * lksctp developers <linux-sctp@vger.kernel.org> + * + * Written or modified by: + * Jon Grimm <jgrimm@us.ibm.com> + */ + +#include <linux/types.h> +#include <linux/slab.h> +#include <net/sctp/sctp.h> +#include <net/sctp/sm.h> + +static struct sctp_ssnmap *sctp_ssnmap_init(struct sctp_ssnmap *map, __u16 in, + __u16 out); + +/* Storage size needed for map includes 2 headers and then the + * specific needs of in or out streams. + */ +static inline size_t sctp_ssnmap_size(__u16 in, __u16 out) +{ + return sizeof(struct sctp_ssnmap) + (in + out) * sizeof(__u16); +} + + +/* Create a new sctp_ssnmap. + * Allocate room to store at least 'len' contiguous TSNs. + */ +struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out, + gfp_t gfp) +{ + struct sctp_ssnmap *retval; + int size; + + size = sctp_ssnmap_size(in, out); + if (size <= KMALLOC_MAX_SIZE) + retval = kmalloc(size, gfp); + else + retval = (struct sctp_ssnmap *) + __get_free_pages(gfp, get_order(size)); + if (!retval) + goto fail; + + if (!sctp_ssnmap_init(retval, in, out)) + goto fail_map; + + SCTP_DBG_OBJCNT_INC(ssnmap); + + return retval; + +fail_map: + if (size <= KMALLOC_MAX_SIZE) + kfree(retval); + else + free_pages((unsigned long)retval, get_order(size)); +fail: + return NULL; +} + + +/* Initialize a block of memory as a ssnmap. */ +static struct sctp_ssnmap *sctp_ssnmap_init(struct sctp_ssnmap *map, __u16 in, + __u16 out) +{ + memset(map, 0x00, sctp_ssnmap_size(in, out)); + + /* Start 'in' stream just after the map header. */ + map->in.ssn = (__u16 *)&map[1]; + map->in.len = in; + + /* Start 'out' stream just after 'in'. */ + map->out.ssn = &map->in.ssn[in]; + map->out.len = out; + + return map; +} + +/* Clear out the ssnmap streams. */ +void sctp_ssnmap_clear(struct sctp_ssnmap *map) +{ + size_t size; + + size = (map->in.len + map->out.len) * sizeof(__u16); + memset(map->in.ssn, 0x00, size); +} + +/* Dispose of a ssnmap. */ +void sctp_ssnmap_free(struct sctp_ssnmap *map) +{ + int size; + + if (unlikely(!map)) + return; + + size = sctp_ssnmap_size(map->in.len, map->out.len); + if (size <= KMALLOC_MAX_SIZE) + kfree(map); + else + free_pages((unsigned long)map, get_order(size)); + + SCTP_DBG_OBJCNT_DEC(ssnmap); +} diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c new file mode 100644 index 000000000..26d50c565 --- /dev/null +++ b/net/sctp/sysctl.c @@ -0,0 +1,501 @@ +/* SCTP kernel implementation + * (C) Copyright IBM Corp. 2002, 2004 + * Copyright (c) 2002 Intel Corp. + * + * This file is part of the SCTP kernel implementation + * + * Sysctl related interfaces for SCTP. + * + * This SCTP implementation is free software; + * you can redistribute it and/or modify it under the terms of + * the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This SCTP implementation is distributed in the hope that it + * will be useful, but WITHOUT ANY WARRANTY; without even the implied + * ************************ + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with GNU CC; see the file COPYING. If not, see + * <http://www.gnu.org/licenses/>. 
+ * + * Please send any bug reports or fixes you make to the + * email address(es): + * lksctp developers <linux-sctp@vger.kernel.org> + * + * Written or modified by: + * Mingqin Liu <liuming@us.ibm.com> + * Jon Grimm <jgrimm@us.ibm.com> + * Ardelle Fan <ardelle.fan@intel.com> + * Ryan Layer <rmlayer@us.ibm.com> + * Sridhar Samudrala <sri@us.ibm.com> + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <net/sctp/structs.h> +#include <net/sctp/sctp.h> +#include <linux/sysctl.h> + +static int zero = 0; +static int one = 1; +static int timer_max = 86400000; /* ms in one day */ +static int int_max = INT_MAX; +static int sack_timer_min = 1; +static int sack_timer_max = 500; +static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */ +static int rwnd_scale_max = 16; +static int rto_alpha_min = 0; +static int rto_beta_min = 0; +static int rto_alpha_max = 1000; +static int rto_beta_max = 1000; + +static unsigned long max_autoclose_min = 0; +static unsigned long max_autoclose_max = + (MAX_SCHEDULE_TIMEOUT / HZ > UINT_MAX) + ? UINT_MAX : MAX_SCHEDULE_TIMEOUT / HZ; + +static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); +static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); +static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); +static int proc_sctp_do_alpha_beta(struct ctl_table *ctl, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); +static int proc_sctp_do_auth(struct ctl_table *ctl, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos); + +static struct ctl_table sctp_table[] = { + { + .procname = "sctp_mem", + .data = &sysctl_sctp_mem, + .maxlen = sizeof(sysctl_sctp_mem), + .mode = 0644, + .proc_handler = proc_doulongvec_minmax + }, + { + .procname = "sctp_rmem", + .data = &sysctl_sctp_rmem, + .maxlen = sizeof(sysctl_sctp_rmem), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "sctp_wmem", + .data = &sysctl_sctp_wmem, + .maxlen = sizeof(sysctl_sctp_wmem), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + + { /* sentinel */ } +}; + +static struct ctl_table sctp_net_table[] = { + { + .procname = "rto_initial", + .data = &init_net.sctp.rto_initial, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &one, + .extra2 = &timer_max + }, + { + .procname = "rto_min", + .data = &init_net.sctp.rto_min, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_sctp_do_rto_min, + .extra1 = &one, + .extra2 = &init_net.sctp.rto_max + }, + { + .procname = "rto_max", + .data = &init_net.sctp.rto_max, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_sctp_do_rto_max, + .extra1 = &init_net.sctp.rto_min, + .extra2 = &timer_max + }, + { + .procname = "rto_alpha_exp_divisor", + .data = &init_net.sctp.rto_alpha, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_sctp_do_alpha_beta, + .extra1 = &rto_alpha_min, + .extra2 = &rto_alpha_max, + }, + { + .procname = "rto_beta_exp_divisor", + .data = &init_net.sctp.rto_beta, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_sctp_do_alpha_beta, + .extra1 = &rto_beta_min, + .extra2 = &rto_beta_max, + }, + { + .procname = "max_burst", + .data = &init_net.sctp.max_burst, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + 
.extra1 = &zero, + .extra2 = &int_max + }, + { + .procname = "cookie_preserve_enable", + .data = &init_net.sctp.cookie_preserve_enable, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "cookie_hmac_alg", + .data = &init_net.sctp.sctp_hmac_alg, + .maxlen = 8, + .mode = 0644, + .proc_handler = proc_sctp_do_hmac_alg, + }, + { + .procname = "valid_cookie_life", + .data = &init_net.sctp.valid_cookie_life, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &one, + .extra2 = &timer_max + }, + { + .procname = "sack_timeout", + .data = &init_net.sctp.sack_timeout, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &sack_timer_min, + .extra2 = &sack_timer_max, + }, + { + .procname = "hb_interval", + .data = &init_net.sctp.hb_interval, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &one, + .extra2 = &timer_max + }, + { + .procname = "association_max_retrans", + .data = &init_net.sctp.max_retrans_association, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &one, + .extra2 = &int_max + }, + { + .procname = "path_max_retrans", + .data = &init_net.sctp.max_retrans_path, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &one, + .extra2 = &int_max + }, + { + .procname = "max_init_retransmits", + .data = &init_net.sctp.max_retrans_init, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &one, + .extra2 = &int_max + }, + { + .procname = "pf_retrans", + .data = &init_net.sctp.pf_retrans, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &int_max + }, + { + .procname = "sndbuf_policy", + .data = &init_net.sctp.sndbuf_policy, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "rcvbuf_policy", + .data = &init_net.sctp.rcvbuf_policy, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "default_auto_asconf", + .data = &init_net.sctp.default_auto_asconf, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "addip_enable", + .data = &init_net.sctp.addip_enable, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "addip_noauth_enable", + .data = &init_net.sctp.addip_noauth, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "prsctp_enable", + .data = &init_net.sctp.prsctp_enable, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "auth_enable", + .data = &init_net.sctp.auth_enable, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_sctp_do_auth, + }, + { + .procname = "addr_scope_policy", + .data = &init_net.sctp.scope_policy, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &addr_scope_max, + }, + { + .procname = "rwnd_update_shift", + .data = &init_net.sctp.rwnd_upd_shift, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &proc_dointvec_minmax, + .extra1 = &one, + .extra2 = &rwnd_scale_max, + }, + { + .procname = "max_autoclose", + .data = &init_net.sctp.max_autoclose, + .maxlen = sizeof(unsigned long), + .mode = 0644, + .proc_handler = &proc_doulongvec_minmax, + .extra1 = 
&max_autoclose_min, + .extra2 = &max_autoclose_max, + }, + + { /* sentinel */ } +}; + +static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + struct net *net = current->nsproxy->net_ns; + struct ctl_table tbl; + bool changed = false; + char *none = "none"; + char tmp[8]; + int ret; + + memset(&tbl, 0, sizeof(struct ctl_table)); + + if (write) { + tbl.data = tmp; + tbl.maxlen = sizeof(tmp); + } else { + tbl.data = net->sctp.sctp_hmac_alg ? : none; + tbl.maxlen = strlen(tbl.data); + } + + ret = proc_dostring(&tbl, write, buffer, lenp, ppos); + if (write && ret == 0) { +#ifdef CONFIG_CRYPTO_MD5 + if (!strncmp(tmp, "md5", 3)) { + net->sctp.sctp_hmac_alg = "md5"; + changed = true; + } +#endif +#ifdef CONFIG_CRYPTO_SHA1 + if (!strncmp(tmp, "sha1", 4)) { + net->sctp.sctp_hmac_alg = "sha1"; + changed = true; + } +#endif + if (!strncmp(tmp, "none", 4)) { + net->sctp.sctp_hmac_alg = NULL; + changed = true; + } + if (!changed) + ret = -EINVAL; + } + + return ret; +} + +static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + struct net *net = current->nsproxy->net_ns; + unsigned int min = *(unsigned int *) ctl->extra1; + unsigned int max = *(unsigned int *) ctl->extra2; + struct ctl_table tbl; + int ret, new_value; + + memset(&tbl, 0, sizeof(struct ctl_table)); + tbl.maxlen = sizeof(unsigned int); + + if (write) + tbl.data = &new_value; + else + tbl.data = &net->sctp.rto_min; + + ret = proc_dointvec(&tbl, write, buffer, lenp, ppos); + if (write && ret == 0) { + if (new_value > max || new_value < min) + return -EINVAL; + + net->sctp.rto_min = new_value; + } + + return ret; +} + +static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + struct net *net = current->nsproxy->net_ns; + unsigned int min = *(unsigned int *) ctl->extra1; + unsigned int max = *(unsigned int *) ctl->extra2; + struct ctl_table tbl; + int ret, new_value; + + memset(&tbl, 0, sizeof(struct ctl_table)); + tbl.maxlen = sizeof(unsigned int); + + if (write) + tbl.data = &new_value; + else + tbl.data = &net->sctp.rto_max; + + ret = proc_dointvec(&tbl, write, buffer, lenp, ppos); + if (write && ret == 0) { + if (new_value > max || new_value < min) + return -EINVAL; + + net->sctp.rto_max = new_value; + } + + return ret; +} + +static int proc_sctp_do_alpha_beta(struct ctl_table *ctl, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + if (write) + pr_warn_once("Changing rto_alpha or rto_beta may lead to " + "suboptimal rtt/srtt estimations!\n"); + + return proc_dointvec_minmax(ctl, write, buffer, lenp, ppos); +} + +static int proc_sctp_do_auth(struct ctl_table *ctl, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + struct net *net = current->nsproxy->net_ns; + struct ctl_table tbl; + int new_value, ret; + + memset(&tbl, 0, sizeof(struct ctl_table)); + tbl.maxlen = sizeof(unsigned int); + + if (write) + tbl.data = &new_value; + else + tbl.data = &net->sctp.auth_enable; + + ret = proc_dointvec(&tbl, write, buffer, lenp, ppos); + if (write && ret == 0) { + struct sock *sk = net->sctp.ctl_sock; + + net->sctp.auth_enable = new_value; + /* Update the value in the control socket */ + lock_sock(sk); + sctp_sk(sk)->ep->auth_enable = new_value; + release_sock(sk); + } + + return ret; +} + +int sctp_sysctl_net_register(struct net *net) +{ + struct ctl_table *table; + int i; + + table = kmemdup(sctp_net_table, 
sizeof(sctp_net_table), GFP_KERNEL); + if (!table) + return -ENOMEM; + + for (i = 0; table[i].data; i++) + table[i].data += (char *)(&net->sctp) - (char *)&init_net.sctp; + + net->sctp.sysctl_header = register_net_sysctl(net, "net/sctp", table); + if (net->sctp.sysctl_header == NULL) { + kfree(table); + return -ENOMEM; + } + return 0; +} + +void sctp_sysctl_net_unregister(struct net *net) +{ + struct ctl_table *table; + + table = net->sctp.sysctl_header->ctl_table_arg; + unregister_net_sysctl_table(net->sctp.sysctl_header); + kfree(table); +} + +static struct ctl_table_header *sctp_sysctl_header; + +/* Sysctl registration. */ +void sctp_sysctl_register(void) +{ + sctp_sysctl_header = register_net_sysctl(&init_net, "net/sctp", sctp_table); +} + +/* Sysctl deregistration. */ +void sctp_sysctl_unregister(void) +{ + unregister_net_sysctl_table(sctp_sysctl_header); +} diff --git a/net/sctp/transport.c b/net/sctp/transport.c new file mode 100644 index 000000000..a0a431824 --- /dev/null +++ b/net/sctp/transport.c @@ -0,0 +1,656 @@ +/* SCTP kernel implementation + * Copyright (c) 1999-2000 Cisco, Inc. + * Copyright (c) 1999-2001 Motorola, Inc. + * Copyright (c) 2001-2003 International Business Machines Corp. + * Copyright (c) 2001 Intel Corp. + * Copyright (c) 2001 La Monte H.P. Yarroll + * + * This file is part of the SCTP kernel implementation + * + * This module provides the abstraction for an SCTP tranport representing + * a remote transport address. For local transport addresses, we just use + * union sctp_addr. + * + * This SCTP implementation is free software; + * you can redistribute it and/or modify it under the terms of + * the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This SCTP implementation is distributed in the hope that it + * will be useful, but WITHOUT ANY WARRANTY; without even the implied + * ************************ + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with GNU CC; see the file COPYING. If not, see + * <http://www.gnu.org/licenses/>. + * + * Please send any bug reports or fixes you make to the + * email address(es): + * lksctp developers <linux-sctp@vger.kernel.org> + * + * Written or modified by: + * La Monte H.P. Yarroll <piggy@acm.org> + * Karl Knutson <karl@athena.chicago.il.us> + * Jon Grimm <jgrimm@us.ibm.com> + * Xingang Guo <xingang.guo@intel.com> + * Hui Huang <hui.huang@nokia.com> + * Sridhar Samudrala <sri@us.ibm.com> + * Ardelle Fan <ardelle.fan@intel.com> + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/random.h> +#include <net/sctp/sctp.h> +#include <net/sctp/sm.h> + +/* 1st Level Abstractions. */ + +/* Initialize a new transport from provided memory. */ +static struct sctp_transport *sctp_transport_init(struct net *net, + struct sctp_transport *peer, + const union sctp_addr *addr, + gfp_t gfp) +{ + /* Copy in the address. */ + peer->ipaddr = *addr; + peer->af_specific = sctp_get_af_specific(addr->sa.sa_family); + memset(&peer->saddr, 0, sizeof(union sctp_addr)); + + peer->sack_generation = 0; + + /* From 6.3.1 RTO Calculation: + * + * C1) Until an RTT measurement has been made for a packet sent to the + * given destination transport address, set RTO to the protocol + * parameter 'RTO.Initial'. 
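+	 *
+	 *     For example, with the Linux default rto_initial of 3000 ms
+	 *     (the RFC 4960 RTO.Initial of 3 seconds), peer->rto starts
+	 *     out as msecs_to_jiffies(3000) and is only replaced once
+	 *     sctp_transport_update_rto() sees the first RTT sample.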
+ */ + peer->rto = msecs_to_jiffies(net->sctp.rto_initial); + + peer->last_time_heard = ktime_get(); + peer->last_time_ecne_reduced = jiffies; + + peer->param_flags = SPP_HB_DISABLE | + SPP_PMTUD_ENABLE | + SPP_SACKDELAY_ENABLE; + + /* Initialize the default path max_retrans. */ + peer->pathmaxrxt = net->sctp.max_retrans_path; + peer->pf_retrans = net->sctp.pf_retrans; + + INIT_LIST_HEAD(&peer->transmitted); + INIT_LIST_HEAD(&peer->send_ready); + INIT_LIST_HEAD(&peer->transports); + + setup_timer(&peer->T3_rtx_timer, sctp_generate_t3_rtx_event, + (unsigned long)peer); + setup_timer(&peer->hb_timer, sctp_generate_heartbeat_event, + (unsigned long)peer); + setup_timer(&peer->proto_unreach_timer, + sctp_generate_proto_unreach_event, (unsigned long)peer); + + /* Initialize the 64-bit random nonce sent with heartbeat. */ + get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce)); + + atomic_set(&peer->refcnt, 1); + + return peer; +} + +/* Allocate and initialize a new transport. */ +struct sctp_transport *sctp_transport_new(struct net *net, + const union sctp_addr *addr, + gfp_t gfp) +{ + struct sctp_transport *transport; + + transport = kzalloc(sizeof(*transport), gfp); + if (!transport) + goto fail; + + if (!sctp_transport_init(net, transport, addr, gfp)) + goto fail_init; + + SCTP_DBG_OBJCNT_INC(transport); + + return transport; + +fail_init: + kfree(transport); + +fail: + return NULL; +} + +/* This transport is no longer needed. Free up if possible, or + * delay until it last reference count. + */ +void sctp_transport_free(struct sctp_transport *transport) +{ + transport->dead = 1; + + /* Try to delete the heartbeat timer. */ + if (del_timer(&transport->hb_timer)) + sctp_transport_put(transport); + + /* Delete the T3_rtx timer if it's active. + * There is no point in not doing this now and letting + * structure hang around in memory since we know + * the tranport is going away. + */ + if (del_timer(&transport->T3_rtx_timer)) + sctp_transport_put(transport); + + /* Delete the ICMP proto unreachable timer if it's active. */ + if (del_timer(&transport->proto_unreach_timer)) + sctp_association_put(transport->asoc); + + sctp_transport_put(transport); +} + +static void sctp_transport_destroy_rcu(struct rcu_head *head) +{ + struct sctp_transport *transport; + + transport = container_of(head, struct sctp_transport, rcu); + + dst_release(transport->dst); + kfree(transport); + SCTP_DBG_OBJCNT_DEC(transport); +} + +/* Destroy the transport data structure. + * Assumes there are no more users of this structure. + */ +static void sctp_transport_destroy(struct sctp_transport *transport) +{ + if (unlikely(!transport->dead)) { + WARN(1, "Attempt to destroy undead transport %p!\n", transport); + return; + } + + sctp_packet_free(&transport->packet); + + if (transport->asoc) + sctp_association_put(transport->asoc); + + call_rcu(&transport->rcu, sctp_transport_destroy_rcu); +} + +/* Start T3_rtx timer if it is not already running and update the heartbeat + * timer. This routine is called every time a DATA chunk is sent. + */ +void sctp_transport_reset_timers(struct sctp_transport *transport) +{ + /* RFC 2960 6.3.2 Retransmission Timer Rules + * + * R1) Every time a DATA chunk is sent to any address(including a + * retransmission), if the T3-rtx timer of that address is not running + * start it running so that it will expire after the RTO of that + * address. 
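+	 *
+	 * The heartbeat timer is pushed out below as well: the DATA
+	 * transmission is fresh evidence the path is in use, and
+	 * sctp_transport_timeout() re-arms it (for confirmed, non-PF
+	 * transports) to hbinterval plus an RTO jittered by +/- 50%.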
+ */ + + if (!timer_pending(&transport->T3_rtx_timer)) + if (!mod_timer(&transport->T3_rtx_timer, + jiffies + transport->rto)) + sctp_transport_hold(transport); + + /* When a data chunk is sent, reset the heartbeat interval. */ + if (!mod_timer(&transport->hb_timer, + sctp_transport_timeout(transport))) + sctp_transport_hold(transport); +} + +/* This transport has been assigned to an association. + * Initialize fields from the association or from the sock itself. + * Register the reference count in the association. + */ +void sctp_transport_set_owner(struct sctp_transport *transport, + struct sctp_association *asoc) +{ + transport->asoc = asoc; + sctp_association_hold(asoc); +} + +/* Initialize the pmtu of a transport. */ +void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk) +{ + /* If we don't have a fresh route, look one up */ + if (!transport->dst || transport->dst->obsolete) { + dst_release(transport->dst); + transport->af_specific->get_dst(transport, &transport->saddr, + &transport->fl, sk); + } + + if (transport->dst) { + transport->pathmtu = dst_mtu(transport->dst); + } else + transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT; +} + +void sctp_transport_update_pmtu(struct sock *sk, struct sctp_transport *t, u32 pmtu) +{ + struct dst_entry *dst; + + if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) { + pr_warn("%s: Reported pmtu %d too low, using default minimum of %d\n", + __func__, pmtu, + SCTP_DEFAULT_MINSEGMENT); + /* Use default minimum segment size and disable + * pmtu discovery on this transport. + */ + t->pathmtu = SCTP_DEFAULT_MINSEGMENT; + } else { + t->pathmtu = pmtu; + } + + dst = sctp_transport_dst_check(t); + if (!dst) + t->af_specific->get_dst(t, &t->saddr, &t->fl, sk); + + if (dst) { + dst->ops->update_pmtu(dst, sk, NULL, pmtu); + + dst = sctp_transport_dst_check(t); + if (!dst) + t->af_specific->get_dst(t, &t->saddr, &t->fl, sk); + } +} + +/* Caches the dst entry and source address for a transport's destination + * address. + */ +void sctp_transport_route(struct sctp_transport *transport, + union sctp_addr *saddr, struct sctp_sock *opt) +{ + struct sctp_association *asoc = transport->asoc; + struct sctp_af *af = transport->af_specific; + + af->get_dst(transport, saddr, &transport->fl, sctp_opt2sk(opt)); + + if (saddr) + memcpy(&transport->saddr, saddr, sizeof(union sctp_addr)); + else + af->get_saddr(opt, transport, &transport->fl); + + if ((transport->param_flags & SPP_PMTUD_DISABLE) && transport->pathmtu) { + return; + } + if (transport->dst) { + transport->pathmtu = dst_mtu(transport->dst); + + /* Initialize sk->sk_rcv_saddr, if the transport is the + * association's active path for getsockname(). + */ + if (asoc && (!asoc->peer.primary_path || + (transport == asoc->peer.active_path))) + opt->pf->to_sk_saddr(&transport->saddr, + asoc->base.sk); + } else + transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT; +} + +/* Hold a reference to a transport. */ +void sctp_transport_hold(struct sctp_transport *transport) +{ + atomic_inc(&transport->refcnt); +} + +/* Release a reference to a transport and clean up + * if there are no more references. + */ +void sctp_transport_put(struct sctp_transport *transport) +{ + if (atomic_dec_and_test(&transport->refcnt)) + sctp_transport_destroy(transport); +} + +/* Update transport's RTO based on the newly calculated RTT. */ +void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt) +{ + if (unlikely(!tp->rto_pending)) + /* We should not be doing any RTO updates unless rto_pending is set. 
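+		 * An RTT sample is armed only by a chunk's first
+		 * transmission; retransmissions never feed the estimator
+		 * (Karn's algorithm, RFC 2960 6.3.1 C4/C5).  With the
+		 * default rto_alpha = 3 and rto_beta = 2 (i.e. 1/8 and
+		 * 1/4), the update below works out to
+		 *
+		 *	srtt'   = srtt   - srtt/8   + rtt/8
+		 *	rttvar' = rttvar - rttvar/4 + |srtt - rtt|/4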
*/ + pr_debug("%s: rto_pending not set on transport %p!\n", __func__, tp); + + if (tp->rttvar || tp->srtt) { + struct net *net = sock_net(tp->asoc->base.sk); + /* 6.3.1 C3) When a new RTT measurement R' is made, set + * RTTVAR <- (1 - RTO.Beta) * RTTVAR + RTO.Beta * |SRTT - R'| + * SRTT <- (1 - RTO.Alpha) * SRTT + RTO.Alpha * R' + */ + + /* Note: The above algorithm has been rewritten to + * express rto_beta and rto_alpha as inverse powers + * of two. + * For example, assuming the default value of RTO.Alpha of + * 1/8, rto_alpha would be expressed as 3. + */ + tp->rttvar = tp->rttvar - (tp->rttvar >> net->sctp.rto_beta) + + (((__u32)abs64((__s64)tp->srtt - (__s64)rtt)) >> net->sctp.rto_beta); + tp->srtt = tp->srtt - (tp->srtt >> net->sctp.rto_alpha) + + (rtt >> net->sctp.rto_alpha); + } else { + /* 6.3.1 C2) When the first RTT measurement R is made, set + * SRTT <- R, RTTVAR <- R/2. + */ + tp->srtt = rtt; + tp->rttvar = rtt >> 1; + } + + /* 6.3.1 G1) Whenever RTTVAR is computed, if RTTVAR = 0, then + * adjust RTTVAR <- G, where G is the CLOCK GRANULARITY. + */ + if (tp->rttvar == 0) + tp->rttvar = SCTP_CLOCK_GRANULARITY; + + /* 6.3.1 C3) After the computation, update RTO <- SRTT + 4 * RTTVAR. */ + tp->rto = tp->srtt + (tp->rttvar << 2); + + /* 6.3.1 C6) Whenever RTO is computed, if it is less than RTO.Min + * seconds then it is rounded up to RTO.Min seconds. + */ + if (tp->rto < tp->asoc->rto_min) + tp->rto = tp->asoc->rto_min; + + /* 6.3.1 C7) A maximum value may be placed on RTO provided it is + * at least RTO.max seconds. + */ + if (tp->rto > tp->asoc->rto_max) + tp->rto = tp->asoc->rto_max; + + sctp_max_rto(tp->asoc, tp); + tp->rtt = rtt; + + /* Reset rto_pending so that a new RTT measurement is started when a + * new data chunk is sent. + */ + tp->rto_pending = 0; + + pr_debug("%s: transport:%p, rtt:%d, srtt:%d rttvar:%d, rto:%ld\n", + __func__, tp, rtt, tp->srtt, tp->rttvar, tp->rto); +} + +/* This routine updates the transport's cwnd and partial_bytes_acked + * parameters based on the bytes acked in the received SACK. + */ +void sctp_transport_raise_cwnd(struct sctp_transport *transport, + __u32 sack_ctsn, __u32 bytes_acked) +{ + struct sctp_association *asoc = transport->asoc; + __u32 cwnd, ssthresh, flight_size, pba, pmtu; + + cwnd = transport->cwnd; + flight_size = transport->flight_size; + + /* See if we need to exit Fast Recovery first */ + if (asoc->fast_recovery && + TSN_lte(asoc->fast_recovery_exit, sack_ctsn)) + asoc->fast_recovery = 0; + + /* The appropriate cwnd increase algorithm is performed if, and only + * if the cumulative TSN whould advanced and the congestion window is + * being fully utilized. + */ + if (TSN_lte(sack_ctsn, transport->asoc->ctsn_ack_point) || + (flight_size < cwnd)) + return; + + ssthresh = transport->ssthresh; + pba = transport->partial_bytes_acked; + pmtu = transport->asoc->pathmtu; + + if (cwnd <= ssthresh) { + /* RFC 4960 7.2.1 + * o When cwnd is less than or equal to ssthresh, an SCTP + * endpoint MUST use the slow-start algorithm to increase + * cwnd only if the current congestion window is being fully + * utilized, an incoming SACK advances the Cumulative TSN + * Ack Point, and the data sender is not in Fast Recovery. + * Only when these three conditions are met can the cwnd be + * increased; otherwise, the cwnd MUST not be increased. + * If these conditions are met, then cwnd MUST be increased + * by, at most, the lesser of 1) the total size of the + * previously outstanding DATA chunk(s) acknowledged, and + * 2) the destination's path MTU. 
This upper bound protects + * against the ACK-Splitting attack outlined in [SAVAGE99]. + */ + if (asoc->fast_recovery) + return; + + if (bytes_acked > pmtu) + cwnd += pmtu; + else + cwnd += bytes_acked; + + pr_debug("%s: slow start: transport:%p, bytes_acked:%d, " + "cwnd:%d, ssthresh:%d, flight_size:%d, pba:%d\n", + __func__, transport, bytes_acked, cwnd, ssthresh, + flight_size, pba); + } else { + /* RFC 2960 7.2.2 Whenever cwnd is greater than ssthresh, + * upon each SACK arrival that advances the Cumulative TSN Ack + * Point, increase partial_bytes_acked by the total number of + * bytes of all new chunks acknowledged in that SACK including + * chunks acknowledged by the new Cumulative TSN Ack and by + * Gap Ack Blocks. + * + * When partial_bytes_acked is equal to or greater than cwnd + * and before the arrival of the SACK the sender had cwnd or + * more bytes of data outstanding (i.e., before arrival of the + * SACK, flightsize was greater than or equal to cwnd), + * increase cwnd by MTU, and reset partial_bytes_acked to + * (partial_bytes_acked - cwnd). + */ + pba += bytes_acked; + if (pba >= cwnd) { + cwnd += pmtu; + pba = ((cwnd < pba) ? (pba - cwnd) : 0); + } + + pr_debug("%s: congestion avoidance: transport:%p, " + "bytes_acked:%d, cwnd:%d, ssthresh:%d, " + "flight_size:%d, pba:%d\n", __func__, + transport, bytes_acked, cwnd, ssthresh, + flight_size, pba); + } + + transport->cwnd = cwnd; + transport->partial_bytes_acked = pba; +} + +/* This routine is used to lower the transport's cwnd when congestion is + * detected. + */ +void sctp_transport_lower_cwnd(struct sctp_transport *transport, + sctp_lower_cwnd_t reason) +{ + struct sctp_association *asoc = transport->asoc; + + switch (reason) { + case SCTP_LOWER_CWND_T3_RTX: + /* RFC 2960 Section 7.2.3, sctpimpguide + * When the T3-rtx timer expires on an address, SCTP should + * perform slow start by: + * ssthresh = max(cwnd/2, 4*MTU) + * cwnd = 1*MTU + * partial_bytes_acked = 0 + */ + transport->ssthresh = max(transport->cwnd/2, + 4*asoc->pathmtu); + transport->cwnd = asoc->pathmtu; + + /* T3-rtx also clears fast recovery */ + asoc->fast_recovery = 0; + break; + + case SCTP_LOWER_CWND_FAST_RTX: + /* RFC 2960 7.2.4 Adjust the ssthresh and cwnd of the + * destination address(es) to which the missing DATA chunks + * were last sent, according to the formula described in + * Section 7.2.3. + * + * RFC 2960 7.2.3, sctpimpguide Upon detection of packet + * losses from SACK (see Section 7.2.4), An endpoint + * should do the following: + * ssthresh = max(cwnd/2, 4*MTU) + * cwnd = ssthresh + * partial_bytes_acked = 0 + */ + if (asoc->fast_recovery) + return; + + /* Mark Fast recovery */ + asoc->fast_recovery = 1; + asoc->fast_recovery_exit = asoc->next_tsn - 1; + + transport->ssthresh = max(transport->cwnd/2, + 4*asoc->pathmtu); + transport->cwnd = transport->ssthresh; + break; + + case SCTP_LOWER_CWND_ECNE: + /* RFC 2481 Section 6.1.2. + * If the sender receives an ECN-Echo ACK packet + * then the sender knows that congestion was encountered in the + * network on the path from the sender to the receiver. The + * indication of congestion should be treated just as a + * congestion loss in non-ECN Capable TCP. That is, the TCP + * source halves the congestion window "cwnd" and reduces the + * slow start threshold "ssthresh". + * A critical condition is that TCP does not react to + * congestion indications more than once every window of + * data (or more loosely more than once every round-trip time). 
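+		 *
+		 * That once-per-RTT rule is what the
+		 * last_time_ecne_reduced test below implements: a further
+		 * ECNE arriving within one measured RTT of the previous
+		 * reduction is ignored.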
+ */ + if (time_after(jiffies, transport->last_time_ecne_reduced + + transport->rtt)) { + transport->ssthresh = max(transport->cwnd/2, + 4*asoc->pathmtu); + transport->cwnd = transport->ssthresh; + transport->last_time_ecne_reduced = jiffies; + } + break; + + case SCTP_LOWER_CWND_INACTIVE: + /* RFC 2960 Section 7.2.1, sctpimpguide + * When the endpoint does not transmit data on a given + * transport address, the cwnd of the transport address + * should be adjusted to max(cwnd/2, 4*MTU) per RTO. + * NOTE: Although the draft recommends that this check needs + * to be done every RTO interval, we do it every hearbeat + * interval. + */ + transport->cwnd = max(transport->cwnd/2, + 4*asoc->pathmtu); + break; + } + + transport->partial_bytes_acked = 0; + + pr_debug("%s: transport:%p, reason:%d, cwnd:%d, ssthresh:%d\n", + __func__, transport, reason, transport->cwnd, + transport->ssthresh); +} + +/* Apply Max.Burst limit to the congestion window: + * sctpimpguide-05 2.14.2 + * D) When the time comes for the sender to + * transmit new DATA chunks, the protocol parameter Max.Burst MUST + * first be applied to limit how many new DATA chunks may be sent. + * The limit is applied by adjusting cwnd as follows: + * if ((flightsize+ Max.Burst * MTU) < cwnd) + * cwnd = flightsize + Max.Burst * MTU + */ + +void sctp_transport_burst_limited(struct sctp_transport *t) +{ + struct sctp_association *asoc = t->asoc; + u32 old_cwnd = t->cwnd; + u32 max_burst_bytes; + + if (t->burst_limited || asoc->max_burst == 0) + return; + + max_burst_bytes = t->flight_size + (asoc->max_burst * asoc->pathmtu); + if (max_burst_bytes < old_cwnd) { + t->cwnd = max_burst_bytes; + t->burst_limited = old_cwnd; + } +} + +/* Restore the old cwnd congestion window, after the burst had it's + * desired effect. + */ +void sctp_transport_burst_reset(struct sctp_transport *t) +{ + if (t->burst_limited) { + t->cwnd = t->burst_limited; + t->burst_limited = 0; + } +} + +/* What is the next timeout value for this transport? */ +unsigned long sctp_transport_timeout(struct sctp_transport *trans) +{ + /* RTO + timer slack +/- 50% of RTO */ + unsigned long timeout = (trans->rto >> 1) + prandom_u32_max(trans->rto); + + if (trans->state != SCTP_UNCONFIRMED && + trans->state != SCTP_PF) + timeout += trans->hbinterval; + + return timeout + jiffies; +} + +/* Reset transport variables to their initial values */ +void sctp_transport_reset(struct sctp_transport *t) +{ + struct sctp_association *asoc = t->asoc; + + /* RFC 2960 (bis), Section 5.2.4 + * All the congestion control parameters (e.g., cwnd, ssthresh) + * related to this peer MUST be reset to their initial values + * (see Section 6.2.1) + */ + t->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380)); + t->burst_limited = 0; + t->ssthresh = asoc->peer.i.a_rwnd; + t->rto = asoc->rto_initial; + sctp_max_rto(asoc, t); + t->rtt = 0; + t->srtt = 0; + t->rttvar = 0; + + /* Reset these additional varibles so that we have a clean + * slate. 
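+	 * The SFR-CACC changeover state below is wiped as well, so a
+	 * path switch in the association's previous incarnation cannot
+	 * leak stale changeover bookkeeping into the restart.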
+ */ + t->partial_bytes_acked = 0; + t->flight_size = 0; + t->error_count = 0; + t->rto_pending = 0; + t->hb_sent = 0; + + /* Initialize the state information for SFR-CACC */ + t->cacc.changeover_active = 0; + t->cacc.cycling_changeover = 0; + t->cacc.next_tsn_at_change = 0; + t->cacc.cacc_saw_newack = 0; +} + +/* Schedule retransmission on the given transport */ +void sctp_transport_immediate_rtx(struct sctp_transport *t) +{ + /* Stop pending T3_rtx_timer */ + if (del_timer(&t->T3_rtx_timer)) + sctp_transport_put(t); + + sctp_retransmit(&t->asoc->outqueue, t, SCTP_RTXR_T3_RTX); + if (!timer_pending(&t->T3_rtx_timer)) { + if (!mod_timer(&t->T3_rtx_timer, jiffies + t->rto)) + sctp_transport_hold(t); + } +} diff --git a/net/sctp/tsnmap.c b/net/sctp/tsnmap.c new file mode 100644 index 000000000..7635f9f23 --- /dev/null +++ b/net/sctp/tsnmap.c @@ -0,0 +1,379 @@ +/* SCTP kernel implementation + * (C) Copyright IBM Corp. 2001, 2004 + * Copyright (c) 1999-2000 Cisco, Inc. + * Copyright (c) 1999-2001 Motorola, Inc. + * Copyright (c) 2001 Intel Corp. + * + * This file is part of the SCTP kernel implementation + * + * These functions manipulate sctp tsn mapping array. + * + * This SCTP implementation is free software; + * you can redistribute it and/or modify it under the terms of + * the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This SCTP implementation is distributed in the hope that it + * will be useful, but WITHOUT ANY WARRANTY; without even the implied + * ************************ + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with GNU CC; see the file COPYING. If not, see + * <http://www.gnu.org/licenses/>. + * + * Please send any bug reports or fixes you make to the + * email address(es): + * lksctp developers <linux-sctp@vger.kernel.org> + * + * Written or modified by: + * La Monte H.P. Yarroll <piggy@acm.org> + * Jon Grimm <jgrimm@us.ibm.com> + * Karl Knutson <karl@athena.chicago.il.us> + * Sridhar Samudrala <sri@us.ibm.com> + */ + +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/bitmap.h> +#include <net/sctp/sctp.h> +#include <net/sctp/sm.h> + +static void sctp_tsnmap_update(struct sctp_tsnmap *map); +static void sctp_tsnmap_find_gap_ack(unsigned long *map, __u16 off, + __u16 len, __u16 *start, __u16 *end); +static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 size); + +/* Initialize a block of memory as a tsnmap. */ +struct sctp_tsnmap *sctp_tsnmap_init(struct sctp_tsnmap *map, __u16 len, + __u32 initial_tsn, gfp_t gfp) +{ + if (!map->tsn_map) { + map->tsn_map = kzalloc(len>>3, gfp); + if (map->tsn_map == NULL) + return NULL; + + map->len = len; + } else { + bitmap_zero(map->tsn_map, map->len); + } + + /* Keep track of TSNs represented by tsn_map. */ + map->base_tsn = initial_tsn; + map->cumulative_tsn_ack_point = initial_tsn - 1; + map->max_tsn_seen = map->cumulative_tsn_ack_point; + map->num_dup_tsns = 0; + + return map; +} + +void sctp_tsnmap_free(struct sctp_tsnmap *map) +{ + map->len = 0; + kfree(map->tsn_map); +} + +/* Test the tracking state of this TSN. 
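+ * TSNs at or beyond base_tsn + SCTP_TSN_MAP_SIZE lie outside the
+ * window the map can represent and are reported as invalid.
+ *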
+ * Returns: + * 0 if the TSN has not yet been seen + * >0 if the TSN has been seen (duplicate) + * <0 if the TSN is invalid (too large to track) + */ +int sctp_tsnmap_check(const struct sctp_tsnmap *map, __u32 tsn) +{ + u32 gap; + + /* Check to see if this is an old TSN */ + if (TSN_lte(tsn, map->cumulative_tsn_ack_point)) + return 1; + + /* Verify that we can hold this TSN and that it will not + * overlfow our map + */ + if (!TSN_lt(tsn, map->base_tsn + SCTP_TSN_MAP_SIZE)) + return -1; + + /* Calculate the index into the mapping arrays. */ + gap = tsn - map->base_tsn; + + /* Check to see if TSN has already been recorded. */ + if (gap < map->len && test_bit(gap, map->tsn_map)) + return 1; + else + return 0; +} + + +/* Mark this TSN as seen. */ +int sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn, + struct sctp_transport *trans) +{ + u16 gap; + + if (TSN_lt(tsn, map->base_tsn)) + return 0; + + gap = tsn - map->base_tsn; + + if (gap >= map->len && !sctp_tsnmap_grow(map, gap + 1)) + return -ENOMEM; + + if (!sctp_tsnmap_has_gap(map) && gap == 0) { + /* In this case the map has no gaps and the tsn we are + * recording is the next expected tsn. We don't touch + * the map but simply bump the values. + */ + map->max_tsn_seen++; + map->cumulative_tsn_ack_point++; + if (trans) + trans->sack_generation = + trans->asoc->peer.sack_generation; + map->base_tsn++; + } else { + /* Either we already have a gap, or about to record a gap, so + * have work to do. + * + * Bump the max. + */ + if (TSN_lt(map->max_tsn_seen, tsn)) + map->max_tsn_seen = tsn; + + /* Mark the TSN as received. */ + set_bit(gap, map->tsn_map); + + /* Go fixup any internal TSN mapping variables including + * cumulative_tsn_ack_point. + */ + sctp_tsnmap_update(map); + } + + return 0; +} + + +/* Initialize a Gap Ack Block iterator from memory being provided. */ +static void sctp_tsnmap_iter_init(const struct sctp_tsnmap *map, + struct sctp_tsnmap_iter *iter) +{ + /* Only start looking one past the Cumulative TSN Ack Point. */ + iter->start = map->cumulative_tsn_ack_point + 1; +} + +/* Get the next Gap Ack Blocks. Returns 0 if there was not another block + * to get. + */ +static int sctp_tsnmap_next_gap_ack(const struct sctp_tsnmap *map, + struct sctp_tsnmap_iter *iter, + __u16 *start, __u16 *end) +{ + int ended = 0; + __u16 start_ = 0, end_ = 0, offset; + + /* If there are no more gap acks possible, get out fast. */ + if (TSN_lte(map->max_tsn_seen, iter->start)) + return 0; + + offset = iter->start - map->base_tsn; + sctp_tsnmap_find_gap_ack(map->tsn_map, offset, map->len, + &start_, &end_); + + /* The Gap Ack Block happens to end at the end of the map. */ + if (start_ && !end_) + end_ = map->len - 1; + + /* If we found a Gap Ack Block, return the start and end and + * bump the iterator forward. + */ + if (end_) { + /* Fix up the start and end based on the + * Cumulative TSN Ack which is always 1 behind base. + */ + *start = start_ + 1; + *end = end_ + 1; + + /* Move the iterator forward. */ + iter->start = map->cumulative_tsn_ack_point + *end + 1; + ended = 1; + } + + return ended; +} + +/* Mark this and any lower TSN as seen. */ +void sctp_tsnmap_skip(struct sctp_tsnmap *map, __u32 tsn) +{ + u32 gap; + + if (TSN_lt(tsn, map->base_tsn)) + return; + if (!TSN_lt(tsn, map->base_tsn + SCTP_TSN_MAP_SIZE)) + return; + + /* Bump the max. 
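+	 *
+	 * sctp_tsnmap_skip() serves FORWARD-TSN (PR-SCTP) processing:
+	 * the peer has asked us to stop expecting anything up to and
+	 * including 'tsn', so the cumulative ack point is advanced past
+	 * it unconditionally below.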
*/ + if (TSN_lt(map->max_tsn_seen, tsn)) + map->max_tsn_seen = tsn; + + gap = tsn - map->base_tsn + 1; + + map->base_tsn += gap; + map->cumulative_tsn_ack_point += gap; + if (gap >= map->len) { + /* If our gap is larger then the map size, just + * zero out the map. + */ + bitmap_zero(map->tsn_map, map->len); + } else { + /* If the gap is smaller than the map size, + * shift the map by 'gap' bits and update further. + */ + bitmap_shift_right(map->tsn_map, map->tsn_map, gap, map->len); + sctp_tsnmap_update(map); + } +} + +/******************************************************************** + * 2nd Level Abstractions + ********************************************************************/ + +/* This private helper function updates the tsnmap buffers and + * the Cumulative TSN Ack Point. + */ +static void sctp_tsnmap_update(struct sctp_tsnmap *map) +{ + u16 len; + unsigned long zero_bit; + + + len = map->max_tsn_seen - map->cumulative_tsn_ack_point; + zero_bit = find_first_zero_bit(map->tsn_map, len); + if (!zero_bit) + return; /* The first 0-bit is bit 0. nothing to do */ + + map->base_tsn += zero_bit; + map->cumulative_tsn_ack_point += zero_bit; + + bitmap_shift_right(map->tsn_map, map->tsn_map, zero_bit, map->len); +} + +/* How many data chunks are we missing from our peer? + */ +__u16 sctp_tsnmap_pending(struct sctp_tsnmap *map) +{ + __u32 cum_tsn = map->cumulative_tsn_ack_point; + __u32 max_tsn = map->max_tsn_seen; + __u32 base_tsn = map->base_tsn; + __u16 pending_data; + u32 gap; + + pending_data = max_tsn - cum_tsn; + gap = max_tsn - base_tsn; + + if (gap == 0 || gap >= map->len) + goto out; + + pending_data -= bitmap_weight(map->tsn_map, gap + 1); +out: + return pending_data; +} + +/* This is a private helper for finding Gap Ack Blocks. It searches a + * single array for the start and end of a Gap Ack Block. + * + * The flags "started" and "ended" tell is if we found the beginning + * or (respectively) the end of a Gap Ack Block. + */ +static void sctp_tsnmap_find_gap_ack(unsigned long *map, __u16 off, + __u16 len, __u16 *start, __u16 *end) +{ + int i = off; + + /* Look through the entire array, but break out + * early if we have found the end of the Gap Ack Block. + */ + + /* Also, stop looking past the maximum TSN seen. */ + + /* Look for the start. */ + i = find_next_bit(map, len, off); + if (i < len) + *start = i; + + /* Look for the end. */ + if (*start) { + /* We have found the start, let's find the + * end. If we find the end, break out. + */ + i = find_next_zero_bit(map, len, i); + if (i < len) + *end = i - 1; + } +} + +/* Renege that we have seen a TSN. */ +void sctp_tsnmap_renege(struct sctp_tsnmap *map, __u32 tsn) +{ + u32 gap; + + if (TSN_lt(tsn, map->base_tsn)) + return; + /* Assert: TSN is in range. */ + if (!TSN_lt(tsn, map->base_tsn + map->len)) + return; + + gap = tsn - map->base_tsn; + + /* Pretend we never saw the TSN. */ + clear_bit(gap, map->tsn_map); +} + +/* How many gap ack blocks do we have recorded? */ +__u16 sctp_tsnmap_num_gabs(struct sctp_tsnmap *map, + struct sctp_gap_ack_block *gabs) +{ + struct sctp_tsnmap_iter iter; + int ngaps = 0; + + /* Refresh the gap ack information. 
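+	 *
+	 * For example, with a cumulative TSN ack point of 100 and TSNs
+	 * 102, 103 and 106 seen, two blocks are reported, start=2/end=3
+	 * and start=6/end=6, both as offsets from the cumulative TSN
+	 * ack point as the SACK wire format requires.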
*/ + if (sctp_tsnmap_has_gap(map)) { + __u16 start = 0, end = 0; + sctp_tsnmap_iter_init(map, &iter); + while (sctp_tsnmap_next_gap_ack(map, &iter, + &start, + &end)) { + + gabs[ngaps].start = htons(start); + gabs[ngaps].end = htons(end); + ngaps++; + if (ngaps >= SCTP_MAX_GABS) + break; + } + } + return ngaps; +} + +static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 size) +{ + unsigned long *new; + unsigned long inc; + u16 len; + + if (size > SCTP_TSN_MAP_SIZE) + return 0; + + inc = ALIGN((size - map->len), BITS_PER_LONG) + SCTP_TSN_MAP_INCREMENT; + len = min_t(u16, map->len + inc, SCTP_TSN_MAP_SIZE); + + new = kzalloc(len>>3, GFP_ATOMIC); + if (!new) + return 0; + + bitmap_copy(new, map->tsn_map, + map->max_tsn_seen - map->cumulative_tsn_ack_point); + kfree(map->tsn_map); + map->tsn_map = new; + map->len = len; + + return 1; +} diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c new file mode 100644 index 000000000..d1e38308f --- /dev/null +++ b/net/sctp/ulpevent.c @@ -0,0 +1,1065 @@ +/* SCTP kernel implementation + * (C) Copyright IBM Corp. 2001, 2004 + * Copyright (c) 1999-2000 Cisco, Inc. + * Copyright (c) 1999-2001 Motorola, Inc. + * Copyright (c) 2001 Intel Corp. + * Copyright (c) 2001 Nokia, Inc. + * Copyright (c) 2001 La Monte H.P. Yarroll + * + * These functions manipulate an sctp event. The struct ulpevent is used + * to carry notifications and data to the ULP (sockets). + * + * This SCTP implementation is free software; + * you can redistribute it and/or modify it under the terms of + * the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This SCTP implementation is distributed in the hope that it + * will be useful, but WITHOUT ANY WARRANTY; without even the implied + * ************************ + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with GNU CC; see the file COPYING. If not, see + * <http://www.gnu.org/licenses/>. + * + * Please send any bug reports or fixes you make to the + * email address(es): + * lksctp developers <linux-sctp@vger.kernel.org> + * + * Written or modified by: + * Jon Grimm <jgrimm@us.ibm.com> + * La Monte H.P. Yarroll <piggy@acm.org> + * Ardelle Fan <ardelle.fan@intel.com> + * Sridhar Samudrala <sri@us.ibm.com> + */ + +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/skbuff.h> +#include <net/sctp/structs.h> +#include <net/sctp/sctp.h> +#include <net/sctp/sm.h> + +static void sctp_ulpevent_receive_data(struct sctp_ulpevent *event, + struct sctp_association *asoc); +static void sctp_ulpevent_release_data(struct sctp_ulpevent *event); +static void sctp_ulpevent_release_frag_data(struct sctp_ulpevent *event); + + +/* Initialize an ULP event from an given skb. */ +static void sctp_ulpevent_init(struct sctp_ulpevent *event, + int msg_flags, + unsigned int len) +{ + memset(event, 0, sizeof(struct sctp_ulpevent)); + event->msg_flags = msg_flags; + event->rmem_len = len; +} + +/* Create a new sctp_ulpevent. */ +static struct sctp_ulpevent *sctp_ulpevent_new(int size, int msg_flags, + gfp_t gfp) +{ + struct sctp_ulpevent *event; + struct sk_buff *skb; + + skb = alloc_skb(size, gfp); + if (!skb) + goto fail; + + event = sctp_skb2event(skb); + sctp_ulpevent_init(event, msg_flags, skb->truesize); + + return event; + +fail: + return NULL; +} + +/* Is this a MSG_NOTIFICATION? 
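+ *
+ * The same flag is what userspace tests after recvmsg(); a minimal
+ * sketch of the consumer side (illustrative only):
+ *
+ *	n = recvmsg(fd, &msg, 0);
+ *	if (n > 0 && (msg.msg_flags & MSG_NOTIFICATION)) {
+ *		union sctp_notification *sn = iov.iov_base;
+ *		switch (sn->sn_header.sn_type) { ... }
+ *	}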
*/ +int sctp_ulpevent_is_notification(const struct sctp_ulpevent *event) +{ + return MSG_NOTIFICATION == (event->msg_flags & MSG_NOTIFICATION); +} + +/* Hold the association in case the msg_name needs read out of + * the association. + */ +static inline void sctp_ulpevent_set_owner(struct sctp_ulpevent *event, + const struct sctp_association *asoc) +{ + struct sk_buff *skb; + + /* Cast away the const, as we are just wanting to + * bump the reference count. + */ + sctp_association_hold((struct sctp_association *)asoc); + skb = sctp_event2skb(event); + event->asoc = (struct sctp_association *)asoc; + atomic_add(event->rmem_len, &event->asoc->rmem_alloc); + sctp_skb_set_owner_r(skb, asoc->base.sk); +} + +/* A simple destructor to give up the reference to the association. */ +static inline void sctp_ulpevent_release_owner(struct sctp_ulpevent *event) +{ + struct sctp_association *asoc = event->asoc; + + atomic_sub(event->rmem_len, &asoc->rmem_alloc); + sctp_association_put(asoc); +} + +/* Create and initialize an SCTP_ASSOC_CHANGE event. + * + * 5.3.1.1 SCTP_ASSOC_CHANGE + * + * Communication notifications inform the ULP that an SCTP association + * has either begun or ended. The identifier for a new association is + * provided by this notification. + * + * Note: There is no field checking here. If a field is unused it will be + * zero'd out. + */ +struct sctp_ulpevent *sctp_ulpevent_make_assoc_change( + const struct sctp_association *asoc, + __u16 flags, __u16 state, __u16 error, __u16 outbound, + __u16 inbound, struct sctp_chunk *chunk, gfp_t gfp) +{ + struct sctp_ulpevent *event; + struct sctp_assoc_change *sac; + struct sk_buff *skb; + + /* If the lower layer passed in the chunk, it will be + * an ABORT, so we need to include it in the sac_info. + */ + if (chunk) { + /* Copy the chunk data to a new skb and reserve enough + * head room to use as notification. + */ + skb = skb_copy_expand(chunk->skb, + sizeof(struct sctp_assoc_change), 0, gfp); + + if (!skb) + goto fail; + + /* Embed the event fields inside the cloned skb. */ + event = sctp_skb2event(skb); + sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize); + + /* Include the notification structure */ + sac = (struct sctp_assoc_change *) + skb_push(skb, sizeof(struct sctp_assoc_change)); + + /* Trim the buffer to the right length. */ + skb_trim(skb, sizeof(struct sctp_assoc_change) + + ntohs(chunk->chunk_hdr->length) - + sizeof(sctp_chunkhdr_t)); + } else { + event = sctp_ulpevent_new(sizeof(struct sctp_assoc_change), + MSG_NOTIFICATION, gfp); + if (!event) + goto fail; + + skb = sctp_event2skb(event); + sac = (struct sctp_assoc_change *) skb_put(skb, + sizeof(struct sctp_assoc_change)); + } + + /* Socket Extensions for SCTP + * 5.3.1.1 SCTP_ASSOC_CHANGE + * + * sac_type: + * It should be SCTP_ASSOC_CHANGE. + */ + sac->sac_type = SCTP_ASSOC_CHANGE; + + /* Socket Extensions for SCTP + * 5.3.1.1 SCTP_ASSOC_CHANGE + * + * sac_state: 32 bits (signed integer) + * This field holds one of a number of values that communicate the + * event that happened to the association. + */ + sac->sac_state = state; + + /* Socket Extensions for SCTP + * 5.3.1.1 SCTP_ASSOC_CHANGE + * + * sac_flags: 16 bits (unsigned integer) + * Currently unused. + */ + sac->sac_flags = 0; + + /* Socket Extensions for SCTP + * 5.3.1.1 SCTP_ASSOC_CHANGE + * + * sac_length: sizeof (__u32) + * This field is the total length of the notification data, including + * the notification header. 
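+	 * For an ABORT-triggered event this therefore counts the chunk
+	 * payload trimmed into the skb above as well, not just
+	 * sizeof(struct sctp_assoc_change).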
+ */ + sac->sac_length = skb->len; + + /* Socket Extensions for SCTP + * 5.3.1.1 SCTP_ASSOC_CHANGE + * + * sac_error: 32 bits (signed integer) + * + * If the state was reached due to a error condition (e.g. + * COMMUNICATION_LOST) any relevant error information is available in + * this field. This corresponds to the protocol error codes defined in + * [SCTP]. + */ + sac->sac_error = error; + + /* Socket Extensions for SCTP + * 5.3.1.1 SCTP_ASSOC_CHANGE + * + * sac_outbound_streams: 16 bits (unsigned integer) + * sac_inbound_streams: 16 bits (unsigned integer) + * + * The maximum number of streams allowed in each direction are + * available in sac_outbound_streams and sac_inbound streams. + */ + sac->sac_outbound_streams = outbound; + sac->sac_inbound_streams = inbound; + + /* Socket Extensions for SCTP + * 5.3.1.1 SCTP_ASSOC_CHANGE + * + * sac_assoc_id: sizeof (sctp_assoc_t) + * + * The association id field, holds the identifier for the association. + * All notifications for a given association have the same association + * identifier. For TCP style socket, this field is ignored. + */ + sctp_ulpevent_set_owner(event, asoc); + sac->sac_assoc_id = sctp_assoc2id(asoc); + + return event; + +fail: + return NULL; +} + +/* Create and initialize an SCTP_PEER_ADDR_CHANGE event. + * + * Socket Extensions for SCTP - draft-01 + * 5.3.1.2 SCTP_PEER_ADDR_CHANGE + * + * When a destination address on a multi-homed peer encounters a change + * an interface details event is sent. + */ +struct sctp_ulpevent *sctp_ulpevent_make_peer_addr_change( + const struct sctp_association *asoc, + const struct sockaddr_storage *aaddr, + int flags, int state, int error, gfp_t gfp) +{ + struct sctp_ulpevent *event; + struct sctp_paddr_change *spc; + struct sk_buff *skb; + + event = sctp_ulpevent_new(sizeof(struct sctp_paddr_change), + MSG_NOTIFICATION, gfp); + if (!event) + goto fail; + + skb = sctp_event2skb(event); + spc = (struct sctp_paddr_change *) + skb_put(skb, sizeof(struct sctp_paddr_change)); + + /* Sockets API Extensions for SCTP + * Section 5.3.1.2 SCTP_PEER_ADDR_CHANGE + * + * spc_type: + * + * It should be SCTP_PEER_ADDR_CHANGE. + */ + spc->spc_type = SCTP_PEER_ADDR_CHANGE; + + /* Sockets API Extensions for SCTP + * Section 5.3.1.2 SCTP_PEER_ADDR_CHANGE + * + * spc_length: sizeof (__u32) + * + * This field is the total length of the notification data, including + * the notification header. + */ + spc->spc_length = sizeof(struct sctp_paddr_change); + + /* Sockets API Extensions for SCTP + * Section 5.3.1.2 SCTP_PEER_ADDR_CHANGE + * + * spc_flags: 16 bits (unsigned integer) + * Currently unused. + */ + spc->spc_flags = 0; + + /* Sockets API Extensions for SCTP + * Section 5.3.1.2 SCTP_PEER_ADDR_CHANGE + * + * spc_state: 32 bits (signed integer) + * + * This field holds one of a number of values that communicate the + * event that happened to the address. + */ + spc->spc_state = state; + + /* Sockets API Extensions for SCTP + * Section 5.3.1.2 SCTP_PEER_ADDR_CHANGE + * + * spc_error: 32 bits (signed integer) + * + * If the state was reached due to any error condition (e.g. + * ADDRESS_UNREACHABLE) any relevant error information is available in + * this field. + */ + spc->spc_error = error; + + /* Socket Extensions for SCTP + * 5.3.1.1 SCTP_ASSOC_CHANGE + * + * spc_assoc_id: sizeof (sctp_assoc_t) + * + * The association id field, holds the identifier for the association. + * All notifications for a given association have the same association + * identifier. For TCP style socket, this field is ignored. 
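+	 * On one-to-many sockets this is the handle userspace hands
+	 * back in later calls (e.g. as sinfo_assoc_id) to name the
+	 * association.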
+ */ + sctp_ulpevent_set_owner(event, asoc); + spc->spc_assoc_id = sctp_assoc2id(asoc); + + /* Sockets API Extensions for SCTP + * Section 5.3.1.2 SCTP_PEER_ADDR_CHANGE + * + * spc_aaddr: sizeof (struct sockaddr_storage) + * + * The affected address field, holds the remote peer's address that is + * encountering the change of state. + */ + memcpy(&spc->spc_aaddr, aaddr, sizeof(struct sockaddr_storage)); + + /* Map ipv4 address into v4-mapped-on-v6 address. */ + sctp_get_pf_specific(asoc->base.sk->sk_family)->addr_to_user( + sctp_sk(asoc->base.sk), + (union sctp_addr *)&spc->spc_aaddr); + + return event; + +fail: + return NULL; +} + +/* Create and initialize an SCTP_REMOTE_ERROR notification. + * + * Note: This assumes that the chunk->skb->data already points to the + * operation error payload. + * + * Socket Extensions for SCTP - draft-01 + * 5.3.1.3 SCTP_REMOTE_ERROR + * + * A remote peer may send an Operational Error message to its peer. + * This message indicates a variety of error conditions on an + * association. The entire error TLV as it appears on the wire is + * included in a SCTP_REMOTE_ERROR event. Please refer to the SCTP + * specification [SCTP] and any extensions for a list of possible + * error formats. + */ +struct sctp_ulpevent * +sctp_ulpevent_make_remote_error(const struct sctp_association *asoc, + struct sctp_chunk *chunk, __u16 flags, + gfp_t gfp) +{ + struct sctp_ulpevent *event; + struct sctp_remote_error *sre; + struct sk_buff *skb; + sctp_errhdr_t *ch; + __be16 cause; + int elen; + + ch = (sctp_errhdr_t *)(chunk->skb->data); + cause = ch->cause; + elen = WORD_ROUND(ntohs(ch->length)) - sizeof(sctp_errhdr_t); + + /* Pull off the ERROR header. */ + skb_pull(chunk->skb, sizeof(sctp_errhdr_t)); + + /* Copy the skb to a new skb with room for us to prepend + * notification with. + */ + skb = skb_copy_expand(chunk->skb, sizeof(*sre), 0, gfp); + + /* Pull off the rest of the cause TLV from the chunk. */ + skb_pull(chunk->skb, elen); + if (!skb) + goto fail; + + /* Embed the event fields inside the cloned skb. */ + event = sctp_skb2event(skb); + sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize); + + sre = (struct sctp_remote_error *) skb_push(skb, sizeof(*sre)); + + /* Trim the buffer to the right length. */ + skb_trim(skb, sizeof(*sre) + elen); + + /* RFC6458, Section 6.1.3. SCTP_REMOTE_ERROR */ + memset(sre, 0, sizeof(*sre)); + sre->sre_type = SCTP_REMOTE_ERROR; + sre->sre_flags = 0; + sre->sre_length = skb->len; + sre->sre_error = cause; + sctp_ulpevent_set_owner(event, asoc); + sre->sre_assoc_id = sctp_assoc2id(asoc); + + return event; +fail: + return NULL; +} + +/* Create and initialize a SCTP_SEND_FAILED notification. + * + * Socket Extensions for SCTP - draft-01 + * 5.3.1.4 SCTP_SEND_FAILED + */ +struct sctp_ulpevent *sctp_ulpevent_make_send_failed( + const struct sctp_association *asoc, struct sctp_chunk *chunk, + __u16 flags, __u32 error, gfp_t gfp) +{ + struct sctp_ulpevent *event; + struct sctp_send_failed *ssf; + struct sk_buff *skb; + + /* Pull off any padding. */ + int len = ntohs(chunk->chunk_hdr->length); + + /* Make skb with more room so we can prepend notification. */ + skb = skb_copy_expand(chunk->skb, + sizeof(struct sctp_send_failed), /* headroom */ + 0, /* tailroom */ + gfp); + if (!skb) + goto fail; + + /* Pull off the common chunk header and DATA header. */ + skb_pull(skb, sizeof(struct sctp_data_chunk)); + len -= sizeof(struct sctp_data_chunk); + + /* Embed the event fields inside the cloned skb. 
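+ *
+ * (Editorial note, not part of the original patch: the headroom
+ * reserved by skb_copy_expand() above is exactly
+ * sizeof(struct sctp_send_failed), so the skb_push() below cannot
+ * fail. As a worked example, a DATA chunk with chunk_hdr->length 23
+ * carries 23 - sizeof(struct sctp_data_chunk) = 7 bytes of user data
+ * once the common chunk and DATA headers have been pulled off.)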
*/ + event = sctp_skb2event(skb); + sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize); + + ssf = (struct sctp_send_failed *) + skb_push(skb, sizeof(struct sctp_send_failed)); + + /* Socket Extensions for SCTP + * 5.3.1.4 SCTP_SEND_FAILED + * + * ssf_type: + * It should be SCTP_SEND_FAILED. + */ + ssf->ssf_type = SCTP_SEND_FAILED; + + /* Socket Extensions for SCTP + * 5.3.1.4 SCTP_SEND_FAILED + * + * ssf_flags: 16 bits (unsigned integer) + * The flag value will take one of the following values + * + * SCTP_DATA_UNSENT - Indicates that the data was never put on + * the wire. + * + * SCTP_DATA_SENT - Indicates that the data was put on the wire. + * Note that this does not necessarily mean that the + * data was (or was not) successfully delivered. + */ + ssf->ssf_flags = flags; + + /* Socket Extensions for SCTP + * 5.3.1.4 SCTP_SEND_FAILED + * + * ssf_length: sizeof (__u32) + * This field is the total length of the notification data, including + * the notification header. + */ + ssf->ssf_length = sizeof(struct sctp_send_failed) + len; + skb_trim(skb, ssf->ssf_length); + + /* Socket Extensions for SCTP + * 5.3.1.4 SCTP_SEND_FAILED + * + * ssf_error: 16 bits (unsigned integer) + * This value represents the reason why the send failed, and if set, + * will be a SCTP protocol error code as defined in [SCTP] section + * 3.3.10. + */ + ssf->ssf_error = error; + + /* Socket Extensions for SCTP + * 5.3.1.4 SCTP_SEND_FAILED + * + * ssf_info: sizeof (struct sctp_sndrcvinfo) + * The original send information associated with the undelivered + * message. + */ + memcpy(&ssf->ssf_info, &chunk->sinfo, sizeof(struct sctp_sndrcvinfo)); + + /* Per TSVWG discussion with Randy. Allow the application to + * reassemble a fragmented message. + */ + ssf->ssf_info.sinfo_flags = chunk->chunk_hdr->flags; + + /* Socket Extensions for SCTP + * 5.3.1.4 SCTP_SEND_FAILED + * + * ssf_assoc_id: sizeof (sctp_assoc_t) + * The association id field, sf_assoc_id, holds the identifier for the + * association. All notifications for a given association have the + * same association identifier. For TCP style socket, this field is + * ignored. + */ + sctp_ulpevent_set_owner(event, asoc); + ssf->ssf_assoc_id = sctp_assoc2id(asoc); + return event; + +fail: + return NULL; +} + +/* Create and initialize a SCTP_SHUTDOWN_EVENT notification. + * + * Socket Extensions for SCTP - draft-01 + * 5.3.1.5 SCTP_SHUTDOWN_EVENT + */ +struct sctp_ulpevent *sctp_ulpevent_make_shutdown_event( + const struct sctp_association *asoc, + __u16 flags, gfp_t gfp) +{ + struct sctp_ulpevent *event; + struct sctp_shutdown_event *sse; + struct sk_buff *skb; + + event = sctp_ulpevent_new(sizeof(struct sctp_shutdown_event), + MSG_NOTIFICATION, gfp); + if (!event) + goto fail; + + skb = sctp_event2skb(event); + sse = (struct sctp_shutdown_event *) + skb_put(skb, sizeof(struct sctp_shutdown_event)); + + /* Socket Extensions for SCTP + * 5.3.1.5 SCTP_SHUTDOWN_EVENT + * + * sse_type + * It should be SCTP_SHUTDOWN_EVENT + */ + sse->sse_type = SCTP_SHUTDOWN_EVENT; + + /* Socket Extensions for SCTP + * 5.3.1.5 SCTP_SHUTDOWN_EVENT + * + * sse_flags: 16 bits (unsigned integer) + * Currently unused. + */ + sse->sse_flags = 0; + + /* Socket Extensions for SCTP + * 5.3.1.5 SCTP_SHUTDOWN_EVENT + * + * sse_length: sizeof (__u32) + * This field is the total length of the notification data, including + * the notification header. 
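+ *
+ * (Editorial sketch, not part of the original patch: this event is
+ * only delivered to applications that subscribed to it, e.g. from
+ * userspace, where fd is an assumed SCTP socket:
+ *
+ *	struct sctp_event_subscribe events;
+ *
+ *	memset(&events, 0, sizeof(events));
+ *	events.sctp_shutdown_event = 1;
+ *	setsockopt(fd, IPPROTO_SCTP, SCTP_EVENTS,
+ *		   &events, sizeof(events));
+ *
+ * sctp_ulpevent_is_enabled() consults exactly this per-socket mask
+ * before the event is queued to the receive queue.)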
+ */ + sse->sse_length = sizeof(struct sctp_shutdown_event); + + /* Socket Extensions for SCTP + * 5.3.1.5 SCTP_SHUTDOWN_EVENT + * + * sse_assoc_id: sizeof (sctp_assoc_t) + * The association id field, holds the identifier for the association. + * All notifications for a given association have the same association + * identifier. For TCP style socket, this field is ignored. + */ + sctp_ulpevent_set_owner(event, asoc); + sse->sse_assoc_id = sctp_assoc2id(asoc); + + return event; + +fail: + return NULL; +} + +/* Create and initialize a SCTP_ADAPTATION_INDICATION notification. + * + * Socket Extensions for SCTP + * 5.3.1.6 SCTP_ADAPTATION_INDICATION + */ +struct sctp_ulpevent *sctp_ulpevent_make_adaptation_indication( + const struct sctp_association *asoc, gfp_t gfp) +{ + struct sctp_ulpevent *event; + struct sctp_adaptation_event *sai; + struct sk_buff *skb; + + event = sctp_ulpevent_new(sizeof(struct sctp_adaptation_event), + MSG_NOTIFICATION, gfp); + if (!event) + goto fail; + + skb = sctp_event2skb(event); + sai = (struct sctp_adaptation_event *) + skb_put(skb, sizeof(struct sctp_adaptation_event)); + + sai->sai_type = SCTP_ADAPTATION_INDICATION; + sai->sai_flags = 0; + sai->sai_length = sizeof(struct sctp_adaptation_event); + sai->sai_adaptation_ind = asoc->peer.adaptation_ind; + sctp_ulpevent_set_owner(event, asoc); + sai->sai_assoc_id = sctp_assoc2id(asoc); + + return event; + +fail: + return NULL; +} + +/* A message has been received. Package this message as a notification + * to pass it to the upper layers. Go ahead and calculate the sndrcvinfo + * even if filtered out later. + * + * Socket Extensions for SCTP + * 5.2.2 SCTP Header Information Structure (SCTP_SNDRCV) + */ +struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc, + struct sctp_chunk *chunk, + gfp_t gfp) +{ + struct sctp_ulpevent *event = NULL; + struct sk_buff *skb; + size_t padding, len; + int rx_count; + + /* + * check to see if we need to make space for this + * new skb, expand the rcvbuffer if needed, or drop + * the frame + */ + if (asoc->ep->rcvbuf_policy) + rx_count = atomic_read(&asoc->rmem_alloc); + else + rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc); + + if (rx_count >= asoc->base.sk->sk_rcvbuf) { + + if ((asoc->base.sk->sk_userlocks & SOCK_RCVBUF_LOCK) || + (!sk_rmem_schedule(asoc->base.sk, chunk->skb, + chunk->skb->truesize))) + goto fail; + } + + /* Clone the original skb, sharing the data. */ + skb = skb_clone(chunk->skb, gfp); + if (!skb) + goto fail; + + /* Now that all memory allocations for this chunk succeeded, we + * can mark it as received so the tsn_map is updated correctly. + */ + if (sctp_tsnmap_mark(&asoc->peer.tsn_map, + ntohl(chunk->subh.data_hdr->tsn), + chunk->transport)) + goto fail_mark; + + /* First calculate the padding, so we don't inadvertently + * pass up the wrong length to the user. + * + * RFC 2960 - Section 3.2 Chunk Field Descriptions + * + * The total length of a chunk(including Type, Length and Value fields) + * MUST be a multiple of 4 bytes. If the length of the chunk is not a + * multiple of 4 bytes, the sender MUST pad the chunk with all zero + * bytes and this padding is not included in the chunk length field. + * The sender should never pad with more than 3 bytes. The receiver + * MUST ignore the padding bytes. + */ + len = ntohs(chunk->chunk_hdr->length); + padding = WORD_ROUND(len) - len; + + /* Fixup cloned skb with just this chunks data. 
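+ *
+ * (Editorial note, not part of the original patch: as a worked
+ * example of the padding math above, a chunk with chunk_hdr->length
+ * 23 is padded on the wire to WORD_ROUND(23) = 24 bytes, so
+ * padding = 1 and the skb_trim() below drops that single pad byte
+ * before the data is handed to the user.)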
*/ + skb_trim(skb, chunk->chunk_end - padding - skb->data); + + /* Embed the event fields inside the cloned skb. */ + event = sctp_skb2event(skb); + + /* Initialize event with flags 0 and correct length + * Since this is a clone of the original skb, only account for + * the data of this chunk as other chunks will be accounted separately. + */ + sctp_ulpevent_init(event, 0, skb->len + sizeof(struct sk_buff)); + + sctp_ulpevent_receive_data(event, asoc); + + event->stream = ntohs(chunk->subh.data_hdr->stream); + event->ssn = ntohs(chunk->subh.data_hdr->ssn); + event->ppid = chunk->subh.data_hdr->ppid; + if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) { + event->flags |= SCTP_UNORDERED; + event->cumtsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map); + } + event->tsn = ntohl(chunk->subh.data_hdr->tsn); + event->msg_flags |= chunk->chunk_hdr->flags; + event->iif = sctp_chunk_iif(chunk); + + return event; + +fail_mark: + kfree_skb(skb); +fail: + return NULL; +} + +/* Create a partial delivery related event. + * + * 5.3.1.7 SCTP_PARTIAL_DELIVERY_EVENT + * + * When a receiver is engaged in a partial delivery of a + * message this notification will be used to indicate + * various events. + */ +struct sctp_ulpevent *sctp_ulpevent_make_pdapi( + const struct sctp_association *asoc, __u32 indication, + gfp_t gfp) +{ + struct sctp_ulpevent *event; + struct sctp_pdapi_event *pd; + struct sk_buff *skb; + + event = sctp_ulpevent_new(sizeof(struct sctp_pdapi_event), + MSG_NOTIFICATION, gfp); + if (!event) + goto fail; + + skb = sctp_event2skb(event); + pd = (struct sctp_pdapi_event *) + skb_put(skb, sizeof(struct sctp_pdapi_event)); + + /* pdapi_type + * It should be SCTP_PARTIAL_DELIVERY_EVENT + * + * pdapi_flags: 16 bits (unsigned integer) + * Currently unused. + */ + pd->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT; + pd->pdapi_flags = 0; + + /* pdapi_length: 32 bits (unsigned integer) + * + * This field is the total length of the notification data, including + * the notification header. It will generally be sizeof (struct + * sctp_pdapi_event). + */ + pd->pdapi_length = sizeof(struct sctp_pdapi_event); + + /* pdapi_indication: 32 bits (unsigned integer) + * + * This field holds the indication being sent to the application. + */ + pd->pdapi_indication = indication; + + /* pdapi_assoc_id: sizeof (sctp_assoc_t) + * + * The association id field, holds the identifier for the association. + */ + sctp_ulpevent_set_owner(event, asoc); + pd->pdapi_assoc_id = sctp_assoc2id(asoc); + + return event; +fail: + return NULL; +} + +struct sctp_ulpevent *sctp_ulpevent_make_authkey( + const struct sctp_association *asoc, __u16 key_id, + __u32 indication, gfp_t gfp) +{ + struct sctp_ulpevent *event; + struct sctp_authkey_event *ak; + struct sk_buff *skb; + + event = sctp_ulpevent_new(sizeof(struct sctp_authkey_event), + MSG_NOTIFICATION, gfp); + if (!event) + goto fail; + + skb = sctp_event2skb(event); + ak = (struct sctp_authkey_event *) + skb_put(skb, sizeof(struct sctp_authkey_event)); + + ak->auth_type = SCTP_AUTHENTICATION_EVENT; + ak->auth_flags = 0; + ak->auth_length = sizeof(struct sctp_authkey_event); + + ak->auth_keynumber = key_id; + ak->auth_altkeynumber = 0; + ak->auth_indication = indication; + + /* + * The association id field, holds the identifier for the association. + */ + sctp_ulpevent_set_owner(event, asoc); + ak->auth_assoc_id = sctp_assoc2id(asoc); + + return event; +fail: + return NULL; +} + +/* + * Socket Extensions for SCTP + * 6.3.10. 
SCTP_SENDER_DRY_EVENT + */ +struct sctp_ulpevent *sctp_ulpevent_make_sender_dry_event( + const struct sctp_association *asoc, gfp_t gfp) +{ + struct sctp_ulpevent *event; + struct sctp_sender_dry_event *sdry; + struct sk_buff *skb; + + event = sctp_ulpevent_new(sizeof(struct sctp_sender_dry_event), + MSG_NOTIFICATION, gfp); + if (!event) + return NULL; + + skb = sctp_event2skb(event); + sdry = (struct sctp_sender_dry_event *) + skb_put(skb, sizeof(struct sctp_sender_dry_event)); + + sdry->sender_dry_type = SCTP_SENDER_DRY_EVENT; + sdry->sender_dry_flags = 0; + sdry->sender_dry_length = sizeof(struct sctp_sender_dry_event); + sctp_ulpevent_set_owner(event, asoc); + sdry->sender_dry_assoc_id = sctp_assoc2id(asoc); + + return event; +} + +/* Return the notification type, assuming this is a notification + * event. + */ +__u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event) +{ + union sctp_notification *notification; + struct sk_buff *skb; + + skb = sctp_event2skb(event); + notification = (union sctp_notification *) skb->data; + return notification->sn_header.sn_type; +} + +/* RFC6458, Section 5.3.2. SCTP Header Information Structure + * (SCTP_SNDRCV, DEPRECATED) + */ +void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event, + struct msghdr *msghdr) +{ + struct sctp_sndrcvinfo sinfo; + + if (sctp_ulpevent_is_notification(event)) + return; + + memset(&sinfo, 0, sizeof(sinfo)); + sinfo.sinfo_stream = event->stream; + sinfo.sinfo_ssn = event->ssn; + sinfo.sinfo_ppid = event->ppid; + sinfo.sinfo_flags = event->flags; + sinfo.sinfo_tsn = event->tsn; + sinfo.sinfo_cumtsn = event->cumtsn; + sinfo.sinfo_assoc_id = sctp_assoc2id(event->asoc); + /* Context value that is set via SCTP_CONTEXT socket option. */ + sinfo.sinfo_context = event->asoc->default_rcv_context; + /* These fields are not used while receiving. */ + sinfo.sinfo_timetolive = 0; + + put_cmsg(msghdr, IPPROTO_SCTP, SCTP_SNDRCV, + sizeof(sinfo), &sinfo); +} + +/* RFC6458, Section 5.3.5 SCTP Receive Information Structure + * (SCTP_SNDRCV) + */ +void sctp_ulpevent_read_rcvinfo(const struct sctp_ulpevent *event, + struct msghdr *msghdr) +{ + struct sctp_rcvinfo rinfo; + + if (sctp_ulpevent_is_notification(event)) + return; + + memset(&rinfo, 0, sizeof(struct sctp_rcvinfo)); + rinfo.rcv_sid = event->stream; + rinfo.rcv_ssn = event->ssn; + rinfo.rcv_ppid = event->ppid; + rinfo.rcv_flags = event->flags; + rinfo.rcv_tsn = event->tsn; + rinfo.rcv_cumtsn = event->cumtsn; + rinfo.rcv_assoc_id = sctp_assoc2id(event->asoc); + rinfo.rcv_context = event->asoc->default_rcv_context; + + put_cmsg(msghdr, IPPROTO_SCTP, SCTP_RCVINFO, + sizeof(rinfo), &rinfo); +} + +/* RFC6458, Section 5.3.6. 
SCTP Next Receive Information Structure + * (SCTP_NXTINFO) + */ +static void __sctp_ulpevent_read_nxtinfo(const struct sctp_ulpevent *event, + struct msghdr *msghdr, + const struct sk_buff *skb) +{ + struct sctp_nxtinfo nxtinfo; + + memset(&nxtinfo, 0, sizeof(nxtinfo)); + nxtinfo.nxt_sid = event->stream; + nxtinfo.nxt_ppid = event->ppid; + nxtinfo.nxt_flags = event->flags; + if (sctp_ulpevent_is_notification(event)) + nxtinfo.nxt_flags |= SCTP_NOTIFICATION; + nxtinfo.nxt_length = skb->len; + nxtinfo.nxt_assoc_id = sctp_assoc2id(event->asoc); + + put_cmsg(msghdr, IPPROTO_SCTP, SCTP_NXTINFO, + sizeof(nxtinfo), &nxtinfo); +} + +void sctp_ulpevent_read_nxtinfo(const struct sctp_ulpevent *event, + struct msghdr *msghdr, + struct sock *sk) +{ + struct sk_buff *skb; + int err; + + skb = sctp_skb_recv_datagram(sk, MSG_PEEK, 1, &err); + if (skb != NULL) { + __sctp_ulpevent_read_nxtinfo(sctp_skb2event(skb), + msghdr, skb); + /* Just release refcount here. */ + kfree_skb(skb); + } +} + +/* Do accounting for bytes received and hold a reference to the association + * for each skb. + */ +static void sctp_ulpevent_receive_data(struct sctp_ulpevent *event, + struct sctp_association *asoc) +{ + struct sk_buff *skb, *frag; + + skb = sctp_event2skb(event); + /* Set the owner and charge rwnd for bytes received. */ + sctp_ulpevent_set_owner(event, asoc); + sctp_assoc_rwnd_decrease(asoc, skb_headlen(skb)); + + if (!skb->data_len) + return; + + /* Note: Not clearing the entire event struct as this is just a + * fragment of the real event. However, we still need to do rwnd + * accounting. + * In general, the skb passed from IP can have only 1 level of + * fragments. But we allow multiple levels of fragments. + */ + skb_walk_frags(skb, frag) + sctp_ulpevent_receive_data(sctp_skb2event(frag), asoc); +} + +/* Do accounting for bytes just read by user and release the references to + * the association. + */ +static void sctp_ulpevent_release_data(struct sctp_ulpevent *event) +{ + struct sk_buff *skb, *frag; + unsigned int len; + + /* Current stack structures assume that the rcv buffer is + * per socket. For UDP style sockets this is not true as + * multiple associations may be on a single UDP-style socket. + * Use the local private area of the skb to track the owning + * association. + */ + + skb = sctp_event2skb(event); + len = skb->len; + + if (!skb->data_len) + goto done; + + /* Don't forget the fragments. */ + skb_walk_frags(skb, frag) { + /* NOTE: skb_shinfos are recursive. Although IP returns + * skb's with only 1 level of fragments, SCTP reassembly can + * increase the levels. + */ + sctp_ulpevent_release_frag_data(sctp_skb2event(frag)); + } + +done: + sctp_assoc_rwnd_increase(event->asoc, len); + sctp_ulpevent_release_owner(event); +} + +static void sctp_ulpevent_release_frag_data(struct sctp_ulpevent *event) +{ + struct sk_buff *skb, *frag; + + skb = sctp_event2skb(event); + + if (!skb->data_len) + goto done; + + /* Don't forget the fragments. */ + skb_walk_frags(skb, frag) { + /* NOTE: skb_shinfos are recursive. Although IP returns + * skb's with only 1 level of fragments, SCTP reassembly can + * increase the levels. + */ + sctp_ulpevent_release_frag_data(sctp_skb2event(frag)); + } + +done: + sctp_ulpevent_release_owner(event); +} + +/* Free a ulpevent that has an owner. It includes releasing the reference + * to the owner, updating the rwnd in case of a DATA event and freeing the + * skb. 
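+ *
+ * (Editorial note, not part of the original patch: this is the
+ * counterpart of sctp_ulpevent_set_owner() above. Every event that
+ * was pinned to an association must eventually pass through here so
+ * that the association reference and, for DATA events, the receive
+ * window accounting are both given back.)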
+ */ +void sctp_ulpevent_free(struct sctp_ulpevent *event) +{ + if (sctp_ulpevent_is_notification(event)) + sctp_ulpevent_release_owner(event); + else + sctp_ulpevent_release_data(event); + + kfree_skb(sctp_event2skb(event)); +} + +/* Purge the skb lists holding ulpevents. */ +unsigned int sctp_queue_purge_ulpevents(struct sk_buff_head *list) +{ + struct sk_buff *skb; + unsigned int data_unread = 0; + + while ((skb = skb_dequeue(list)) != NULL) { + struct sctp_ulpevent *event = sctp_skb2event(skb); + + if (!sctp_ulpevent_is_notification(event)) + data_unread += skb->len; + + sctp_ulpevent_free(event); + } + + return data_unread; +} diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c new file mode 100644 index 000000000..ce469d648 --- /dev/null +++ b/net/sctp/ulpqueue.c @@ -0,0 +1,1144 @@ +/* SCTP kernel implementation + * (C) Copyright IBM Corp. 2001, 2004 + * Copyright (c) 1999-2000 Cisco, Inc. + * Copyright (c) 1999-2001 Motorola, Inc. + * Copyright (c) 2001 Intel Corp. + * Copyright (c) 2001 Nokia, Inc. + * Copyright (c) 2001 La Monte H.P. Yarroll + * + * This abstraction carries sctp events to the ULP (sockets). + * + * This SCTP implementation is free software; + * you can redistribute it and/or modify it under the terms of + * the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This SCTP implementation is distributed in the hope that it + * will be useful, but WITHOUT ANY WARRANTY; without even the implied + * ************************ + * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with GNU CC; see the file COPYING. If not, see + * <http://www.gnu.org/licenses/>. + * + * Please send any bug reports or fixes you make to the + * email address(es): + * lksctp developers <linux-sctp@vger.kernel.org> + * + * Written or modified by: + * Jon Grimm <jgrimm@us.ibm.com> + * La Monte H.P. Yarroll <piggy@acm.org> + * Sridhar Samudrala <sri@us.ibm.com> + */ + +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/skbuff.h> +#include <net/sock.h> +#include <net/busy_poll.h> +#include <net/sctp/structs.h> +#include <net/sctp/sctp.h> +#include <net/sctp/sm.h> + +/* Forward declarations for internal helpers. */ +static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq, + struct sctp_ulpevent *); +static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *, + struct sctp_ulpevent *); +static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq); + +/* 1st Level Abstractions */ + +/* Initialize a ULP queue from a block of memory. */ +struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq, + struct sctp_association *asoc) +{ + memset(ulpq, 0, sizeof(struct sctp_ulpq)); + + ulpq->asoc = asoc; + skb_queue_head_init(&ulpq->reasm); + skb_queue_head_init(&ulpq->lobby); + ulpq->pd_mode = 0; + + return ulpq; +} + + +/* Flush the reassembly and ordering queues. */ +void sctp_ulpq_flush(struct sctp_ulpq *ulpq) +{ + struct sk_buff *skb; + struct sctp_ulpevent *event; + + while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) { + event = sctp_skb2event(skb); + sctp_ulpevent_free(event); + } + + while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) { + event = sctp_skb2event(skb); + sctp_ulpevent_free(event); + } + +} + +/* Dispose of a ulpqueue. 
*/ +void sctp_ulpq_free(struct sctp_ulpq *ulpq) +{ + sctp_ulpq_flush(ulpq); +} + +/* Process an incoming DATA chunk. */ +int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk, + gfp_t gfp) +{ + struct sk_buff_head temp; + struct sctp_ulpevent *event; + int event_eor = 0; + + /* Create an event from the incoming chunk. */ + event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp); + if (!event) + return -ENOMEM; + + /* Do reassembly if needed. */ + event = sctp_ulpq_reasm(ulpq, event); + + /* Do ordering if needed. */ + if ((event) && (event->msg_flags & MSG_EOR)) { + /* Create a temporary list to collect chunks on. */ + skb_queue_head_init(&temp); + __skb_queue_tail(&temp, sctp_event2skb(event)); + + event = sctp_ulpq_order(ulpq, event); + } + + /* Send event to the ULP. 'event' is the sctp_ulpevent for + * very first SKB on the 'temp' list. + */ + if (event) { + event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0; + sctp_ulpq_tail_event(ulpq, event); + } + + return event_eor; +} + +/* Add a new event for propagation to the ULP. */ +/* Clear the partial delivery mode for this socket. Note: This + * assumes that no association is currently in partial delivery mode. + */ +int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc) +{ + struct sctp_sock *sp = sctp_sk(sk); + + if (atomic_dec_and_test(&sp->pd_mode)) { + /* This means there are no other associations in PD, so + * we can go ahead and clear out the lobby in one shot + */ + if (!skb_queue_empty(&sp->pd_lobby)) { + struct list_head *list; + sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue); + list = (struct list_head *)&sctp_sk(sk)->pd_lobby; + INIT_LIST_HEAD(list); + return 1; + } + } else { + /* There are other associations in PD, so we only need to + * pull stuff out of the lobby that belongs to the + * associations that is exiting PD (all of its notifications + * are posted here). + */ + if (!skb_queue_empty(&sp->pd_lobby) && asoc) { + struct sk_buff *skb, *tmp; + struct sctp_ulpevent *event; + + sctp_skb_for_each(skb, &sp->pd_lobby, tmp) { + event = sctp_skb2event(skb); + if (event->asoc == asoc) { + __skb_unlink(skb, &sp->pd_lobby); + __skb_queue_tail(&sk->sk_receive_queue, + skb); + } + } + } + } + + return 0; +} + +/* Set the pd_mode on the socket and ulpq */ +static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq) +{ + struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk); + + atomic_inc(&sp->pd_mode); + ulpq->pd_mode = 1; +} + +/* Clear the pd_mode and restart any pending messages waiting for delivery. */ +static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq) +{ + ulpq->pd_mode = 0; + sctp_ulpq_reasm_drain(ulpq); + return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc); +} + +/* If the SKB of 'event' is on a list, it is the first such member + * of that list. + */ +int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) +{ + struct sock *sk = ulpq->asoc->base.sk; + struct sk_buff_head *queue, *skb_list; + struct sk_buff *skb = sctp_event2skb(event); + int clear_pd = 0; + + skb_list = (struct sk_buff_head *) skb->prev; + + /* If the socket is just going to throw this away, do not + * even try to deliver it. + */ + if (sock_flag(sk, SOCK_DEAD) || (sk->sk_shutdown & RCV_SHUTDOWN)) + goto out_free; + + if (!sctp_ulpevent_is_notification(event)) { + sk_mark_napi_id(sk, skb); + sk_incoming_cpu_update(sk); + } + /* Check if the user wishes to receive this event. 
*/ + if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe)) + goto out_free; + + /* If we are in partial delivery mode, post to the lobby until + * partial delivery is cleared, unless, of course _this_ is + * the association the cause of the partial delivery. + */ + + if (atomic_read(&sctp_sk(sk)->pd_mode) == 0) { + queue = &sk->sk_receive_queue; + } else { + if (ulpq->pd_mode) { + /* If the association is in partial delivery, we + * need to finish delivering the partially processed + * packet before passing any other data. This is + * because we don't truly support stream interleaving. + */ + if ((event->msg_flags & MSG_NOTIFICATION) || + (SCTP_DATA_NOT_FRAG == + (event->msg_flags & SCTP_DATA_FRAG_MASK))) + queue = &sctp_sk(sk)->pd_lobby; + else { + clear_pd = event->msg_flags & MSG_EOR; + queue = &sk->sk_receive_queue; + } + } else { + /* + * If fragment interleave is enabled, we + * can queue this to the receive queue instead + * of the lobby. + */ + if (sctp_sk(sk)->frag_interleave) + queue = &sk->sk_receive_queue; + else + queue = &sctp_sk(sk)->pd_lobby; + } + } + + /* If we are harvesting multiple skbs they will be + * collected on a list. + */ + if (skb_list) + sctp_skb_list_tail(skb_list, queue); + else + __skb_queue_tail(queue, skb); + + /* Did we just complete partial delivery and need to get + * rolling again? Move pending data to the receive + * queue. + */ + if (clear_pd) + sctp_ulpq_clear_pd(ulpq); + + if (queue == &sk->sk_receive_queue) + sk->sk_data_ready(sk); + return 1; + +out_free: + if (skb_list) + sctp_queue_purge_ulpevents(skb_list); + else + sctp_ulpevent_free(event); + + return 0; +} + +/* 2nd Level Abstractions */ + +/* Helper function to store chunks that need to be reassembled. */ +static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq, + struct sctp_ulpevent *event) +{ + struct sk_buff *pos; + struct sctp_ulpevent *cevent; + __u32 tsn, ctsn; + + tsn = event->tsn; + + /* See if it belongs at the end. */ + pos = skb_peek_tail(&ulpq->reasm); + if (!pos) { + __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event)); + return; + } + + /* Short circuit just dropping it at the end. */ + cevent = sctp_skb2event(pos); + ctsn = cevent->tsn; + if (TSN_lt(ctsn, tsn)) { + __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event)); + return; + } + + /* Find the right place in this list. We store them by TSN. */ + skb_queue_walk(&ulpq->reasm, pos) { + cevent = sctp_skb2event(pos); + ctsn = cevent->tsn; + + if (TSN_lt(tsn, ctsn)) + break; + } + + /* Insert before pos. */ + __skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event)); + +} + +/* Helper function to return an event corresponding to the reassembled + * datagram. + * This routine creates a re-assembled skb given the first and last skb's + * as stored in the reassembly queue. The skb's may be non-linear if the sctp + * payload was fragmented on the way and ip had to reassemble them. + * We add the rest of skb's to the first skb's fraglist. + */ +static struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net, + struct sk_buff_head *queue, struct sk_buff *f_frag, + struct sk_buff *l_frag) +{ + struct sk_buff *pos; + struct sk_buff *new = NULL; + struct sctp_ulpevent *event; + struct sk_buff *pnext, *last; + struct sk_buff *list = skb_shinfo(f_frag)->frag_list; + + /* Store the pointer to the 2nd skb */ + if (f_frag == l_frag) + pos = NULL; + else + pos = f_frag->next; + + /* Get the last skb in the f_frag's frag_list if present. 
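+ *
+ * (Editorial note, not part of the original patch: after the splice
+ * below the first fragment looks like
+ *
+ *	f_frag->frag_list: existing frags -> f_frag->next ... l_frag
+ *
+ * i.e. fragments IP already attached to f_frag are kept, and the
+ * remaining fragments queued behind f_frag are appended after them.)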
*/ + for (last = list; list; last = list, list = list->next) + ; + + /* Add the list of remaining fragments to the first fragments + * frag_list. + */ + if (last) + last->next = pos; + else { + if (skb_cloned(f_frag)) { + /* This is a cloned skb, we can't just modify + * the frag_list. We need a new skb to do that. + * Instead of calling skb_unshare(), we'll do it + * ourselves since we need to delay the free. + */ + new = skb_copy(f_frag, GFP_ATOMIC); + if (!new) + return NULL; /* try again later */ + + sctp_skb_set_owner_r(new, f_frag->sk); + + skb_shinfo(new)->frag_list = pos; + } else + skb_shinfo(f_frag)->frag_list = pos; + } + + /* Remove the first fragment from the reassembly queue. */ + __skb_unlink(f_frag, queue); + + /* if we did unshare, then free the old skb and re-assign */ + if (new) { + kfree_skb(f_frag); + f_frag = new; + } + + while (pos) { + + pnext = pos->next; + + /* Update the len and data_len fields of the first fragment. */ + f_frag->len += pos->len; + f_frag->data_len += pos->len; + + /* Remove the fragment from the reassembly queue. */ + __skb_unlink(pos, queue); + + /* Break if we have reached the last fragment. */ + if (pos == l_frag) + break; + pos->next = pnext; + pos = pnext; + } + + event = sctp_skb2event(f_frag); + SCTP_INC_STATS(net, SCTP_MIB_REASMUSRMSGS); + + return event; +} + + +/* Helper function to check if an incoming chunk has filled up the last + * missing fragment in a SCTP datagram and return the corresponding event. + */ +static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq) +{ + struct sk_buff *pos; + struct sctp_ulpevent *cevent; + struct sk_buff *first_frag = NULL; + __u32 ctsn, next_tsn; + struct sctp_ulpevent *retval = NULL; + struct sk_buff *pd_first = NULL; + struct sk_buff *pd_last = NULL; + size_t pd_len = 0; + struct sctp_association *asoc; + u32 pd_point; + + /* Initialized to 0 just to avoid compiler warning message. Will + * never be used with this value. It is referenced only after it + * is set when we find the first fragment of a message. + */ + next_tsn = 0; + + /* The chunks are held in the reasm queue sorted by TSN. + * Walk through the queue sequentially and look for a sequence of + * fragmented chunks that complete a datagram. + * 'first_frag' and next_tsn are reset when we find a chunk which + * is the first fragment of a datagram. Once these 2 fields are set + * we expect to find the remaining middle fragments and the last + * fragment in order. If not, first_frag is reset to NULL and we + * start the next pass when we find another first fragment. + * + * There is a potential to do partial delivery if user sets + * SCTP_PARTIAL_DELIVERY_POINT option. Lets count some things here + * to see if can do PD. + */ + skb_queue_walk(&ulpq->reasm, pos) { + cevent = sctp_skb2event(pos); + ctsn = cevent->tsn; + + switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) { + case SCTP_DATA_FIRST_FRAG: + /* If this "FIRST_FRAG" is the first + * element in the queue, then count it towards + * possible PD. 
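+ *
+ * (Editorial note, not part of the original patch: a worked example
+ * of the pd_point check further down. With a delivery point of 4096
+ * set via SCTP_PARTIAL_DELIVERY_POINT and three in-sequence
+ * fragments of 1500 bytes queued, pd_len = 4500 >= pd_point, so the
+ * run of fragments may be handed to the user before the LAST_FRAG
+ * has even arrived.)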
+ */
+ if (pos == ulpq->reasm.next) {
+ pd_first = pos;
+ pd_last = pos;
+ pd_len = pos->len;
+ } else {
+ pd_first = NULL;
+ pd_last = NULL;
+ pd_len = 0;
+ }
+
+ first_frag = pos;
+ next_tsn = ctsn + 1;
+ break;
+
+ case SCTP_DATA_MIDDLE_FRAG:
+ if ((first_frag) && (ctsn == next_tsn)) {
+ next_tsn++;
+ if (pd_first) {
+ pd_last = pos;
+ pd_len += pos->len;
+ }
+ } else
+ first_frag = NULL;
+ break;
+
+ case SCTP_DATA_LAST_FRAG:
+ if (first_frag && (ctsn == next_tsn))
+ goto found;
+ else
+ first_frag = NULL;
+ break;
+ }
+ }
+
+ asoc = ulpq->asoc;
+ if (pd_first) {
+ /* Make sure we can enter partial delivery.
+ * We can trigger partial delivery only if fragment
+ * interleave is set, or the socket is not already
+ * in partial delivery.
+ */
+ if (!sctp_sk(asoc->base.sk)->frag_interleave &&
+ atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))
+ goto done;
+
+ cevent = sctp_skb2event(pd_first);
+ pd_point = sctp_sk(asoc->base.sk)->pd_point;
+ if (pd_point && pd_point <= pd_len) {
+ retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
+ &ulpq->reasm,
+ pd_first,
+ pd_last);
+ if (retval)
+ sctp_ulpq_set_pd(ulpq);
+ }
+ }
+done:
+ return retval;
+found:
+ retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
+ &ulpq->reasm, first_frag, pos);
+ if (retval)
+ retval->msg_flags |= MSG_EOR;
+ goto done;
+}
+
+/* Retrieve the next set of fragments of a partial message. */
+static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
+{
+ struct sk_buff *pos, *last_frag, *first_frag;
+ struct sctp_ulpevent *cevent;
+ __u32 ctsn, next_tsn;
+ int is_last;
+ struct sctp_ulpevent *retval;
+
+ /* The chunks are held in the reasm queue sorted by TSN.
+ * Walk through the queue sequentially and look for the first
+ * sequence of fragmented chunks.
+ */
+
+ if (skb_queue_empty(&ulpq->reasm))
+ return NULL;
+
+ last_frag = first_frag = NULL;
+ retval = NULL;
+ next_tsn = 0;
+ is_last = 0;
+
+ skb_queue_walk(&ulpq->reasm, pos) {
+ cevent = sctp_skb2event(pos);
+ ctsn = cevent->tsn;
+
+ switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
+ case SCTP_DATA_FIRST_FRAG:
+ if (!first_frag)
+ return NULL;
+ goto done;
+ case SCTP_DATA_MIDDLE_FRAG:
+ if (!first_frag) {
+ first_frag = pos;
+ next_tsn = ctsn + 1;
+ last_frag = pos;
+ } else if (next_tsn == ctsn) {
+ next_tsn++;
+ last_frag = pos;
+ } else
+ goto done;
+ break;
+ case SCTP_DATA_LAST_FRAG:
+ if (!first_frag)
+ first_frag = pos;
+ else if (ctsn != next_tsn)
+ goto done;
+ last_frag = pos;
+ is_last = 1;
+ goto done;
+ default:
+ return NULL;
+ }
+ }
+
+ /* We have the reassembled event. There is no need to look
+ * further.
+ */
+done:
+ retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
+ &ulpq->reasm, first_frag, last_frag);
+ if (retval && is_last)
+ retval->msg_flags |= MSG_EOR;
+
+ return retval;
+}
+
+
+/* Helper function to reassemble chunks. Hold chunks on the reasm queue that
+ * need reassembling.
+ */
+static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
+ struct sctp_ulpevent *event)
+{
+ struct sctp_ulpevent *retval = NULL;
+
+ /* Check if this is part of a fragmented message. */
+ if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
+ event->msg_flags |= MSG_EOR;
+ return event;
+ }
+
+ sctp_ulpq_store_reasm(ulpq, event);
+ if (!ulpq->pd_mode)
+ retval = sctp_ulpq_retrieve_reassembled(ulpq);
+ else {
+ __u32 ctsn, ctsnap;
+
+ /* Do not even bother unless this is the next tsn to
+ * be delivered.
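+ *
+ * (Editorial note, not part of the original patch: for example, with
+ * a cumulative TSN ACK point of 1000, a fragment with tsn 1005 that
+ * arrives while 1001-1004 are still missing fails the TSN_lte()
+ * check below and is simply held, whereas a fragment that advances
+ * the ACK point triggers another sctp_ulpq_retrieve_partial() pass.)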
+ */
+ ctsn = event->tsn;
+ ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
+ if (TSN_lte(ctsn, ctsnap))
+ retval = sctp_ulpq_retrieve_partial(ulpq);
+ }
+
+ return retval;
+}
+
+/* Retrieve the first part (sequential fragments) for partial delivery. */
+static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
+{
+ struct sk_buff *pos, *last_frag, *first_frag;
+ struct sctp_ulpevent *cevent;
+ __u32 ctsn, next_tsn;
+ struct sctp_ulpevent *retval;
+
+ /* The chunks are held in the reasm queue sorted by TSN.
+ * Walk through the queue sequentially and look for a sequence of
+ * fragmented chunks that start a datagram.
+ */
+
+ if (skb_queue_empty(&ulpq->reasm))
+ return NULL;
+
+ last_frag = first_frag = NULL;
+ retval = NULL;
+ next_tsn = 0;
+
+ skb_queue_walk(&ulpq->reasm, pos) {
+ cevent = sctp_skb2event(pos);
+ ctsn = cevent->tsn;
+
+ switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
+ case SCTP_DATA_FIRST_FRAG:
+ if (!first_frag) {
+ first_frag = pos;
+ next_tsn = ctsn + 1;
+ last_frag = pos;
+ } else
+ goto done;
+ break;
+
+ case SCTP_DATA_MIDDLE_FRAG:
+ if (!first_frag)
+ return NULL;
+ if (ctsn == next_tsn) {
+ next_tsn++;
+ last_frag = pos;
+ } else
+ goto done;
+ break;
+
+ case SCTP_DATA_LAST_FRAG:
+ if (!first_frag)
+ return NULL;
+ else
+ goto done;
+ break;
+
+ default:
+ return NULL;
+ }
+ }
+
+ /* We have the reassembled event. There is no need to look
+ * further.
+ */
+done:
+ retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
+ &ulpq->reasm, first_frag, last_frag);
+ return retval;
+}
+
+/*
+ * Flush out stale fragments from the reassembly queue when processing
+ * a Forward TSN.
+ *
+ * RFC 3758, Section 3.6
+ *
+ * After receiving and processing a FORWARD TSN, the data receiver MUST
+ * take cautions in updating its re-assembly queue. The receiver MUST
+ * remove any partially reassembled message, which is still missing one
+ * or more TSNs earlier than or equal to the new cumulative TSN point.
+ * In the event that the receiver has invoked the partial delivery API,
+ * a notification SHOULD also be generated to inform the upper layer API
+ * that the message being partially delivered will NOT be completed.
+ */
+void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
+{
+ struct sk_buff *pos, *tmp;
+ struct sctp_ulpevent *event;
+ __u32 tsn;
+
+ if (skb_queue_empty(&ulpq->reasm))
+ return;
+
+ skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
+ event = sctp_skb2event(pos);
+ tsn = event->tsn;
+
+ /* Since the entire message must be abandoned by the
+ * sender (item A3 in Section 3.5, RFC 3758), we can
+ * free all fragments on the list that are less than
+ * or equal to ctsn_point.
+ */
+ if (TSN_lte(tsn, fwd_tsn)) {
+ __skb_unlink(pos, &ulpq->reasm);
+ sctp_ulpevent_free(event);
+ } else
+ break;
+ }
+}
+
+/*
+ * Drain the reassembly queue. If we just cleared partial delivery, it
+ * is possible that the reassembly queue will contain already reassembled
+ * messages. Retrieve any such messages and give them to the user.
+ */
+static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
+{
+ struct sctp_ulpevent *event = NULL;
+ struct sk_buff_head temp;
+
+ if (skb_queue_empty(&ulpq->reasm))
+ return;
+
+ while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
+ /* Do ordering if needed. */
+ if ((event) && (event->msg_flags & MSG_EOR)) {
+ skb_queue_head_init(&temp);
+ __skb_queue_tail(&temp, sctp_event2skb(event));
+
+ event = sctp_ulpq_order(ulpq, event);
+ }
+
+ /* Send event to the ULP.
'event' is the
+ * sctp_ulpevent for very first SKB on the 'temp' list.
+ */
+ if (event)
+ sctp_ulpq_tail_event(ulpq, event);
+ }
+}
+
+
+/* Helper function to gather skbs that have possibly become
+ * ordered by an incoming chunk.
+ */
+static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
+ struct sctp_ulpevent *event)
+{
+ struct sk_buff_head *event_list;
+ struct sk_buff *pos, *tmp;
+ struct sctp_ulpevent *cevent;
+ struct sctp_stream *in;
+ __u16 sid, csid, cssn;
+
+ sid = event->stream;
+ in = &ulpq->asoc->ssnmap->in;
+
+ event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;
+
+ /* We are holding the chunks by stream, by SSN. */
+ sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
+ cevent = (struct sctp_ulpevent *) pos->cb;
+ csid = cevent->stream;
+ cssn = cevent->ssn;
+
+ /* Have we gone too far? */
+ if (csid > sid)
+ break;
+
+ /* Have we not gone far enough? */
+ if (csid < sid)
+ continue;
+
+ if (cssn != sctp_ssn_peek(in, sid))
+ break;
+
+ /* Found it, so mark in the ssnmap. */
+ sctp_ssn_next(in, sid);
+
+ __skb_unlink(pos, &ulpq->lobby);
+
+ /* Attach all gathered skbs to the event. */
+ __skb_queue_tail(event_list, pos);
+ }
+}
+
+/* Helper function to store chunks needing ordering. */
+static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
+ struct sctp_ulpevent *event)
+{
+ struct sk_buff *pos;
+ struct sctp_ulpevent *cevent;
+ __u16 sid, csid;
+ __u16 ssn, cssn;
+
+ pos = skb_peek_tail(&ulpq->lobby);
+ if (!pos) {
+ __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
+ return;
+ }
+
+ sid = event->stream;
+ ssn = event->ssn;
+
+ cevent = (struct sctp_ulpevent *) pos->cb;
+ csid = cevent->stream;
+ cssn = cevent->ssn;
+ if (sid > csid) {
+ __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
+ return;
+ }
+
+ if ((sid == csid) && SSN_lt(cssn, ssn)) {
+ __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
+ return;
+ }
+
+ /* Find the right place in this list. We store them by
+ * stream ID and then by SSN.
+ */
+ skb_queue_walk(&ulpq->lobby, pos) {
+ cevent = (struct sctp_ulpevent *) pos->cb;
+ csid = cevent->stream;
+ cssn = cevent->ssn;
+
+ if (csid > sid)
+ break;
+ if (csid == sid && SSN_lt(ssn, cssn))
+ break;
+ }
+
+
+ /* Insert before pos. */
+ __skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
+}
+
+static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
+ struct sctp_ulpevent *event)
+{
+ __u16 sid, ssn;
+ struct sctp_stream *in;
+
+ /* Check if this message needs ordering. */
+ if (SCTP_DATA_UNORDERED & event->msg_flags)
+ return event;
+
+ /* Note: The stream ID must be verified before this routine. */
+ sid = event->stream;
+ ssn = event->ssn;
+ in = &ulpq->asoc->ssnmap->in;
+
+ /* Is this the expected SSN for this stream ID? */
+ if (ssn != sctp_ssn_peek(in, sid)) {
+ /* We've received something out of order, so find where it
+ * needs to be placed. We order by stream and then by SSN.
+ */
+ sctp_ulpq_store_ordered(ulpq, event);
+ return NULL;
+ }
+
+ /* Mark that the next chunk has been found. */
+ sctp_ssn_next(in, sid);
+
+ /* Go find any other chunks that were waiting for
+ * ordering.
+ */
+ sctp_ulpq_retrieve_ordered(ulpq, event);
+
+ return event;
+}
+
+/* Helper function to gather skbs that have possibly become
+ * ordered by forward tsn skipping their dependencies.
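+ *
+ * (Editorial note, not part of the original patch: for example, if
+ * stream 2 is blocked waiting for SSN 5 and a FORWARD TSN makes the
+ * caller, sctp_ulpq_skip(), advance the expected SSN past 5 via
+ * sctp_ssn_skip(), every lobby event on stream 2 whose SSN is now
+ * below the expected value is reaped here and delivered in one
+ * batch.)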
+ */
+static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
+{
+ struct sk_buff *pos, *tmp;
+ struct sctp_ulpevent *cevent;
+ struct sctp_ulpevent *event;
+ struct sctp_stream *in;
+ struct sk_buff_head temp;
+ struct sk_buff_head *lobby = &ulpq->lobby;
+ __u16 csid, cssn;
+
+ in = &ulpq->asoc->ssnmap->in;
+
+ /* We are holding the chunks by stream, by SSN. */
+ skb_queue_head_init(&temp);
+ event = NULL;
+ sctp_skb_for_each(pos, lobby, tmp) {
+ cevent = (struct sctp_ulpevent *) pos->cb;
+ csid = cevent->stream;
+ cssn = cevent->ssn;
+
+ /* Have we gone too far? */
+ if (csid > sid)
+ break;
+
+ /* Have we not gone far enough? */
+ if (csid < sid)
+ continue;
+
+ /* see if this ssn has been marked by skipping */
+ if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))
+ break;
+
+ __skb_unlink(pos, lobby);
+ if (!event)
+ /* Create a temporary list to collect chunks on. */
+ event = sctp_skb2event(pos);
+
+ /* Attach all gathered skbs to the event. */
+ __skb_queue_tail(&temp, pos);
+ }
+
+ /* If we didn't reap any data, see if the next expected SSN
+ * is next on the queue and if so, use that.
+ */
+ if (event == NULL && pos != (struct sk_buff *)lobby) {
+ cevent = (struct sctp_ulpevent *) pos->cb;
+ csid = cevent->stream;
+ cssn = cevent->ssn;
+
+ if (csid == sid && cssn == sctp_ssn_peek(in, csid)) {
+ sctp_ssn_next(in, csid);
+ __skb_unlink(pos, lobby);
+ __skb_queue_tail(&temp, pos);
+ event = sctp_skb2event(pos);
+ }
+ }
+
+ /* Send event to the ULP. 'event' is the sctp_ulpevent for
+ * very first SKB on the 'temp' list.
+ */
+ if (event) {
+ /* see if we have more ordered data that we can deliver */
+ sctp_ulpq_retrieve_ordered(ulpq, event);
+ sctp_ulpq_tail_event(ulpq, event);
+ }
+}
+
+/* Skip over an SSN. This is used during the processing of
+ * a Forward TSN chunk to skip over the abandoned ordered data.
+ */
+void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
+{
+ struct sctp_stream *in;
+
+ /* Note: The stream ID must be verified before this routine. */
+ in = &ulpq->asoc->ssnmap->in;
+
+ /* Is this an old SSN? If so ignore. */
+ if (SSN_lt(ssn, sctp_ssn_peek(in, sid)))
+ return;
+
+ /* Mark that we are no longer expecting this SSN or lower. */
+ sctp_ssn_skip(in, sid, ssn);
+
+ /* Go find any other chunks that were waiting for
+ * ordering and deliver them if needed.
+ */
+ sctp_ulpq_reap_ordered(ulpq, sid);
+}
+
+static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
+ struct sk_buff_head *list, __u16 needed)
+{
+ __u16 freed = 0;
+ __u32 tsn, last_tsn;
+ struct sk_buff *skb, *flist, *last;
+ struct sctp_ulpevent *event;
+ struct sctp_tsnmap *tsnmap;
+
+ tsnmap = &ulpq->asoc->peer.tsn_map;
+
+ while ((skb = skb_peek_tail(list)) != NULL) {
+ event = sctp_skb2event(skb);
+ tsn = event->tsn;
+
+ /* Don't renege below the Cumulative TSN ACK Point. */
+ if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
+ break;
+
+ /* Events in ordering queue may have multiple fragments
+ * corresponding to additional TSNs. Sum the total
+ * freed space; find the last TSN.
+ */
+ freed += skb_headlen(skb);
+ flist = skb_shinfo(skb)->frag_list;
+ for (last = flist; flist; flist = flist->next) {
+ last = flist;
+ freed += skb_headlen(last);
+ }
+ if (last)
+ last_tsn = sctp_skb2event(last)->tsn;
+ else
+ last_tsn = tsn;
+
+ /* Unlink the event, then renege all applicable TSNs.
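+ *
+ * (Editorial note, not part of the original patch: for example, an
+ * event whose frag_list spans TSNs 105-107 adds the head length of
+ * each fragment to 'freed', and TSNs 105, 106 and 107 are all
+ * cleared from the map below so that the peer will retransmit
+ * them.)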
*/
+ __skb_unlink(skb, list);
+ sctp_ulpevent_free(event);
+ while (TSN_lte(tsn, last_tsn)) {
+ sctp_tsnmap_renege(tsnmap, tsn);
+ tsn++;
+ }
+ if (freed >= needed)
+ return freed;
+ }
+
+ return freed;
+}
+
+/* Renege 'needed' bytes from the ordering queue. */
+static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
+{
+ return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
+}
+
+/* Renege 'needed' bytes from the reassembly queue. */
+static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
+{
+ return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);
+}
+
+/* Partially deliver the first message when there is pressure on rwnd. */
+void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
+ gfp_t gfp)
+{
+ struct sctp_ulpevent *event;
+ struct sctp_association *asoc;
+ struct sctp_sock *sp;
+ __u32 ctsn;
+ struct sk_buff *skb;
+
+ asoc = ulpq->asoc;
+ sp = sctp_sk(asoc->base.sk);
+
+ /* If the association is already in Partial Delivery mode
+ * we have nothing to do.
+ */
+ if (ulpq->pd_mode)
+ return;
+
+ /* Data must be at or below the Cumulative TSN ACK Point to
+ * start partial delivery.
+ */
+ skb = skb_peek(&asoc->ulpq.reasm);
+ if (skb != NULL) {
+ ctsn = sctp_skb2event(skb)->tsn;
+ if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
+ return;
+ }
+
+ /* If the user enabled fragment interleave socket option,
+ * multiple associations can enter partial delivery.
+ * Otherwise, we can only enter partial delivery if the
+ * socket is not in partial delivery mode.
+ */
+ if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
+ /* Is partial delivery possible? */
+ event = sctp_ulpq_retrieve_first(ulpq);
+ /* Send event to the ULP. */
+ if (event) {
+ sctp_ulpq_tail_event(ulpq, event);
+ sctp_ulpq_set_pd(ulpq);
+ return;
+ }
+ }
+}
+
+/* Renege some packets to make room for an incoming chunk. */
+void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
+ gfp_t gfp)
+{
+ struct sctp_association *asoc;
+ __u16 needed, freed;
+
+ asoc = ulpq->asoc;
+
+ if (chunk) {
+ needed = ntohs(chunk->chunk_hdr->length);
+ needed -= sizeof(sctp_data_chunk_t);
+ } else
+ needed = SCTP_DEFAULT_MAXWINDOW;
+
+ freed = 0;
+
+ if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
+ freed = sctp_ulpq_renege_order(ulpq, needed);
+ if (freed < needed) {
+ freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
+ }
+ }
+ /* If able to free enough room, accept this chunk. */
+ if (chunk && (freed >= needed)) {
+ int retval;
+ retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
+ /*
+ * Enter partial delivery if chunk has not been
+ * delivered; otherwise, drain the reassembly queue.
+ */
+ if (retval <= 0)
+ sctp_ulpq_partial_delivery(ulpq, gfp);
+ else if (retval == 1)
+ sctp_ulpq_reasm_drain(ulpq);
+ }
+
+ sk_mem_reclaim(asoc->base.sk);
+}
+
+
+
+/* Notify the application if an association is aborted and in
+ * partial delivery mode. Send up any pending received messages.
+ */
+void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
+{
+ struct sctp_ulpevent *ev = NULL;
+ struct sock *sk;
+
+ if (!ulpq->pd_mode)
+ return;
+
+ sk = ulpq->asoc->base.sk;
+ if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
+ &sctp_sk(sk)->subscribe))
+ ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
+ SCTP_PARTIAL_DELIVERY_ABORTED,
+ gfp);
+ if (ev)
+ __skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));
+
+ /* If there is data waiting, send it up the socket now. */
+ if (sctp_ulpq_clear_pd(ulpq) || ev)
+ sk->sk_data_ready(sk);
+}
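
Editor's note: the partial delivery machinery above is driven from
userspace through socket options. The sketch below is illustrative
only and not part of this patch; the descriptor 'fd' is assumed to be
an existing SCTP socket. It subscribes to the
SCTP_PARTIAL_DELIVERY_EVENT notification that sctp_ulpq_abort_pd()
generates and sets the delivery point consulted by
sctp_ulpq_retrieve_reassembled():

	#include <string.h>
	#include <stdint.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <netinet/sctp.h>

	static int enable_partial_delivery(int fd)
	{
		struct sctp_event_subscribe events;
		uint32_t pd_point = 4096;	/* bytes queued before PD may begin */

		/* Ask for data I/O and partial delivery notifications. */
		memset(&events, 0, sizeof(events));
		events.sctp_data_io_event = 1;
		events.sctp_partial_delivery_event = 1;
		if (setsockopt(fd, IPPROTO_SCTP, SCTP_EVENTS,
			       &events, sizeof(events)) < 0)
			return -1;

		/* Let the stack start partial delivery once pd_point
		 * bytes of an unfinished message are queued.
		 */
		return setsockopt(fd, IPPROTO_SCTP,
				  SCTP_PARTIAL_DELIVERY_POINT,
				  &pd_point, sizeof(pd_point));
	}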