1 |
1275 |
phoenix |
/* SCTP kernel reference Implementation
|
2 |
|
|
* (C) Copyright IBM Corp. 2001, 2004
|
3 |
|
|
* Copyright (c) 1999 Cisco, Inc.
|
4 |
|
|
* Copyright (c) 1999-2001 Motorola, Inc.
|
5 |
|
|
*
|
6 |
|
|
* This file is part of the SCTP kernel reference Implementation
|
7 |
|
|
*
|
8 |
|
|
* These functions work with the state functions in sctp_sm_statefuns.c
|
9 |
|
|
* to implement that state operations. These functions implement the
|
10 |
|
|
* steps which require modifying existing data structures.
|
11 |
|
|
*
|
12 |
|
|
* The SCTP reference implementation is free software;
|
13 |
|
|
* you can redistribute it and/or modify it under the terms of
|
14 |
|
|
* the GNU General Public License as published by
|
15 |
|
|
* the Free Software Foundation; either version 2, or (at your option)
|
16 |
|
|
* any later version.
|
17 |
|
|
*
|
18 |
|
|
* The SCTP reference implementation is distributed in the hope that it
|
19 |
|
|
* will be useful, but WITHOUT ANY WARRANTY; without even the implied
|
20 |
|
|
* ************************
|
21 |
|
|
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
|
22 |
|
|
* See the GNU General Public License for more details.
|
23 |
|
|
*
|
24 |
|
|
* You should have received a copy of the GNU General Public License
|
25 |
|
|
* along with GNU CC; see the file COPYING. If not, write to
|
26 |
|
|
* the Free Software Foundation, 59 Temple Place - Suite 330,
|
27 |
|
|
* Boston, MA 02111-1307, USA.
|
28 |
|
|
*
|
29 |
|
|
* Please send any bug reports or fixes you make to the
|
30 |
|
|
* email address(es):
|
31 |
|
|
* lksctp developers <lksctp-developers@lists.sourceforge.net>
|
32 |
|
|
*
|
33 |
|
|
* Or submit a bug report through the following website:
|
34 |
|
|
* http://www.sf.net/projects/lksctp
|
35 |
|
|
*
|
36 |
|
|
* Written or modified by:
|
37 |
|
|
* La Monte H.P. Yarroll <piggy@acm.org>
|
38 |
|
|
* Karl Knutson <karl@athena.chicago.il.us>
|
39 |
|
|
* Jon Grimm <jgrimm@austin.ibm.com>
|
40 |
|
|
* Hui Huang <hui.huang@nokia.com>
|
41 |
|
|
* Dajiang Zhang <dajiang.zhang@nokia.com>
|
42 |
|
|
* Daisy Chang <daisyc@us.ibm.com>
|
43 |
|
|
* Sridhar Samudrala <sri@us.ibm.com>
|
44 |
|
|
* Ardelle Fan <ardelle.fan@intel.com>
|
45 |
|
|
*
|
46 |
|
|
* Any bugs reported given to us we will try to fix... any fixes shared will
|
47 |
|
|
* be incorporated into the next SCTP release.
|
48 |
|
|
*/
|
49 |
|
|
|
50 |
|
|
#include <linux/skbuff.h>
|
51 |
|
|
#include <linux/types.h>
|
52 |
|
|
#include <linux/socket.h>
|
53 |
|
|
#include <linux/ip.h>
|
54 |
|
|
#include <net/sock.h>
|
55 |
|
|
#include <net/sctp/sctp.h>
|
56 |
|
|
#include <net/sctp/sm.h>
|
57 |
|
|
|
58 |
|
|
/********************************************************************
|
59 |
|
|
* Helper functions
|
60 |
|
|
********************************************************************/
|
61 |
|
|
|
62 |
|
|
/* A helper function for delayed processing of INET ECN CE bit. */
|
63 |
|
|
static void sctp_do_ecn_ce_work(struct sctp_association *asoc,
				__u32 lowest_tsn)
{
	/* Save the TSN away for comparison when we receive CWR.
	 * Setting need_ecne causes an ECNE to be bundled on outgoing
	 * packets until the peer's CWR clears it (see
	 * sctp_do_ecn_cwr_work below).
	 */

	asoc->last_ecne_tsn = lowest_tsn;
	asoc->need_ecne = 1;
}
|
71 |
|
|
|
72 |
|
|
/* Helper function for delayed processing of SCTP ECNE chunk. */
|
73 |
|
|
/* RFC 2960 Appendix A
|
74 |
|
|
*
|
75 |
|
|
* RFC 2481 details a specific bit for a sender to send in
|
76 |
|
|
* the header of its next outbound TCP segment to indicate to
|
77 |
|
|
* its peer that it has reduced its congestion window. This
|
78 |
|
|
* is termed the CWR bit. For SCTP the same indication is made
|
79 |
|
|
* by including the CWR chunk. This chunk contains one data
|
80 |
|
|
* element, i.e. the TSN number that was sent in the ECNE chunk.
|
81 |
|
|
* This element represents the lowest TSN number in the datagram
|
82 |
|
|
* that was originally marked with the CE bit.
|
83 |
|
|
*/
|
84 |
|
|
/* Process a received ECNE: lower the congestion variables on the
 * transport that sent the marked TSN (if it is newer than the last one
 * we answered) and build a CWR reply chunk.
 *
 * Returns the CWR chunk to transmit, or NULL if allocation failed (the
 * peer will simply retransmit the ECNE and we try again).
 */
static struct sctp_chunk *sctp_do_ecn_ecne_work(struct sctp_association *asoc,
						__u32 lowest_tsn,
						struct sctp_chunk *chunk)
{
	struct sctp_chunk *repl;

	/* Our previously transmitted packet ran into some congestion
	 * so we should take action by reducing cwnd and ssthresh
	 * and then ACK our peer that we've done so by
	 * sending a CWR.
	 */

	/* First, try to determine if we want to actually lower
	 * our cwnd variables.  Only lower them if the ECNE looks more
	 * recent than the last response.
	 */
	if (TSN_lt(asoc->last_cwr_tsn, lowest_tsn)) {
		struct sctp_transport *transport;

		/* Find which transport's congestion variables
		 * need to be adjusted.
		 */
		transport = sctp_assoc_lookup_tsn(asoc, lowest_tsn);

		/* Update the congestion variables. */
		if (transport)
			sctp_transport_lower_cwnd(transport,
						  SCTP_LOWER_CWND_ECNE);
		asoc->last_cwr_tsn = lowest_tsn;
	}

	/* Always try to quiet the other end.  In case of lost CWR,
	 * resend last_cwr_tsn.
	 */
	repl = sctp_make_cwr(asoc, asoc->last_cwr_tsn, chunk);

	/* If we run out of memory, it will look like a lost CWR.  We'll
	 * get back in sync eventually.
	 */
	return repl;
}
|
125 |
|
|
|
126 |
|
|
/* Helper function to do delayed processing of ECN CWR chunk. */
|
127 |
|
|
static void sctp_do_ecn_cwr_work(struct sctp_association *asoc,
				 __u32 lowest_tsn)
{
	/* Turn off ECNE getting auto-prepended to every outgoing
	 * packet.  Note: lowest_tsn is currently unused here; the
	 * CWR acknowledges whatever ECNE state was pending.
	 */
	asoc->need_ecne = 0;
}
|
135 |
|
|
|
136 |
|
|
/* Generate SACK if necessary. We call this at the end of a packet. */
|
137 |
|
|
/* Generate a SACK now, or arm the "SACK the next packet" state,
 * following the RFC 2960 6.2 delayed-ack rules.  'force' makes the
 * SACK unconditional.  Returns 0 on success, -ENOMEM if the SACK
 * chunk could not be allocated, or the error from queueing it.
 */
int sctp_gen_sack(struct sctp_association *asoc, int force,
		  sctp_cmd_seq_t *commands)
{
	__u32 ctsn, max_tsn_seen;
	struct sctp_chunk *sack;
	int error = 0;

	if (force)
		asoc->peer.sack_needed = 1;

	ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map);
	max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map);

	/* From 12.2 Parameters necessary per association (i.e. the TCB):
	 *
	 * Ack State : This flag indicates if the next received packet
	 *	     : is to be responded to with a SACK. ...
	 *	     : When DATA chunks are out of order, SACK's
	 *	     : are not delayed (see Section 6).
	 *
	 * [This is actually not mentioned in Section 6, but we
	 * implement it here anyway. --piggy]
	 */
	if (max_tsn_seen != ctsn)
		asoc->peer.sack_needed = 1;

	/* From 6.2 Acknowledgement on Reception of DATA Chunks:
	 *
	 * Section 4.2 of [RFC2581] SHOULD be followed. Specifically,
	 * an acknowledgement SHOULD be generated for at least every
	 * second packet (not every second DATA chunk) received, and
	 * SHOULD be generated within 200 ms of the arrival of any
	 * unacknowledged DATA chunk. ...
	 */
	if (!asoc->peer.sack_needed) {
		/* We will need a SACK for the next packet. */
		asoc->peer.sack_needed = 1;
		goto out;
	} else {
		/* Never advertise more receive window than we have. */
		if (asoc->a_rwnd > asoc->rwnd)
			asoc->a_rwnd = asoc->rwnd;
		sack = sctp_make_sack(asoc);
		if (!sack)
			goto nomem;

		asoc->peer.sack_needed = 0;

		error = sctp_outq_tail(&asoc->outqueue, sack);

		/* Stop the SACK timer.  The SACK is going out now, so a
		 * delayed one is no longer needed.
		 */
		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
				SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
	}
out:
	return error;
nomem:
	error = -ENOMEM;
	return error;
}
|
196 |
|
|
|
197 |
|
|
/* When the T3-RTX timer expires, it calls this function to create the
|
198 |
|
|
* relevant state machine event.
|
199 |
|
|
*/
|
200 |
|
|
/* T3-RTX (retransmission) timer callback.  'peer' is really a
 * struct sctp_transport pointer smuggled through the timer data word.
 * Runs in BH context; takes the socket lock, and if user space holds
 * the sock it re-arms itself a short time (HZ/20) later instead of
 * running the state machine now.
 */
void sctp_generate_t3_rtx_event(unsigned long peer)
{
	int error;
	struct sctp_transport *transport = (struct sctp_transport *) peer;
	struct sctp_association *asoc = transport->asoc;

	/* Check whether a task is in the sock. */

	sctp_bh_lock_sock(asoc->base.sk);
	if (sock_owned_by_user(asoc->base.sk)) {
		SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __FUNCTION__);

		/* Try again later.  mod_timer() returning 0 means the
		 * timer was not pending, so we must take a fresh
		 * reference for the newly armed timer.
		 */
		if (!mod_timer(&transport->T3_rtx_timer, jiffies + (HZ/20)))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	/* Is this transport really dead and just waiting around for
	 * the timer to let go of the reference?
	 */
	if (transport->dead)
		goto out_unlock;

	/* Run through the state machine. */
	error = sctp_do_sm(SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_T3_RTX),
			   asoc->state,
			   asoc->ep, asoc,
			   transport, GFP_ATOMIC);

	if (error)
		asoc->base.sk->sk_err = -error;

out_unlock:
	sctp_bh_unlock_sock(asoc->base.sk);
	/* Drop the reference the (now expired) timer was holding. */
	sctp_transport_put(transport);
}
|
238 |
|
|
|
239 |
|
|
/* This is a sa interface for producing timeout events. It works
|
240 |
|
|
* for timeouts which use the association as their parameter.
|
241 |
|
|
*/
|
242 |
|
|
/* Common body for association-scoped timer callbacks: lock the sock,
 * defer (re-arm at jiffies + HZ/20) if user space owns it, bail if the
 * association is already dead, otherwise feed the timeout into the
 * state machine.  Always drops the timer's association reference.
 */
static void sctp_generate_timeout_event(struct sctp_association *asoc,
					sctp_event_timeout_t timeout_type)
{
	int error = 0;

	sctp_bh_lock_sock(asoc->base.sk);
	if (sock_owned_by_user(asoc->base.sk)) {
		SCTP_DEBUG_PRINTK("%s:Sock is busy: timer %d\n",
				  __FUNCTION__,
				  timeout_type);

		/* Try again later.  A zero return from mod_timer() means
		 * the timer was not pending, so hold a fresh reference
		 * for the re-armed timer.
		 */
		if (!mod_timer(&asoc->timers[timeout_type], jiffies + (HZ/20)))
			sctp_association_hold(asoc);
		goto out_unlock;
	}

	/* Is this association really dead and just waiting around for
	 * the timer to let go of the reference?
	 */
	if (asoc->base.dead)
		goto out_unlock;

	/* Run through the state machine.  The timeout type doubles as
	 * the event argument.
	 */
	error = sctp_do_sm(SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(timeout_type),
			   asoc->state, asoc->ep, asoc,
			   (void *)timeout_type, GFP_ATOMIC);

	if (error)
		asoc->base.sk->sk_err = -error;

out_unlock:
	sctp_bh_unlock_sock(asoc->base.sk);
	/* Drop the reference the expired timer was holding. */
	sctp_association_put(asoc);
}
|
278 |
|
|
|
279 |
|
|
void sctp_generate_t1_cookie_event(unsigned long data)
|
280 |
|
|
{
|
281 |
|
|
struct sctp_association *asoc = (struct sctp_association *) data;
|
282 |
|
|
sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_COOKIE);
|
283 |
|
|
}
|
284 |
|
|
|
285 |
|
|
void sctp_generate_t1_init_event(unsigned long data)
|
286 |
|
|
{
|
287 |
|
|
struct sctp_association *asoc = (struct sctp_association *) data;
|
288 |
|
|
sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_INIT);
|
289 |
|
|
}
|
290 |
|
|
|
291 |
|
|
void sctp_generate_t2_shutdown_event(unsigned long data)
|
292 |
|
|
{
|
293 |
|
|
struct sctp_association *asoc = (struct sctp_association *) data;
|
294 |
|
|
sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T2_SHUTDOWN);
|
295 |
|
|
}
|
296 |
|
|
|
297 |
|
|
void sctp_generate_t4_rto_event(unsigned long data)
|
298 |
|
|
{
|
299 |
|
|
struct sctp_association *asoc = (struct sctp_association *) data;
|
300 |
|
|
sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T4_RTO);
|
301 |
|
|
}
|
302 |
|
|
|
303 |
|
|
void sctp_generate_t5_shutdown_guard_event(unsigned long data)
|
304 |
|
|
{
|
305 |
|
|
struct sctp_association *asoc = (struct sctp_association *)data;
|
306 |
|
|
sctp_generate_timeout_event(asoc,
|
307 |
|
|
SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD);
|
308 |
|
|
|
309 |
|
|
} /* sctp_generate_t5_shutdown_guard_event() */
|
310 |
|
|
|
311 |
|
|
void sctp_generate_autoclose_event(unsigned long data)
|
312 |
|
|
{
|
313 |
|
|
struct sctp_association *asoc = (struct sctp_association *) data;
|
314 |
|
|
sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_AUTOCLOSE);
|
315 |
|
|
}
|
316 |
|
|
|
317 |
|
|
/* Generate a heart beat event. If the sock is busy, reschedule. Make
|
318 |
|
|
* sure that the transport is still valid.
|
319 |
|
|
*/
|
320 |
|
|
/* Heartbeat timer callback for a transport.  Same lock/defer/dead-check
 * discipline as the T3-RTX handler above; injects a HEARTBEAT timeout
 * event into the state machine and drops the timer's transport ref.
 */
void sctp_generate_heartbeat_event(unsigned long data)
{
	int error = 0;
	struct sctp_transport *transport = (struct sctp_transport *) data;
	struct sctp_association *asoc = transport->asoc;

	sctp_bh_lock_sock(asoc->base.sk);
	if (sock_owned_by_user(asoc->base.sk)) {
		SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __FUNCTION__);

		/* Try again later.  Hold a new reference if the timer
		 * was not already pending.
		 */
		if (!mod_timer(&transport->hb_timer, jiffies + (HZ/20)))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	/* Is this structure just waiting around for us to actually
	 * get destroyed?
	 */
	if (transport->dead)
		goto out_unlock;

	error = sctp_do_sm(SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT),
			   asoc->state, asoc->ep, asoc,
			   transport, GFP_ATOMIC);

	if (error)
		asoc->base.sk->sk_err = -error;

out_unlock:
	sctp_bh_unlock_sock(asoc->base.sk);
	/* Drop the reference the expired timer was holding. */
	sctp_transport_put(transport);
}
|
354 |
|
|
|
355 |
|
|
/* Inject a SACK Timeout event into the state machine. */
|
356 |
|
|
void sctp_generate_sack_event(unsigned long data)
|
357 |
|
|
{
|
358 |
|
|
struct sctp_association *asoc = (struct sctp_association *) data;
|
359 |
|
|
sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_SACK);
|
360 |
|
|
}
|
361 |
|
|
|
362 |
|
|
sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = {
|
363 |
|
|
NULL,
|
364 |
|
|
sctp_generate_t1_cookie_event,
|
365 |
|
|
sctp_generate_t1_init_event,
|
366 |
|
|
sctp_generate_t2_shutdown_event,
|
367 |
|
|
NULL,
|
368 |
|
|
sctp_generate_t4_rto_event,
|
369 |
|
|
sctp_generate_t5_shutdown_guard_event,
|
370 |
|
|
sctp_generate_heartbeat_event,
|
371 |
|
|
sctp_generate_sack_event,
|
372 |
|
|
sctp_generate_autoclose_event,
|
373 |
|
|
};
|
374 |
|
|
|
375 |
|
|
|
376 |
|
|
/* RFC 2960 8.2 Path Failure Detection
|
377 |
|
|
*
|
378 |
|
|
* When its peer endpoint is multi-homed, an endpoint should keep a
|
379 |
|
|
* error counter for each of the destination transport addresses of the
|
380 |
|
|
* peer endpoint.
|
381 |
|
|
*
|
382 |
|
|
* Each time the T3-rtx timer expires on any address, or when a
|
383 |
|
|
* HEARTBEAT sent to an idle address is not acknowledged within a RTO,
|
384 |
|
|
* the error counter of that destination address will be incremented.
|
385 |
|
|
* When the value in the error counter exceeds the protocol parameter
|
386 |
|
|
* 'Path.Max.Retrans' of that destination address, the endpoint should
|
387 |
|
|
* mark the destination transport address as inactive, and a
|
388 |
|
|
* notification SHOULD be sent to the upper layer.
|
389 |
|
|
*
|
390 |
|
|
*/
|
391 |
|
|
static void sctp_do_8_2_transport_strike(struct sctp_association *asoc,
					 struct sctp_transport *transport)
{
	/* The check for association's overall error counter exceeding the
	 * threshold is done in the state function.
	 */
	asoc->overall_error_count++;

	/* Strike the transport; if its (post-increment) error count has
	 * reached the per-path threshold, mark it down and notify.
	 */
	if (transport->active &&
	    (transport->error_count++ >= transport->error_threshold)) {
		SCTP_DEBUG_PRINTK("transport_strike: transport "
				  "IP:%d.%d.%d.%d failed.\n",
				  NIPQUAD(transport->ipaddr.v4.sin_addr));
		sctp_assoc_control_transport(asoc, transport,
					     SCTP_TRANSPORT_DOWN,
					     SCTP_FAILED_THRESHOLD);
	}

	/* E2) For the destination address for which the timer
	 * expires, set RTO <- RTO * 2 ("back off the timer").  The
	 * maximum value discussed in rule C7 above (RTO.max) may be
	 * used to provide an upper bound to this doubling operation.
	 */
	transport->rto = min((transport->rto * 2), transport->asoc->rto_max);
}
|
416 |
|
|
|
417 |
|
|
/* Worker routine to handle INIT command failure. */
|
418 |
|
|
/* Handle INIT failure: deliver a SCTP_CANT_STR_ASSOC change event to
 * the ULP (if one can be allocated), record the error on the outqueue,
 * and queue deletion of the TCB.
 */
static void sctp_cmd_init_failed(sctp_cmd_seq_t *commands,
				 struct sctp_association *asoc,
				 unsigned error)
{
	struct sctp_ulpevent *event;

	event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_CANT_STR_ASSOC,
						(__u16)error, 0, 0,
						GFP_ATOMIC);

	if (event)
		sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
				SCTP_ULPEVENT(event));

	/* SEND_FAILED sent later when cleaning up the association. */
	asoc->outqueue.error = error;
	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
}
|
436 |
|
|
|
437 |
|
|
/* Worker routine to handle SCTP_CMD_ASSOC_FAILED. */
|
438 |
|
|
/* Worker routine to handle SCTP_CMD_ASSOC_FAILED: abort any partial
 * delivery, notify the ULP of SCTP_COMM_LOST, force the association to
 * CLOSED, and queue TCB deletion.
 */
static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands,
				  struct sctp_association *asoc,
				  sctp_event_t event_type,
				  sctp_subtype_t subtype,
				  struct sctp_chunk *chunk,
				  unsigned error)
{
	struct sctp_ulpevent *event;

	/* Cancel any partial delivery in progress. */
	sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);

	event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
						(__u16)error, 0, 0,
						GFP_ATOMIC);
	if (event)
		sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
				SCTP_ULPEVENT(event));

	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
			SCTP_STATE(SCTP_STATE_CLOSED));

	/* SEND_FAILED sent later when cleaning up the association. */
	asoc->outqueue.error = error;
	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
}
|
464 |
|
|
|
465 |
|
|
/* Process an init chunk (may be real INIT/INIT-ACK or an embedded INIT
|
466 |
|
|
* inside the cookie. In reality, this is only used for INIT-ACK processing
|
467 |
|
|
* since all other cases use "temporary" associations and can do all
|
468 |
|
|
* their work in statefuns directly.
|
469 |
|
|
*/
|
470 |
|
|
static int sctp_cmd_process_init(sctp_cmd_seq_t *commands,
|
471 |
|
|
struct sctp_association *asoc,
|
472 |
|
|
struct sctp_chunk *chunk,
|
473 |
|
|
sctp_init_chunk_t *peer_init, int gfp)
|
474 |
|
|
{
|
475 |
|
|
int error;
|
476 |
|
|
|
477 |
|
|
/* We only process the init as a sideeffect in a single
|
478 |
|
|
* case. This is when we process the INIT-ACK. If we
|
479 |
|
|
* fail during INIT processing (due to malloc problems),
|
480 |
|
|
* just return the error and stop processing the stack.
|
481 |
|
|
*/
|
482 |
|
|
if (!sctp_process_init(asoc, chunk->chunk_hdr->type,
|
483 |
|
|
sctp_source(chunk), peer_init, gfp))
|
484 |
|
|
error = -ENOMEM;
|
485 |
|
|
else
|
486 |
|
|
error = 0;
|
487 |
|
|
|
488 |
|
|
return error;
|
489 |
|
|
}
|
490 |
|
|
|
491 |
|
|
/* Helper function to break out starting up of heartbeat timers. */
|
492 |
|
|
static void sctp_cmd_hb_timers_start(sctp_cmd_seq_t *cmds,
				     struct sctp_association *asoc)
{
	struct sctp_transport *t;
	struct list_head *pos;

	/* Start a heartbeat timer for each transport on the association.
	 * hold a reference on the transport to make sure none of
	 * the needed data structures go away.
	 */
	list_for_each(pos, &asoc->peer.transport_addr_list) {
		t = list_entry(pos, struct sctp_transport, transports);

		/* mod_timer() returns 0 when the timer was not already
		 * pending; only then do we owe the timer a reference.
		 */
		if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
			sctp_transport_hold(t);
	}
}
|
509 |
|
|
|
510 |
|
|
/* Stop the heartbeat timer on every transport of the association,
 * releasing the reference each pending timer held.
 */
static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *cmds,
				    struct sctp_association *asoc)
{
	struct sctp_transport *t;
	struct list_head *pos;

	/* Stop all heartbeat timers. */

	list_for_each(pos, &asoc->peer.transport_addr_list) {
		t = list_entry(pos, struct sctp_transport, transports);
		/* del_timer() returns nonzero only if the timer was
		 * pending, i.e. only then does it still hold a ref.
		 */
		if (del_timer(&t->hb_timer))
			sctp_transport_put(t);
	}
}
|
524 |
|
|
|
525 |
|
|
/* Helper function to update the heartbeat timer. */
|
526 |
|
|
/* Helper function to update (re-arm) the heartbeat timer on one
 * transport, taking a reference if the timer was not already pending.
 */
static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
				     struct sctp_association *asoc,
				     struct sctp_transport *t)
{
	/* Update the heartbeat timer. */
	if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t)))
		sctp_transport_hold(t);
}
|
534 |
|
|
|
535 |
|
|
/* Helper function to handle the reception of an HEARTBEAT ACK. */
|
536 |
|
|
/* Handle a received HEARTBEAT ACK: clear error counters, mark the
 * transport up if needed, and fold the echoed timestamp into the RTO
 * estimate.
 */
static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
				  struct sctp_association *asoc,
				  struct sctp_transport *t,
				  struct sctp_chunk *chunk)
{
	sctp_sender_hb_info_t *hbinfo;

	/* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the
	 * HEARTBEAT should clear the error counter of the destination
	 * transport address to which the HEARTBEAT was sent.
	 * The association's overall error count is also cleared.
	 */
	t->error_count = 0;
	t->asoc->overall_error_count = 0;

	/* Mark the destination transport address as active if it is not so
	 * marked.
	 */
	if (!t->active)
		sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
					     SCTP_HEARTBEAT_SUCCESS);

	/* The receiver of the HEARTBEAT ACK should also perform an
	 * RTT measurement for that destination transport address
	 * using the time value carried in the HEARTBEAT ACK chunk.
	 * The sent_at jiffies value was stashed in the HEARTBEAT we
	 * sent and is echoed back verbatim by the peer.
	 */
	hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data;
	sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at));
}
|
565 |
|
|
|
566 |
|
|
/* Helper function to do a transport reset at the expiry of the hearbeat
|
567 |
|
|
* timer.
|
568 |
|
|
*/
|
569 |
|
|
/* Helper function to do a transport reset at the expiry of the
 * heartbeat timer: collapse the congestion window for inactivity and
 * record one strike against the path.
 */
static void sctp_cmd_transport_reset(sctp_cmd_seq_t *cmds,
				     struct sctp_association *asoc,
				     struct sctp_transport *t)
{
	sctp_transport_lower_cwnd(t, SCTP_LOWER_CWND_INACTIVE);

	/* Mark one strike against a transport. */
	sctp_do_8_2_transport_strike(asoc, t);
}
|
578 |
|
|
|
579 |
|
|
/* Helper function to process the process SACK command. */
|
580 |
|
|
/* Helper function to process the PROCESS_SACK command.  Returns the
 * error from the follow-up state machine run or queue flush.
 */
static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds,
				 struct sctp_association *asoc,
				 sctp_sackhdr_t *sackh)
{
	int err;

	if (sctp_outq_sack(&asoc->outqueue, sackh)) {
		/* There are no more TSNs awaiting SACK.  Tell the state
		 * machine so e.g. a pending shutdown can proceed.
		 */
		err = sctp_do_sm(SCTP_EVENT_T_OTHER,
				 SCTP_ST_OTHER(SCTP_EVENT_NO_PENDING_TSN),
				 asoc->state, asoc->ep, asoc, NULL,
				 GFP_ATOMIC);
	} else {
		/* Windows may have opened, so we need
		 * to check if we have DATA to transmit
		 */
		err = sctp_outq_flush(&asoc->outqueue, 0);
	}

	return err;
}
|
601 |
|
|
|
602 |
|
|
/* Helper function to set the timeout value for T2-SHUTDOWN timer and to set
|
603 |
|
|
* the transport for a shutdown chunk.
|
604 |
|
|
*/
|
605 |
|
|
/* Helper function to set the timeout value for T2-SHUTDOWN timer and to
 * set the transport for a shutdown chunk: the T2 timeout tracks the
 * chosen transport's current RTO.
 */
static void sctp_cmd_setup_t2(sctp_cmd_seq_t *cmds,
			      struct sctp_association *asoc,
			      struct sctp_chunk *chunk)
{
	struct sctp_transport *t;

	t = sctp_assoc_choose_shutdown_transport(asoc);
	asoc->shutdown_last_sent_to = t;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = t->rto;
	chunk->transport = t;
}
|
616 |
|
|
|
617 |
|
|
/* Helper function to change the state of an association. */
|
618 |
|
|
/* Helper function to change the state of an association, keeping the
 * owning socket's sk_state/shutdown flags in sync (TCP-style sockets)
 * and waking any sleepers affected by the transition.
 */
static void sctp_cmd_new_state(sctp_cmd_seq_t *cmds,
			       struct sctp_association *asoc,
			       sctp_state_t state)
{
	struct sock *sk = asoc->base.sk;

	asoc->state = state;

	if (sctp_style(sk, TCP)) {
		/* Change the sk->sk_state of a TCP-style socket that has
		 * successfully completed a connect() call.
		 */
		if (sctp_state(asoc, ESTABLISHED) && sctp_sstate(sk, CLOSED))
			sk->sk_state = SCTP_SS_ESTABLISHED;

		/* Set the RCV_SHUTDOWN flag when a SHUTDOWN is received. */
		if (sctp_state(asoc, SHUTDOWN_RECEIVED) &&
		    sctp_sstate(sk, ESTABLISHED))
			sk->sk_shutdown |= RCV_SHUTDOWN;
	}

	if (sctp_state(asoc, ESTABLISHED) ||
	    sctp_state(asoc, CLOSED) ||
	    sctp_state(asoc, SHUTDOWN_RECEIVED)) {
		/* Wake up any processes waiting in the asoc's wait queue in
		 * sctp_wait_for_connect() or sctp_wait_for_sndbuf().
		 */
		if (waitqueue_active(&asoc->wait))
			wake_up_interruptible(&asoc->wait);

		/* Wake up any processes waiting in the sk's sleep queue of
		 * a TCP-style or UDP-style peeled-off socket in
		 * sctp_wait_for_accept() or sctp_wait_for_packet().
		 * For a UDP-style socket, the waiters are woken up by the
		 * notifications.
		 */
		if (!sctp_style(sk, UDP))
			sk->sk_state_change(sk);
	}
}
|
658 |
|
|
|
659 |
|
|
/* Helper function to delete an association. */
|
660 |
|
|
/* Helper function to delete an association, unless it must be kept
 * alive for a later accept() on a TCP-style listening socket.
 */
static void sctp_cmd_delete_tcb(sctp_cmd_seq_t *cmds,
				struct sctp_association *asoc)
{
	struct sock *sk = asoc->base.sk;

	/* If it is a non-temporary association belonging to a TCP-style
	 * listening socket that is not closed, do not free it so that accept()
	 * can pick it up later.
	 */
	if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING) &&
	    (!asoc->temp) && (sk->sk_shutdown != SHUTDOWN_MASK))
		return;

	sctp_unhash_established(asoc);
	sctp_association_free(asoc);
}
|
676 |
|
|
|
677 |
|
|
/*
|
678 |
|
|
* ADDIP Section 4.1 ASCONF Chunk Procedures
|
679 |
|
|
* A4) Start a T-4 RTO timer, using the RTO value of the selected
|
680 |
|
|
* destination address (we use active path instead of primary path just
|
681 |
|
|
* because primary path may be inactive.
|
682 |
|
|
*/
|
683 |
|
|
/*
 * ADDIP Section 4.1 ASCONF Chunk Procedures
 * A4) Start a T-4 RTO timer, using the RTO value of the selected
 * destination address (we use active path instead of primary path just
 * because primary path may be inactive).
 */
static void sctp_cmd_setup_t4(sctp_cmd_seq_t *cmds,
			      struct sctp_association *asoc,
			      struct sctp_chunk *chunk)
{
	struct sctp_transport *t;

	t = asoc->peer.active_path;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = t->rto;
	chunk->transport = t;
}
|
693 |
|
|
|
694 |
|
|
/* Process an incoming Operation Error Chunk. */
|
695 |
|
|
/* Process an incoming Operation Error chunk.  Only the
 * "unrecognized chunk" cause is acted upon here; all other causes are
 * ignored.
 */
static void sctp_cmd_process_operr(sctp_cmd_seq_t *cmds,
				   struct sctp_association *asoc,
				   struct sctp_chunk *chunk)
{
	struct sctp_operr_chunk *operr_chunk;
	struct sctp_errhdr *err_hdr;

	operr_chunk = (struct sctp_operr_chunk *)chunk->chunk_hdr;
	err_hdr = &operr_chunk->err_hdr;

	switch (err_hdr->cause) {
	case SCTP_ERROR_UNKNOWN_CHUNK:
	{
		struct sctp_chunkhdr *unk_chunk_hdr;

		/* The offending chunk's header is carried in the error
		 * cause's variable-length part.
		 */
		unk_chunk_hdr = (struct sctp_chunkhdr *)err_hdr->variable;
		switch (unk_chunk_hdr->type) {
		/* ADDIP 4.1 A9) If the peer responds to an ASCONF with an
		 * ERROR chunk reporting that it did not recognized the ASCONF
		 * chunk type, the sender of the ASCONF MUST NOT send any
		 * further ASCONF chunks and MUST stop its T-4 timer.
		 */
		case SCTP_CID_ASCONF:
			asoc->peer.asconf_capable = 0;
			sctp_add_cmd_sf(cmds, SCTP_CMD_TIMER_STOP,
					SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
			break;
		default:
			break;
		}
		break;
	}
	default:
		break;
	}
}
|
731 |
|
|
|
732 |
|
|
/* These three macros allow us to pull the debugging code out of the
|
733 |
|
|
* main flow of sctp_do_sm() to keep attention focused on the real
|
734 |
|
|
* functionality there.
|
735 |
|
|
*/
|
736 |
|
|
/* Log the lookup result before running the state function. */
#define DEBUG_PRE \
	SCTP_DEBUG_PRINTK("sctp_do_sm prefn: " \
			  "ep %p, %s, %s, asoc %p[%s], %s\n", \
			  ep, sctp_evttype_tbl[event_type], \
			  (*debug_fn)(subtype), asoc, \
			  sctp_state_tbl[state], state_fn->name)

/* Log the state function's disposition. */
#define DEBUG_POST \
	SCTP_DEBUG_PRINTK("sctp_do_sm postfn: " \
			  "asoc %p, status: %s\n", \
			  asoc, sctp_status_tbl[status])

/* Log after side effects; re-validates asoc via sctp_id2assoc() because
 * the side effects may have freed the association.
 */
#define DEBUG_POST_SFX \
	SCTP_DEBUG_PRINTK("sctp_do_sm post sfx: error %d, asoc %p[%s]\n", \
			  error, asoc, \
			  sctp_state_tbl[(asoc && sctp_id2assoc(ep->base.sk, \
			  sctp_assoc2id(asoc)))?asoc->state:SCTP_STATE_CLOSED])
|
753 |
|
|
|
754 |
|
|
/*
|
755 |
|
|
* This is the master state machine processing function.
|
756 |
|
|
*
|
757 |
|
|
* If you want to understand all of lksctp, this is a
|
758 |
|
|
* good place to start.
|
759 |
|
|
*/
|
760 |
|
|
/* Master state machine entry point: look up the state function for
 * (event_type, state, subtype), run it to accumulate commands, then
 * process the resulting side effects.  Returns 0 or a negative errno
 * propagated from the side-effect processing.
 */
int sctp_do_sm(sctp_event_t event_type, sctp_subtype_t subtype,
	       sctp_state_t state,
	       struct sctp_endpoint *ep,
	       struct sctp_association *asoc,
	       void *event_arg,
	       int gfp)
{
	sctp_cmd_seq_t commands;
	const sctp_sm_table_entry_t *state_fn;
	sctp_disposition_t status;
	int error = 0;
	typedef const char *(printfn_t)(sctp_subtype_t);

	/* Per-event-type subtype pretty-printers, for the DEBUG_*
	 * macros only.
	 */
	static printfn_t *table[] = {
		NULL, sctp_cname, sctp_tname, sctp_oname, sctp_pname,
	};
	printfn_t *debug_fn  __attribute__ ((unused)) = table[event_type];

	/* Look up the state function, run it, and then process the
	 * side effects.  These three steps are the heart of lksctp.
	 */
	state_fn = sctp_sm_lookup_event(event_type, state, subtype);

	sctp_init_cmd_seq(&commands);

	DEBUG_PRE;
	status = (*state_fn->fn)(ep, asoc, subtype, event_arg, &commands);
	DEBUG_POST;

	error = sctp_side_effects(event_type, subtype, state,
				  ep, asoc, event_arg, status,
				  &commands, gfp);
	DEBUG_POST_SFX;

	return error;
}
|
796 |
|
|
|
797 |
|
|
#undef DEBUG_PRE
|
798 |
|
|
#undef DEBUG_POST
|
799 |
|
|
|
800 |
|
|
/*****************************************************************
|
801 |
|
|
* This the master state function side effect processing function.
|
802 |
|
|
*****************************************************************/
|
803 |
|
|
int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
|
804 |
|
|
sctp_state_t state,
|
805 |
|
|
struct sctp_endpoint *ep,
|
806 |
|
|
struct sctp_association *asoc,
|
807 |
|
|
void *event_arg,
|
808 |
|
|
sctp_disposition_t status,
|
809 |
|
|
sctp_cmd_seq_t *commands,
|
810 |
|
|
int gfp)
|
811 |
|
|
{
|
812 |
|
|
int error;
|
813 |
|
|
|
814 |
|
|
/* FIXME - Most of the dispositions left today would be categorized
|
815 |
|
|
* as "exceptional" dispositions. For those dispositions, it
|
816 |
|
|
* may not be proper to run through any of the commands at all.
|
817 |
|
|
* For example, the command interpreter might be run only with
|
818 |
|
|
* disposition SCTP_DISPOSITION_CONSUME.
|
819 |
|
|
*/
|
820 |
|
|
if (0 != (error = sctp_cmd_interpreter(event_type, subtype, state,
|
821 |
|
|
ep, asoc,
|
822 |
|
|
event_arg, status,
|
823 |
|
|
commands, gfp)))
|
824 |
|
|
goto bail;
|
825 |
|
|
|
826 |
|
|
switch (status) {
|
827 |
|
|
case SCTP_DISPOSITION_DISCARD:
|
828 |
|
|
SCTP_DEBUG_PRINTK("Ignored sctp protocol event - state %d, "
|
829 |
|
|
"event_type %d, event_id %d\n",
|
830 |
|
|
state, event_type, subtype.chunk);
|
831 |
|
|
break;
|
832 |
|
|
|
833 |
|
|
case SCTP_DISPOSITION_NOMEM:
|
834 |
|
|
/* We ran out of memory, so we need to discard this
|
835 |
|
|
* packet.
|
836 |
|
|
*/
|
837 |
|
|
/* BUG--we should now recover some memory, probably by
|
838 |
|
|
* reneging...
|
839 |
|
|
*/
|
840 |
|
|
error = -ENOMEM;
|
841 |
|
|
break;
|
842 |
|
|
|
843 |
|
|
case SCTP_DISPOSITION_DELETE_TCB:
|
844 |
|
|
/* This should now be a command. */
|
845 |
|
|
break;
|
846 |
|
|
|
847 |
|
|
case SCTP_DISPOSITION_CONSUME:
|
848 |
|
|
case SCTP_DISPOSITION_ABORT:
|
849 |
|
|
/*
|
850 |
|
|
* We should no longer have much work to do here as the
|
851 |
|
|
* real work has been done as explicit commands above.
|
852 |
|
|
*/
|
853 |
|
|
break;
|
854 |
|
|
|
855 |
|
|
case SCTP_DISPOSITION_VIOLATION:
|
856 |
|
|
printk(KERN_ERR "sctp protocol violation state %d "
|
857 |
|
|
"chunkid %d\n", state, subtype.chunk);
|
858 |
|
|
break;
|
859 |
|
|
|
860 |
|
|
case SCTP_DISPOSITION_NOT_IMPL:
|
861 |
|
|
printk(KERN_WARNING "sctp unimplemented feature in state %d, "
|
862 |
|
|
"event_type %d, event_id %d\n",
|
863 |
|
|
state, event_type, subtype.chunk);
|
864 |
|
|
break;
|
865 |
|
|
|
866 |
|
|
case SCTP_DISPOSITION_BUG:
|
867 |
|
|
printk(KERN_ERR "sctp bug in state %d, "
|
868 |
|
|
"event_type %d, event_id %d\n",
|
869 |
|
|
state, event_type, subtype.chunk);
|
870 |
|
|
BUG();
|
871 |
|
|
break;
|
872 |
|
|
|
873 |
|
|
default:
|
874 |
|
|
printk(KERN_ERR "sctp impossible disposition %d "
|
875 |
|
|
"in state %d, event_type %d, event_id %d\n",
|
876 |
|
|
status, state, event_type, subtype.chunk);
|
877 |
|
|
BUG();
|
878 |
|
|
break;
|
879 |
|
|
};
|
880 |
|
|
|
881 |
|
|
bail:
|
882 |
|
|
return error;
|
883 |
|
|
}
|
884 |
|
|
|
885 |
|
|
/********************************************************************
|
886 |
|
|
* 2nd Level Abstractions
|
887 |
|
|
********************************************************************/
|
888 |
|
|
|
889 |
|
|
/* This is the side-effect interpreter.
 *
 * Drains the queued commands produced by a state function and applies
 * each one to the association/endpoint.  Stops at the first command
 * that reports an error.  Returns 0 on success or a negative errno.
 * Note: the cork/uncork bookkeeping (local_cork) must stay balanced —
 * several cases uncork before the association pointer changes.
 */
int sctp_cmd_interpreter(sctp_event_t event_type, sctp_subtype_t subtype,
			 sctp_state_t state, struct sctp_endpoint *ep,
			 struct sctp_association *asoc, void *event_arg,
			 sctp_disposition_t status, sctp_cmd_seq_t *commands,
			 int gfp)
{
	int error = 0;
	int force;
	sctp_cmd_t *cmd;
	struct sctp_chunk *new_obj;
	struct sctp_chunk *chunk = NULL;
	struct sctp_packet *packet;
	struct list_head *pos;
	struct timer_list *timer;
	unsigned long timeout;
	struct sctp_transport *t;
	sctp_sackhdr_t sackh;
	int local_cork = 0;

	/* For every event type except a timeout, event_arg carries the
	 * inbound chunk being processed.
	 */
	if (SCTP_EVENT_T_TIMEOUT != event_type)
		chunk = (struct sctp_chunk *) event_arg;

	/* Note: This whole file is a huge candidate for rework.
	 * For example, each command could either have its own handler, so
	 * the loop would look like:
	 *     while (cmds)
	 *         cmd->handle(x, y, z)
	 * --jgrimm
	 */
	while (NULL != (cmd = sctp_next_cmd(commands))) {
		switch (cmd->verb) {
		case SCTP_CMD_NOP:
			/* Do nothing. */
			break;

		case SCTP_CMD_NEW_ASOC:
			/* Register a new association. */
			/* Uncork the old association's queue before
			 * switching asoc — after this, asoc refers to
			 * the new association.
			 */
			if (local_cork) {
				sctp_outq_uncork(&asoc->outqueue);
				local_cork = 0;
			}
			asoc = cmd->obj.ptr;
			/* Register with the endpoint. */
			sctp_endpoint_add_asoc(ep, asoc);
			sctp_hash_established(asoc);
			break;

		case SCTP_CMD_UPDATE_ASSOC:
			/* Fold the peer's parameters into the existing
			 * association (restart case).
			 */
			sctp_assoc_update(asoc, cmd->obj.ptr);
			break;

		case SCTP_CMD_PURGE_OUTQUEUE:
			/* Throw away any data queued for transmission. */
			sctp_outq_teardown(&asoc->outqueue);
			break;

		case SCTP_CMD_DELETE_TCB:
			/* Uncork first: the association (and its outqueue)
			 * is about to be freed.
			 */
			if (local_cork) {
				sctp_outq_uncork(&asoc->outqueue);
				local_cork = 0;
			}
			/* Delete the current association. */
			sctp_cmd_delete_tcb(commands, asoc);
			asoc = NULL;
			break;

		case SCTP_CMD_NEW_STATE:
			/* Enter a new state. */
			sctp_cmd_new_state(commands, asoc, cmd->obj.state);
			break;

		case SCTP_CMD_REPORT_TSN:
			/* Record the arrival of a TSN. */
			sctp_tsnmap_mark(&asoc->peer.tsn_map, cmd->obj.u32);
			break;

		case SCTP_CMD_GEN_SACK:
			/* Generate a Selective ACK.
			 * The argument tells us whether to just count
			 * the packet and MAYBE generate a SACK, or
			 * force a SACK out.
			 */
			force = cmd->obj.i32;
			error = sctp_gen_sack(asoc, force, commands);
			break;

		case SCTP_CMD_PROCESS_SACK:
			/* Process an inbound SACK. */
			error = sctp_cmd_process_sack(commands, asoc,
						      cmd->obj.ptr);
			break;

		case SCTP_CMD_GEN_INIT_ACK:
			/* Generate an INIT ACK chunk. */
			new_obj = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC,
						     0);
			if (!new_obj)
				goto nomem;

			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));
			break;

		case SCTP_CMD_PEER_INIT:
			/* Process a unified INIT from the peer.
			 * Note: Only used during INIT-ACK processing.  If
			 * there is an error just return to the outter
			 * layer which will bail.
			 */
			error = sctp_cmd_process_init(commands, asoc, chunk,
						      cmd->obj.ptr, gfp);
			break;

		case SCTP_CMD_GEN_COOKIE_ECHO:
			/* Generate a COOKIE ECHO chunk. */
			new_obj = sctp_make_cookie_echo(asoc, chunk);
			if (!new_obj) {
				/* Free the optional ERROR chunk that was
				 * to be bundled, so it does not leak.
				 */
				if (cmd->obj.ptr)
					sctp_chunk_free(cmd->obj.ptr);
				goto nomem;
			}
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));

			/* If there is an ERROR chunk to be sent along with
			 * the COOKIE_ECHO, send it, too.
			 */
			if (cmd->obj.ptr)
				sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
						SCTP_CHUNK(cmd->obj.ptr));
			break;

		case SCTP_CMD_GEN_SHUTDOWN:
			/* Generate SHUTDOWN when in SHUTDOWN_SENT state.
			 * Reset error counts.
			 */
			asoc->overall_error_count = 0;

			/* Generate a SHUTDOWN chunk. */
			new_obj = sctp_make_shutdown(asoc, chunk);
			if (!new_obj)
				goto nomem;
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));
			break;

		case SCTP_CMD_CHUNK_ULP:
			/* Send a chunk to the sockets layer. */
			SCTP_DEBUG_PRINTK("sm_sideff: %s %p, %s %p.\n",
					  "chunk_up:", cmd->obj.ptr,
					  "ulpq:", &asoc->ulpq);
			sctp_ulpq_tail_data(&asoc->ulpq, cmd->obj.ptr,
					    GFP_ATOMIC);
			break;

		case SCTP_CMD_EVENT_ULP:
			/* Send a notification to the sockets layer. */
			SCTP_DEBUG_PRINTK("sm_sideff: %s %p, %s %p.\n",
					  "event_up:",cmd->obj.ptr,
					  "ulpq:",&asoc->ulpq);
			sctp_ulpq_tail_event(&asoc->ulpq, cmd->obj.ptr);
			break;

		case SCTP_CMD_REPLY:
			/* If an caller has not already corked, do cork. */
			if (!asoc->outqueue.cork) {
				sctp_outq_cork(&asoc->outqueue);
				local_cork = 1;
			}
			/* Send a chunk to our peer. */
			error = sctp_outq_tail(&asoc->outqueue, cmd->obj.ptr);
			break;

		case SCTP_CMD_SEND_PKT:
			/* Send a full packet to our peer. */
			packet = cmd->obj.ptr;
			sctp_packet_transmit(packet);
			sctp_ootb_pkt_free(packet);
			break;

		case SCTP_CMD_RETRAN:
			/* Mark a transport for retransmission. */
			sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
					SCTP_RTXR_T3_RTX);
			break;

		case SCTP_CMD_TRANSMIT:
			/* Kick start transmission. */
			error = sctp_outq_uncork(&asoc->outqueue);
			local_cork = 0;
			break;

		case SCTP_CMD_ECN_CE:
			/* Do delayed CE processing.   */
			sctp_do_ecn_ce_work(asoc, cmd->obj.u32);
			break;

		case SCTP_CMD_ECN_ECNE:
			/* Do delayed ECNE processing. */
			new_obj = sctp_do_ecn_ecne_work(asoc, cmd->obj.u32,
							chunk);
			/* A CWR reply may or may not be generated. */
			if (new_obj)
				sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
						SCTP_CHUNK(new_obj));
			break;

		case SCTP_CMD_ECN_CWR:
			/* Do delayed CWR processing.  */
			sctp_do_ecn_cwr_work(asoc, cmd->obj.u32);
			break;

		case SCTP_CMD_SETUP_T2:
			sctp_cmd_setup_t2(commands, asoc, cmd->obj.ptr);
			break;

		case SCTP_CMD_TIMER_START:
			timer = &asoc->timers[cmd->obj.to];
			timeout = asoc->timeouts[cmd->obj.to];
			/* A zero timeout for a started timer is a
			 * programming error.
			 */
			if (!timeout)
				BUG();

			/* The timer holds a reference on the association
			 * for as long as it is pending.
			 */
			timer->expires = jiffies + timeout;
			sctp_association_hold(asoc);
			add_timer(timer);
			break;

		case SCTP_CMD_TIMER_RESTART:
			timer = &asoc->timers[cmd->obj.to];
			timeout = asoc->timeouts[cmd->obj.to];
			/* mod_timer() returns 0 if the timer was not
			 * pending; only then take a new reference.
			 */
			if (!mod_timer(timer, jiffies + timeout))
				sctp_association_hold(asoc);
			break;

		case SCTP_CMD_TIMER_STOP:
			timer = &asoc->timers[cmd->obj.to];
			/* Drop the timer's reference only if we actually
			 * deactivated a pending timer.
			 */
			if (timer_pending(timer) && del_timer(timer))
				sctp_association_put(asoc);
			break;

		case SCTP_CMD_INIT_RESTART:
			/* Do the needed accounting and updates
			 * associated with restarting an initialization
			 * timer.
			 */
			asoc->counters[SCTP_COUNTER_INIT_ERROR]++;
			/* Exponential backoff, capped at max_init_timeo. */
			asoc->timeouts[cmd->obj.to] *= 2;
			if (asoc->timeouts[cmd->obj.to] >
			    asoc->max_init_timeo) {
				asoc->timeouts[cmd->obj.to] =
					asoc->max_init_timeo;
			}

			/* If we've sent any data bundled with
			 * COOKIE-ECHO we need to resend.
			 */
			list_for_each(pos, &asoc->peer.transport_addr_list) {
				t = list_entry(pos, struct sctp_transport,
					       transports);
				sctp_retransmit_mark(&asoc->outqueue, t, 0);
			}

			sctp_add_cmd_sf(commands,
					SCTP_CMD_TIMER_RESTART,
					SCTP_TO(cmd->obj.to));
			break;

		case SCTP_CMD_INIT_FAILED:
			sctp_cmd_init_failed(commands, asoc, cmd->obj.u32);
			break;

		case SCTP_CMD_ASSOC_FAILED:
			sctp_cmd_assoc_failed(commands, asoc, event_type,
					      subtype, chunk, cmd->obj.u32);
			break;

		case SCTP_CMD_COUNTER_INC:
			asoc->counters[cmd->obj.counter]++;
			break;

		case SCTP_CMD_COUNTER_RESET:
			asoc->counters[cmd->obj.counter] = 0;
			break;

		case SCTP_CMD_REPORT_DUP:
			/* Note a duplicate TSN for the next SACK. */
			sctp_tsnmap_mark_dup(&asoc->peer.tsn_map,
					     cmd->obj.u32);
			break;

		case SCTP_CMD_REPORT_BAD_TAG:
			SCTP_DEBUG_PRINTK("vtag mismatch!\n");
			break;

		case SCTP_CMD_STRIKE:
			/* Mark one strike against a transport. */
			sctp_do_8_2_transport_strike(asoc, cmd->obj.transport);
			break;

		case SCTP_CMD_TRANSPORT_RESET:
			t = cmd->obj.transport;
			sctp_cmd_transport_reset(commands, asoc, t);
			break;

		case SCTP_CMD_TRANSPORT_ON:
			t = cmd->obj.transport;
			sctp_cmd_transport_on(commands, asoc, t, chunk);
			break;

		case SCTP_CMD_HB_TIMERS_START:
			sctp_cmd_hb_timers_start(commands, asoc);
			break;

		case SCTP_CMD_HB_TIMER_UPDATE:
			t = cmd->obj.transport;
			sctp_cmd_hb_timer_update(commands, asoc, t);
			break;

		case SCTP_CMD_HB_TIMERS_STOP:
			sctp_cmd_hb_timers_stop(commands, asoc);
			break;

		case SCTP_CMD_REPORT_ERROR:
			error = cmd->obj.error;
			break;

		case SCTP_CMD_PROCESS_CTSN:
			/* Dummy up a SACK for processing. */
			sackh.cum_tsn_ack = cmd->obj.u32;
			sackh.a_rwnd = 0;
			sackh.num_gap_ack_blocks = 0;
			sackh.num_dup_tsns = 0;
			sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK,
					SCTP_SACKH(&sackh));
			break;

		case SCTP_CMD_DISCARD_PACKET:
			/* We need to discard the whole packet. */
			chunk->pdiscard = 1;
			break;

		case SCTP_CMD_RTO_PENDING:
			/* Request an RTT measurement on this transport. */
			t = cmd->obj.transport;
			t->rto_pending = 1;
			break;

		case SCTP_CMD_PART_DELIVER:
			sctp_ulpq_partial_delivery(&asoc->ulpq, cmd->obj.ptr,
						   GFP_ATOMIC);
			break;

		case SCTP_CMD_RENEGE:
			sctp_ulpq_renege(&asoc->ulpq, cmd->obj.ptr,
					 GFP_ATOMIC);
			break;

		case SCTP_CMD_SETUP_T4:
			sctp_cmd_setup_t4(commands, asoc, cmd->obj.ptr);
			break;

		case SCTP_CMD_PROCESS_OPERR:
			sctp_cmd_process_operr(commands, asoc, chunk);
			break;
		default:
			printk(KERN_WARNING "Impossible command: %u, %p\n",
			       cmd->verb, cmd->obj.ptr);
			break;
		};
		/* Abort the command loop on the first failure. */
		if (error)
			break;
	}

out:
	if (local_cork)
		sctp_outq_uncork(&asoc->outqueue);
	return error;
nomem:
	error = -ENOMEM;
	goto out;
}
|
1267 |
|
|
|