/* SCTP kernel reference Implementation
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2003 Intel Corp.
 * Copyright (c) 2001-2003 International Business Machines Corp.
 *
 * This file is part of the SCTP kernel reference Implementation
 *
 * These functions implement the sctp_outq class.  The outqueue handles
 * bundling and queueing of outgoing SCTP chunks.
 *
 * The SCTP reference implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * The SCTP reference implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Perry Melange         <pmelange@null.cc.uic.edu>
 *    Xingang Guo           <xingang.guo@intel.com>
 *    Hui Huang             <hui.huang@nokia.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *
 * Any bugs reported given to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */

#include <linux/types.h>
#include <linux/list.h>   /* For struct list_head */
#include <linux/socket.h>
#include <linux/ip.h>
#include <net/sock.h>	  /* For skb_set_owner_w */

#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Declare internal functions here. */
static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn);
static void sctp_check_transmitted(struct sctp_outq *q,
				   struct list_head *transmitted_queue,
				   struct sctp_transport *transport,
				   struct sctp_sackhdr *sack,
				   __u32 highest_new_tsn);

static void sctp_mark_missing(struct sctp_outq *q,
			      struct list_head *transmitted_queue,
			      struct sctp_transport *transport,
			      __u32 highest_new_tsn,
			      int count_of_newacks);

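/* Note on the queue helpers below: they reuse the generic sk_buff queue
 * primitives by casting struct sctp_chunk pointers to struct sk_buff
 * pointers.  This relies on struct sctp_chunk deliberately beginning
 * with the same next/prev/list members as struct sk_buff, so that the
 * list linkage fields line up.
 */
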
/* Add data to the front of the queue. */
static inline void sctp_outq_head_data(struct sctp_outq *q,
				       struct sctp_chunk *ch)
{
	__skb_queue_head(&q->out, (struct sk_buff *)ch);
	q->out_qlen += ch->skb->len;
	return;
}

/* Take data from the front of the queue. */
static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q)
{
	struct sctp_chunk *ch;
	ch = (struct sctp_chunk *)__skb_dequeue(&q->out);
	if (ch)
		q->out_qlen -= ch->skb->len;
	return ch;
}
/* Add data chunk to the end of the queue. */
static inline void sctp_outq_tail_data(struct sctp_outq *q,
				       struct sctp_chunk *ch)
{
	__skb_queue_tail(&q->out, (struct sk_buff *)ch);
	q->out_qlen += ch->skb->len;
	return;
}

/* Insert a chunk behind chunk 'pos'. */
static inline void sctp_outq_insert_data(struct sctp_outq *q,
					 struct sctp_chunk *ch,
					 struct sctp_chunk *pos)
{
	__skb_insert((struct sk_buff *)ch, (struct sk_buff *)pos->prev,
		     (struct sk_buff *)pos, pos->list);
	q->out_qlen += ch->skb->len;
}

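/* The helpers below implement the SFR-CACC rules ("Split Fast Retransmit /
 * Changeover-Aware Congestion Control") for avoiding spurious fast
 * retransmissions after a primary-path changeover.  Each rule quoted in
 * the comments decides exactly one thing: whether the missing report
 * counter for a TSN t may be incremented for this SACK.  For example,
 * rule D below suppresses the increment when at least two destinations
 * saw new acks but t was not sent to the current primary, since the
 * reordering is then most likely an artifact of the changeover rather
 * than a sign of loss.
 */
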
/*
 * SFR-CACC algorithm:
 * D) If count_of_newacks is greater than or equal to 2
 * and t was not sent to the current primary then the
 * sender MUST NOT increment missing report count for t.
 */
static inline int sctp_cacc_skip_3_1_d(struct sctp_transport *primary,
				       struct sctp_transport *transport,
				       int count_of_newacks)
{
	if (count_of_newacks >= 2 && transport != primary)
		return 1;
	return 0;
}

/*
 * SFR-CACC algorithm:
 * F) If count_of_newacks is less than 2, let d be the
 * destination to which t was sent. If cacc_saw_newack
 * is 0 for destination d, then the sender MUST NOT
 * increment missing report count for t.
 */
static inline int sctp_cacc_skip_3_1_f(struct sctp_transport *transport,
				       int count_of_newacks)
{
	if (count_of_newacks < 2 && !transport->cacc.cacc_saw_newack)
		return 1;
	return 0;
}

/*
 * SFR-CACC algorithm:
 * 3.1) If CYCLING_CHANGEOVER is 0, the sender SHOULD
 * execute steps C, D, F.
 *
 * C has been implemented in sctp_outq_sack
 */
static inline int sctp_cacc_skip_3_1(struct sctp_transport *primary,
				     struct sctp_transport *transport,
				     int count_of_newacks)
{
	if (!primary->cacc.cycling_changeover) {
		if (sctp_cacc_skip_3_1_d(primary, transport, count_of_newacks))
			return 1;
		if (sctp_cacc_skip_3_1_f(transport, count_of_newacks))
			return 1;
		return 0;
	}
	return 0;
}

/*
 * SFR-CACC algorithm:
 * 3.2) Else if CYCLING_CHANGEOVER is 1, and t is less
 * than next_tsn_at_change of the current primary, then
 * the sender MUST NOT increment missing report count
 * for t.
 */
static inline int sctp_cacc_skip_3_2(struct sctp_transport *primary, __u32 tsn)
{
	if (primary->cacc.cycling_changeover &&
	    TSN_lt(tsn, primary->cacc.next_tsn_at_change))
		return 1;
	return 0;
}

/*
 * SFR-CACC algorithm:
 * 3) If the missing report count for TSN t is to be
 * incremented according to [RFC2960] and
 * [SCTP_STEWART-2002], and CHANGEOVER_ACTIVE is set,
 * then the sender MUST further execute steps 3.1 and
 * 3.2 to determine if the missing report count for
 * TSN t SHOULD NOT be incremented.
 *
 * 3.3) If 3.1 and 3.2 do not dictate that the missing
 * report count for t should not be incremented, then
 * the sender SHOULD increment missing report count for
 * t (according to [RFC2960] and [SCTP_STEWART_2002]).
 */
static inline int sctp_cacc_skip(struct sctp_transport *primary,
				 struct sctp_transport *transport,
				 int count_of_newacks,
				 __u32 tsn)
{
	if (primary->cacc.changeover_active &&
	    (sctp_cacc_skip_3_1(primary, transport, count_of_newacks)
	     || sctp_cacc_skip_3_2(primary, tsn)))
		return 1;
	return 0;
}

/* Generate a new outqueue. */
struct sctp_outq *sctp_outq_new(struct sctp_association *asoc)
{
	struct sctp_outq *q;

	q = t_new(struct sctp_outq, GFP_KERNEL);
	if (q) {
		sctp_outq_init(asoc, q);
		q->malloced = 1;
	}
	return q;
}

/* Initialize an existing sctp_outq.  This does the boring stuff.
 * You still need to define handlers if you really want to DO
 * something with this structure...
 */
void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
{
	q->asoc = asoc;
	skb_queue_head_init(&q->out);
	skb_queue_head_init(&q->control);
	INIT_LIST_HEAD(&q->retransmit);
	INIT_LIST_HEAD(&q->sacked);

	q->init_output = NULL;
	q->config_output = NULL;
	q->append_output = NULL;
	q->build_output = NULL;
	q->force_output = NULL;

	q->outstanding_bytes = 0;
	q->empty = 1;
	q->cork = 0;

	q->malloced = 0;
	q->out_qlen = 0;
}

/* Free the outqueue structure and any related pending chunks.
 */
void sctp_outq_teardown(struct sctp_outq *q)
{
	struct sctp_transport *transport;
	struct list_head *lchunk, *pos, *temp;
	struct sctp_chunk *chunk;

	/* Throw away unacknowledged chunks. */
	list_for_each(pos, &q->asoc->peer.transport_addr_list) {
		transport = list_entry(pos, struct sctp_transport, transports);
		while ((lchunk = sctp_list_dequeue(&transport->transmitted))) {
			chunk = list_entry(lchunk, struct sctp_chunk,
					   transmitted_list);
			/* Mark as part of a failed message. */
			sctp_datamsg_fail(chunk, q->error);
			sctp_chunk_free(chunk);
		}
	}

	/* Throw away chunks that have been gap ACKed. */
	list_for_each_safe(lchunk, temp, &q->sacked) {
		list_del_init(lchunk);
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		sctp_datamsg_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any chunks in the retransmit queue. */
	list_for_each_safe(lchunk, temp, &q->retransmit) {
		list_del_init(lchunk);
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		sctp_datamsg_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any leftover data chunks. */
	while ((chunk = sctp_outq_dequeue_data(q))) {

		/* Mark as send failure. */
		sctp_datamsg_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	q->error = 0;

	/* Throw away any leftover control chunks. */
	while ((chunk = (struct sctp_chunk *) skb_dequeue(&q->control)))
		sctp_chunk_free(chunk);
}

/* Free the outqueue structure and any related pending chunks. */
void sctp_outq_free(struct sctp_outq *q)
{
	/* Throw away leftover chunks. */
	sctp_outq_teardown(q);

	/* If we were kmalloc()'d, free the memory. */
	if (q->malloced)
		kfree(q);
}

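/* A rough map of how chunks move between the queues managed here:
 * new DATA waits on q->out and control chunks on q->control.  Once
 * sent, DATA sits on the owning transport's 'transmitted' list until
 * a SACK either moves it to q->sacked (acknowledged) or, via timeout
 * or fast retransmit, onto q->retransmit for another try.
 */
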
/* Put a new chunk in an sctp_outq. */
int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
{
	int error = 0;

	SCTP_DEBUG_PRINTK("sctp_outq_tail(%p, %p[%s])\n",
			  q, chunk, chunk && chunk->chunk_hdr ?
			  sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type))
			  : "Illegal Chunk");

	/* If it is data, queue it up, otherwise, send it
	 * immediately.
	 */
	if (SCTP_CID_DATA == chunk->chunk_hdr->type) {
		/* Is it OK to queue data chunks? */
		/* From 9. Termination of Association
		 *
		 * When either endpoint performs a shutdown, the
		 * association on each peer will stop accepting new
		 * data from its user and only deliver data in queue
		 * at the time of sending or receiving the SHUTDOWN
		 * chunk.
		 */
		switch (q->asoc->state) {
		case SCTP_STATE_EMPTY:
		case SCTP_STATE_CLOSED:
		case SCTP_STATE_SHUTDOWN_PENDING:
		case SCTP_STATE_SHUTDOWN_SENT:
		case SCTP_STATE_SHUTDOWN_RECEIVED:
		case SCTP_STATE_SHUTDOWN_ACK_SENT:
			/* Cannot send after transport endpoint shutdown */
			error = -ESHUTDOWN;
			break;

		default:
			SCTP_DEBUG_PRINTK("outqueueing (%p, %p[%s])\n",
			  q, chunk, chunk && chunk->chunk_hdr ?
			  sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type))
			  : "Illegal Chunk");

			sctp_outq_tail_data(q, chunk);
			if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
				SCTP_INC_STATS(SctpOutUnorderChunks);
			else
				SCTP_INC_STATS(SctpOutOrderChunks);
			q->empty = 0;
			break;
		}
	} else {
		__skb_queue_tail(&q->control, (struct sk_buff *) chunk);
		SCTP_INC_STATS(SctpOutCtrlChunks);
	}

	if (error < 0)
		return error;

	if (!q->cork)
		error = sctp_outq_flush(q, 0);

	return error;
}

/* Insert a chunk into the retransmit queue.  Chunks on the retransmit
 * queue are kept in order, based on the TSNs.
 */
void sctp_retransmit_insert(struct list_head *tlchunk, struct sctp_outq *q)
{
	struct list_head *rlchunk;
	struct sctp_chunk *tchunk, *rchunk;
	__u32 ttsn, rtsn;
	int done = 0;

	tchunk = list_entry(tlchunk, struct sctp_chunk, transmitted_list);
	ttsn = ntohl(tchunk->subh.data_hdr->tsn);

	list_for_each(rlchunk, &q->retransmit) {
		rchunk = list_entry(rlchunk, struct sctp_chunk,
				    transmitted_list);
		rtsn = ntohl(rchunk->subh.data_hdr->tsn);
		if (TSN_lt(ttsn, rtsn)) {
			list_add(tlchunk, rlchunk->prev);
			done = 1;
			break;
		}
	}
	if (!done) {
		list_add_tail(tlchunk, &q->retransmit);
	}
}

/* Mark all the eligible packets on a transport for retransmission. */
void sctp_retransmit_mark(struct sctp_outq *q,
			  struct sctp_transport *transport,
			  __u8 fast_retransmit)
{
	struct list_head *lchunk, *ltemp;
	struct sctp_chunk *chunk;

	/* Walk through the specified transmitted queue. */
	list_for_each_safe(lchunk, ltemp, &transport->transmitted) {
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);

		/* If we are doing retransmission due to a fast retransmit,
		 * only the chunks that are marked for fast retransmit
		 * should be added to the retransmit queue.  If we are doing
		 * retransmission due to a timeout or pmtu discovery, only the
		 * chunks that are not yet acked should be added to the
		 * retransmit queue.
		 */
		if ((fast_retransmit && chunk->fast_retransmit) ||
		    (!fast_retransmit && !chunk->tsn_gap_acked)) {
			/* RFC 2960 6.2.1 Processing a Received SACK
			 *
			 * C) Any time a DATA chunk is marked for
			 * retransmission (via either T3-rtx timer expiration
			 * (Section 6.3.3) or via fast retransmit
			 * (Section 7.2.4)), add the data size of those
			 * chunks to the rwnd.
			 */
			q->asoc->peer.rwnd += sctp_data_size(chunk);
			q->outstanding_bytes -= sctp_data_size(chunk);
			transport->flight_size -= sctp_data_size(chunk);

			/* sctpimpguide-05 Section 2.8.2
			 * M5) If a T3-rtx timer expires, the
			 * 'TSN.Missing.Report' of all affected TSNs is set
			 * to 0.
			 */
			chunk->tsn_missing_report = 0;

			/* If a chunk that is being used for RTT measurement
			 * has to be retransmitted, we cannot use this chunk
			 * anymore for RTT measurements.  Reset rto_pending so
			 * that a new RTT measurement is started when a new
			 * data chunk is sent.
			 */
			if (chunk->rtt_in_progress) {
				chunk->rtt_in_progress = 0;
				transport->rto_pending = 0;
			}

			/* Move the chunk to the retransmit queue.  The chunks
			 * on the retransmit queue are always kept in order.
			 */
			list_del_init(lchunk);
			sctp_retransmit_insert(lchunk, q);
		}
	}

	SCTP_DEBUG_PRINTK("%s: transport: %p, fast_retransmit: %d, "
			  "cwnd: %d, ssthresh: %d, flight_size: %d, "
			  "pba: %d\n", __FUNCTION__,
			  transport, fast_retransmit,
			  transport->cwnd, transport->ssthresh,
			  transport->flight_size,
			  transport->partial_bytes_acked);

}

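/* Note that in the switch below only T3-rtx expiry and fast retransmit
 * lower the congestion window; a path-MTU-discovery retransmission
 * (SCTP_RTXR_PMTUD) falls through without touching cwnd, since such
 * data was not lost, it merely needs to be re-sent in smaller packets.
 */
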
/* Mark all the eligible packets on a transport for retransmission and force
 * one packet out.
 */
void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
		     sctp_retransmit_reason_t reason)
{
	int error = 0;
	__u8 fast_retransmit = 0;

	switch (reason) {
	case SCTP_RTXR_T3_RTX:
		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX);
		/* Update the retran path if the T3-rtx timer has expired for
		 * the current retran path.
		 */
		if (transport == transport->asoc->peer.retran_path)
			sctp_assoc_update_retran_path(transport->asoc);
		break;
	case SCTP_RTXR_FAST_RTX:
		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
		fast_retransmit = 1;
		break;
	case SCTP_RTXR_PMTUD:
	default:
		break;
	}

	sctp_retransmit_mark(q, transport, fast_retransmit);

	error = sctp_outq_flush(q, /* rtx_timeout */ 1);

	if (error)
		q->asoc->base.sk->sk_err = -error;
}

/*
 * Transmit DATA chunks on the retransmit queue.  Upon return from
 * sctp_outq_flush_rtx() the packet 'pkt' may contain chunks which
 * need to be transmitted by the caller.
 * We assume that pkt->transport has already been set.
 *
 * The return value is a normal kernel error return value.
 */
static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
			       int rtx_timeout, int *start_timer)
{
	struct list_head *lqueue;
	struct list_head *lchunk;
	struct sctp_transport *transport = pkt->transport;
	sctp_xmit_t status;
	struct sctp_chunk *chunk;
	struct sctp_association *asoc;
	int error = 0;

	asoc = q->asoc;
	lqueue = &q->retransmit;

	/* RFC 2960 6.3.3 Handle T3-rtx Expiration
	 *
	 * E3) Determine how many of the earliest (i.e., lowest TSN)
	 * outstanding DATA chunks for the address for which the
	 * T3-rtx has expired will fit into a single packet, subject
	 * to the MTU constraint for the path corresponding to the
	 * destination transport address to which the retransmission
	 * is being sent (this may be different from the address for
	 * which the timer expires [see Section 6.4]).  Call this value
	 * K.  Bundle and retransmit those K DATA chunks in a single
	 * packet to the destination endpoint.
	 *
	 * [Just to be painfully clear, if we are retransmitting
	 * because a timeout just happened, we should send only ONE
	 * packet of retransmitted data.]
	 */
	lchunk = sctp_list_dequeue(lqueue);

	while (lchunk) {
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);

		/* Make sure that Gap Acked TSNs are not retransmitted.  A
		 * simple approach is just to move such TSNs out of the
		 * way and into a 'transmitted' queue and skip to the
		 * next chunk.
		 */
		if (chunk->tsn_gap_acked) {
			list_add_tail(lchunk, &transport->transmitted);
			lchunk = sctp_list_dequeue(lqueue);
			continue;
		}

		/* Attempt to append this chunk to the packet. */
		status = (*q->append_output)(pkt, chunk);

		switch (status) {
		case SCTP_XMIT_PMTU_FULL:
			/* Send this packet. */
			if ((error = (*q->force_output)(pkt)) == 0)
				*start_timer = 1;

			/* If we are retransmitting, we should only
			 * send a single packet.
			 */
			if (rtx_timeout) {
				list_add(lchunk, lqueue);
				lchunk = NULL;
			}

			/* Bundle lchunk in the next round. */
			break;

		case SCTP_XMIT_RWND_FULL:
			/* Send this packet. */
			if ((error = (*q->force_output)(pkt)) == 0)
				*start_timer = 1;

			/* Stop sending DATA as there is no more room
			 * at the receiver.
			 */
			list_add(lchunk, lqueue);
			lchunk = NULL;
			break;

		default:
			/* The append was successful, so add this chunk to
			 * the transmitted list.
			 */
			list_add_tail(lchunk, &transport->transmitted);
			*start_timer = 1;
			q->empty = 0;

			/* Retrieve a new chunk to bundle. */
			lchunk = sctp_list_dequeue(lqueue);
			break;
		}
	}

	return error;
}

/* Uncork the outqueue: flush anything that was queued up while the
 * queue was corked.
 */
int sctp_outq_uncork(struct sctp_outq *q)
{
	int error = 0;
	if (q->cork) {
		q->cork = 0;
		error = sctp_outq_flush(q, 0);
	}
	return error;
}

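/* A sketch of the corking pattern, as the functions above combine:
 * a caller that wants several chunks bundled sets q->cork first, so
 * that sctp_outq_tail() only queues, then uncorks once at the end:
 *
 *	q->cork = 1;
 *	sctp_outq_tail(q, chunk1);	(queued, no flush)
 *	sctp_outq_tail(q, chunk2);	(queued, no flush)
 *	error = sctp_outq_uncork(q);	(one flush for both chunks)
 */
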
/*
 * Try to flush an outqueue.
 *
 * Description: Send everything in q which we legally can, subject to
 * congestion limitations.
 * Note: This function can be called from multiple contexts so appropriate
 * locking concerns must be made.  Today we use the sock lock to protect
 * this function.
 */
int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
{
	struct sctp_packet *packet;
	struct sctp_packet singleton;
	struct sctp_association *asoc = q->asoc;
	int ecn_capable = asoc->peer.ecn_capable;
	__u16 sport = asoc->base.bind_addr.port;
	__u16 dport = asoc->peer.port;
	__u32 vtag = asoc->peer.i.init_tag;
	/* This is the ECNE handler for singleton packets. */
	sctp_packet_phandler_t *s_ecne_handler = NULL;
	sctp_packet_phandler_t *ecne_handler = NULL;
	struct sk_buff_head *queue;
	struct sctp_transport *transport = NULL;
	struct sctp_transport *new_transport;
	struct sctp_chunk *chunk;
	sctp_xmit_t status;
	int error = 0;
	int start_timer = 0;

	/* These transports have chunks to send. */
	struct list_head transport_list;
	struct list_head *ltransport;

	INIT_LIST_HEAD(&transport_list);
	packet = NULL;

	/*
	 * 6.10 Bundling
	 *   ...
	 *   When bundling control chunks with DATA chunks, an
	 *   endpoint MUST place control chunks first in the outbound
	 *   SCTP packet.  The transmitter MUST transmit DATA chunks
	 *   within a SCTP packet in increasing order of TSN.
	 *   ...
	 */
	if (ecn_capable) {
		s_ecne_handler = &sctp_get_no_prepend;
		ecne_handler = &sctp_get_ecne_prepend;
	}

	queue = &q->control;
	while ((chunk = (struct sctp_chunk *)skb_dequeue(queue))) {
		/* Pick the right transport to use. */
		new_transport = chunk->transport;

		if (!new_transport) {
			new_transport = asoc->peer.active_path;
		} else if (!new_transport->active) {
			/* If the chunk is Heartbeat, send it to
			 * chunk->transport, even if it is inactive.
			 */
			if (chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT)
				new_transport = asoc->peer.active_path;
		}

		/* Are we switching transports?
		 * Take care of transport locks.
		 */
		if (new_transport != transport) {
			transport = new_transport;
			if (list_empty(&transport->send_ready)) {
				list_add_tail(&transport->send_ready,
					      &transport_list);
			}
			packet = &transport->packet;
			(*q->config_output)(packet, vtag,
					    ecn_capable, ecne_handler);
		}

		switch (chunk->chunk_hdr->type) {
		/*
		 * 6.10 Bundling
		 *   ...
		 *   An endpoint MUST NOT bundle INIT, INIT ACK or SHUTDOWN
		 *   COMPLETE with any other chunks.  [Send them immediately.]
		 */
		case SCTP_CID_INIT:
		case SCTP_CID_INIT_ACK:
		case SCTP_CID_SHUTDOWN_COMPLETE:
			(*q->init_output)(&singleton, transport, sport, dport);
			(*q->config_output)(&singleton, vtag, ecn_capable,
					    s_ecne_handler);
			(void) (*q->build_output)(&singleton, chunk);
			error = (*q->force_output)(&singleton);
			if (error < 0)
				return error;
			break;

		case SCTP_CID_ABORT:
		case SCTP_CID_SACK:
		case SCTP_CID_HEARTBEAT:
		case SCTP_CID_HEARTBEAT_ACK:
		case SCTP_CID_SHUTDOWN:
		case SCTP_CID_SHUTDOWN_ACK:
		case SCTP_CID_ERROR:
		case SCTP_CID_COOKIE_ECHO:
		case SCTP_CID_COOKIE_ACK:
		case SCTP_CID_ECN_ECNE:
		case SCTP_CID_ECN_CWR:
			(void) (*q->build_output)(packet, chunk);
			break;

		case SCTP_CID_ASCONF:
		case SCTP_CID_ASCONF_ACK:
			(void) (*q->build_output)(packet, chunk);
			break;

		default:
			/* We built a chunk with an illegal type! */
			BUG();
		}
	}

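	/* Every transport touched above was queued on the local
	 * transport_list via its 'send_ready' link; nothing bundled has
	 * actually hit the wire yet.  The sctp_flush_out label at the
	 * bottom walks that list and force-flushes each transport's
	 * pending packet exactly once.
	 */
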
	/* Is it OK to send data chunks? */
	switch (asoc->state) {
	case SCTP_STATE_COOKIE_ECHOED:
		/* Only allow bundling when this packet has a COOKIE-ECHO
		 * chunk.
		 */
		if (!packet || !packet->has_cookie_echo)
			break;

		/* fallthru */
	case SCTP_STATE_ESTABLISHED:
	case SCTP_STATE_SHUTDOWN_PENDING:
	case SCTP_STATE_SHUTDOWN_RECEIVED:
		/*
		 * RFC 2960 6.1 Transmission of DATA Chunks
		 *
		 * C) When the time comes for the sender to transmit,
		 * before sending new DATA chunks, the sender MUST
		 * first transmit any outstanding DATA chunks which
		 * are marked for retransmission (limited by the
		 * current cwnd).
		 */
		if (!list_empty(&q->retransmit)) {
			if (transport == asoc->peer.retran_path)
				goto retran;

			/* Switch transports & prepare the packet. */

			transport = asoc->peer.retran_path;

			if (list_empty(&transport->send_ready)) {
				list_add_tail(&transport->send_ready,
					      &transport_list);
			}

			packet = &transport->packet;
			(*q->config_output)(packet, vtag,
					    ecn_capable, ecne_handler);
		retran:
			error = sctp_outq_flush_rtx(q, packet,
						    rtx_timeout, &start_timer);

			if (start_timer)
				sctp_transport_reset_timers(transport);

			/* This can happen on COOKIE-ECHO resend.  Only
			 * one chunk can get bundled with a COOKIE-ECHO.
			 */
			if (packet->has_cookie_echo)
				goto sctp_flush_out;

			/* Don't send new data if there is still data
			 * waiting to retransmit.
			 */
			if (!list_empty(&q->retransmit))
				goto sctp_flush_out;
		}

		/* Finally, transmit new packets. */
		start_timer = 0;
		queue = &q->out;

		while ((chunk = sctp_outq_dequeue_data(q))) {
			/* RFC 2960 6.5 Every DATA chunk MUST carry a valid
			 * stream identifier.
			 */
			if (chunk->sinfo.sinfo_stream >=
			    asoc->c.sinit_num_ostreams) {

				/* Mark as a failed send. */
				sctp_datamsg_fail(chunk, SCTP_ERROR_INV_STRM);
				sctp_chunk_free(chunk);
				continue;
			}

			/* Has this chunk expired? */
			if (sctp_datamsg_expires(chunk)) {
				sctp_datamsg_fail(chunk, 0);
				sctp_chunk_free(chunk);
				continue;
			}

			/* If there is a specified transport, use it.
			 * Otherwise, we want to use the active path.
			 */
			new_transport = chunk->transport;
			if (!new_transport || !new_transport->active)
				new_transport = asoc->peer.active_path;

			/* Change packets if necessary. */
			if (new_transport != transport) {
				transport = new_transport;

				/* Schedule to have this transport's
				 * packet flushed.
				 */
				if (list_empty(&transport->send_ready)) {
					list_add_tail(&transport->send_ready,
						      &transport_list);
				}

				packet = &transport->packet;
				(*q->config_output)(packet, vtag,
						    ecn_capable, ecne_handler);
			}

			SCTP_DEBUG_PRINTK("sctp_transmit_packet(%p, %p[%s]), ",
					  q, chunk,
					  chunk && chunk->chunk_hdr ?
					  sctp_cname(SCTP_ST_CHUNK(
						  chunk->chunk_hdr->type))
					  : "Illegal Chunk");

			SCTP_DEBUG_PRINTK("TX TSN 0x%x skb->head "
					  "%p skb->users %d.\n",
					  ntohl(chunk->subh.data_hdr->tsn),
					  chunk->skb ? chunk->skb->head : NULL,
					  chunk->skb ?
					  atomic_read(&chunk->skb->users) : -1);

			/* Add the chunk to the packet. */
			status = (*q->build_output)(packet, chunk);

			switch (status) {
			case SCTP_XMIT_PMTU_FULL:
			case SCTP_XMIT_RWND_FULL:
			case SCTP_XMIT_NAGLE_DELAY:
				/* We could not append this chunk, so put
				 * the chunk back on the output queue.
				 */
				SCTP_DEBUG_PRINTK("sctp_outq_flush: could "
					"not transmit TSN: 0x%x, status: %d\n",
					ntohl(chunk->subh.data_hdr->tsn),
					status);
				sctp_outq_head_data(q, chunk);
				goto sctp_flush_out;

			case SCTP_XMIT_OK:
				break;

			default:
				BUG();
			}

			/* BUG: We assume that the (*q->force_output())
			 * call below will succeed all the time and add the
			 * chunk to the transmitted list and restart the
			 * timers.
			 * It is possible that the call can fail under OOM
			 * conditions.
			 *
			 * Is this really a problem?  Won't this behave
			 * like a lost TSN?
			 */
			list_add_tail(&chunk->transmitted_list,
				      &transport->transmitted);

			sctp_transport_reset_timers(transport);

			q->empty = 0;

			/* Only let one DATA chunk get bundled with a
			 * COOKIE-ECHO chunk.
			 */
			if (packet->has_cookie_echo)
				goto sctp_flush_out;
		}
		break;

	default:
		/* Do nothing. */
		break;
	}

sctp_flush_out:

	/* Before returning, examine all the transports touched in
	 * this call.  Right now, we bluntly force clear all the
	 * transports.  Things might change after we implement Nagle.
	 * But such an examination is still required.
	 *
	 * --xguo
	 */
	while ((ltransport = sctp_list_dequeue(&transport_list)) != NULL) {
		struct sctp_transport *t = list_entry(ltransport,
						      struct sctp_transport,
						      send_ready);
		if (t != transport)
			transport = t;

		packet = &transport->packet;
		if (packet->size != SCTP_IP_OVERHEAD)
			error = (*q->force_output)(packet);
	}

	return error;
}

/* Set the various output handling callbacks. */
int sctp_outq_set_output_handlers(struct sctp_outq *q,
				  sctp_outq_ohandler_init_t init,
				  sctp_outq_ohandler_config_t config,
				  sctp_outq_ohandler_t append,
				  sctp_outq_ohandler_t build,
				  sctp_outq_ohandler_force_t force)
{
	q->init_output = init;
	q->config_output = config;
	q->append_output = append;
	q->build_output = build;
	q->force_output = force;
	return 0;
}

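/* These handlers are typically wired to the sctp_packet_*() routines
 * that build and transmit real packets; keeping them as function
 * pointers lets an alternative output path (a test harness, for
 * instance) be substituted without touching the queueing logic here.
 */
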
/* Update unack_data based on the incoming SACK chunk */
static void sctp_sack_update_unack_data(struct sctp_association *assoc,
					struct sctp_sackhdr *sack)
{
	sctp_sack_variable_t *frags;
	__u16 unack_data;
	int i;

	unack_data = assoc->next_tsn - assoc->ctsn_ack_point - 1;

	frags = sack->variable;
	for (i = 0; i < ntohs(sack->num_gap_ack_blocks); i++) {
		unack_data -= ((ntohs(frags[i].gab.end) -
				ntohs(frags[i].gab.start) + 1));
	}

	assoc->unack_data = unack_data;
}

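/* Worked example (hypothetical numbers): with next_tsn = 110 and
 * ctsn_ack_point = 99, there are 110 - 99 - 1 = 10 TSNs in flight.
 * A single gap ack block with start = 3, end = 5 covers three TSNs
 * (102-104), so unack_data ends up as 10 - 3 = 7.
 */
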
/* Return the highest new tsn that is acknowledged by the given SACK chunk. */
static __u32 sctp_highest_new_tsn(struct sctp_sackhdr *sack,
				  struct sctp_association *asoc)
{
	struct list_head *ltransport, *lchunk;
	struct sctp_transport *transport;
	struct sctp_chunk *chunk;
	__u32 highest_new_tsn, tsn;
	struct list_head *transport_list = &asoc->peer.transport_addr_list;

	highest_new_tsn = ntohl(sack->cum_tsn_ack);

	list_for_each(ltransport, transport_list) {
		transport = list_entry(ltransport, struct sctp_transport,
				       transports);
		list_for_each(lchunk, &transport->transmitted) {
			chunk = list_entry(lchunk, struct sctp_chunk,
					   transmitted_list);
			tsn = ntohl(chunk->subh.data_hdr->tsn);

			if (!chunk->tsn_gap_acked &&
			    TSN_lt(highest_new_tsn, tsn) &&
			    sctp_acked(sack, tsn))
				highest_new_tsn = tsn;
		}
	}

	return highest_new_tsn;
}

/* This is where we REALLY process a SACK.
 *
 * Process the SACK against the outqueue.  Mostly, this just frees
 * things off the transmitted queue.
 */
int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
{
	struct sctp_association *asoc = q->asoc;
	struct sctp_transport *transport;
	struct sctp_chunk *tchunk;
	struct list_head *lchunk, *transport_list, *pos, *temp;
	sctp_sack_variable_t *frags = sack->variable;
	__u32 sack_ctsn, ctsn, tsn;
	__u32 highest_tsn, highest_new_tsn;
	__u32 sack_a_rwnd;
	unsigned outstanding;
	struct sctp_transport *primary = asoc->peer.primary_path;
	int count_of_newacks = 0;

	/* Grab the association's destination address list. */
	transport_list = &asoc->peer.transport_addr_list;

	sack_ctsn = ntohl(sack->cum_tsn_ack);

	/*
	 * SFR-CACC algorithm:
	 * On receipt of a SACK the sender SHOULD execute the
	 * following statements.
	 *
	 * 1) If the cumulative ack in the SACK passes next tsn_at_change
	 * on the current primary, the CHANGEOVER_ACTIVE flag SHOULD be
	 * cleared.  The CYCLING_CHANGEOVER flag SHOULD also be cleared for
	 * all destinations.
	 */
	if (TSN_lte(primary->cacc.next_tsn_at_change, sack_ctsn)) {
		primary->cacc.changeover_active = 0;
		list_for_each(pos, transport_list) {
			transport = list_entry(pos, struct sctp_transport,
					       transports);
			transport->cacc.cycling_changeover = 0;
		}
	}

	/*
	 * SFR-CACC algorithm:
	 * 2) If the SACK contains gap acks and the flag CHANGEOVER_ACTIVE
	 * is set the receiver of the SACK MUST take the following actions:
	 *
	 * A) Initialize the cacc_saw_newack to 0 for all destination
	 * addresses.
	 */
	if (sack->num_gap_ack_blocks > 0 &&
	    primary->cacc.changeover_active) {
		list_for_each(pos, transport_list) {
			transport = list_entry(pos, struct sctp_transport,
					       transports);
			transport->cacc.cacc_saw_newack = 0;
		}
	}

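	/* Example of the computation below: if the cumulative TSN ack
	 * is 100 and the last gap ack block has end = 5, the SACK
	 * acknowledges TSNs up through 100 + 5 = 105, so 105 is the
	 * highest TSN this SACK refers to.
	 */
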
	/* Get the highest TSN in the sack.  Guard against reading past
	 * the gap ack block array when the SACK carries none.
	 */
	highest_tsn = sack_ctsn;
	if (ntohs(sack->num_gap_ack_blocks))
		highest_tsn +=
		    ntohs(frags[ntohs(sack->num_gap_ack_blocks) - 1].gab.end);

	if (TSN_lt(asoc->highest_sacked, highest_tsn)) {
		highest_new_tsn = highest_tsn;
		asoc->highest_sacked = highest_tsn;
	} else {
		highest_new_tsn = sctp_highest_new_tsn(sack, asoc);
	}

	/* Run through the retransmit queue.  Credit bytes received
	 * and free those chunks that we can.
	 */
	sctp_check_transmitted(q, &q->retransmit, NULL, sack, highest_new_tsn);
	sctp_mark_missing(q, &q->retransmit, NULL, highest_new_tsn, 0);

	/* Run through the transmitted queue.
	 * Credit bytes received and free those chunks which we can.
	 *
	 * This is a MASSIVE candidate for optimization.
	 */
	list_for_each(pos, transport_list) {
		transport = list_entry(pos, struct sctp_transport,
				       transports);
		sctp_check_transmitted(q, &transport->transmitted,
				       transport, sack, highest_new_tsn);
		/*
		 * SFR-CACC algorithm:
		 * C) Let count_of_newacks be the number of
		 * destinations for which cacc_saw_newack is set.
		 */
		if (transport->cacc.cacc_saw_newack)
			count_of_newacks++;
	}

	list_for_each(pos, transport_list) {
		transport = list_entry(pos, struct sctp_transport,
				       transports);
		sctp_mark_missing(q, &transport->transmitted, transport,
				  highest_new_tsn, count_of_newacks);
	}

	/* Move the Cumulative TSN Ack Point if appropriate. */
	if (TSN_lt(asoc->ctsn_ack_point, sack_ctsn))
		asoc->ctsn_ack_point = sack_ctsn;

	/* Update unack_data field in the assoc. */
	sctp_sack_update_unack_data(asoc, sack);

	ctsn = asoc->ctsn_ack_point;

	SCTP_DEBUG_PRINTK("%s: sack Cumulative TSN Ack is 0x%x.\n",
			  __FUNCTION__, sack_ctsn);
	SCTP_DEBUG_PRINTK("%s: Cumulative TSN Ack of association "
			  "%p is 0x%x.\n", __FUNCTION__, asoc, ctsn);

	/* Throw away stuff rotting on the sack queue. */
	list_for_each_safe(lchunk, temp, &q->sacked) {
		tchunk = list_entry(lchunk, struct sctp_chunk,
				    transmitted_list);
		tsn = ntohl(tchunk->subh.data_hdr->tsn);
		if (TSN_lte(tsn, ctsn))
			sctp_chunk_free(tchunk);
	}

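	/* For example: a SACK advertising a_rwnd = 20000 while 3000
	 * bytes remain outstanding leaves the peer 17000 bytes of
	 * usable window; if outstanding ever exceeds a_rwnd, the
	 * window is simply treated as zero.
	 */
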
	/* ii) Set rwnd equal to the newly received a_rwnd minus the
	 * number of bytes still outstanding after processing the
	 * Cumulative TSN Ack and the Gap Ack Blocks.
	 */

	sack_a_rwnd = ntohl(sack->a_rwnd);
	outstanding = q->outstanding_bytes;

	if (outstanding < sack_a_rwnd)
		sack_a_rwnd -= outstanding;
	else
		sack_a_rwnd = 0;

	asoc->peer.rwnd = sack_a_rwnd;

	/* See if all chunks are acked.
	 * Make sure the empty queue handler will get run later.
	 */
	q->empty = skb_queue_empty(&q->out) && list_empty(&q->retransmit);
	if (!q->empty)
		goto finish;

	list_for_each(pos, transport_list) {
		transport = list_entry(pos, struct sctp_transport,
				       transports);
		q->empty = q->empty && list_empty(&transport->transmitted);
		if (!q->empty)
			goto finish;
	}

	SCTP_DEBUG_PRINTK("sack queue is empty.\n");
finish:
	return q->empty;
}

/* Is the outqueue empty? */
int sctp_outq_is_empty(const struct sctp_outq *q)
{
	return q->empty;
}

/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/

/* Go through a transport's transmitted list or the association's retransmit
 * list and move chunks that are acked by the Cumulative TSN Ack to q->sacked.
 * The retransmit list will not have an associated transport.
 *
 * I added coherent debug information output.	--xguo
 *
 * Instead of printing 'sacked' or 'kept' for each TSN on the
 * transmitted_queue, we print a range: SACKED: TSN1-TSN2, TSN3, TSN4-TSN5.
 * KEPT TSN6-TSN7, etc.
 */
static void sctp_check_transmitted(struct sctp_outq *q,
				   struct list_head *transmitted_queue,
				   struct sctp_transport *transport,
				   struct sctp_sackhdr *sack,
				   __u32 highest_new_tsn_in_sack)
{
	struct list_head *lchunk;
	struct sctp_chunk *tchunk;
	struct list_head tlist;
	__u32 tsn;
	__u32 sack_ctsn;
	__u32 rtt;
	__u8 restart_timer = 0;
	int bytes_acked = 0;

	/* These state variables are for coherent debug output. --xguo */

#if SCTP_DEBUG
	__u32 dbg_ack_tsn = 0;	     /* An ACKed TSN range starts here... */
	__u32 dbg_last_ack_tsn = 0;  /* ...and finishes here.	     */
	__u32 dbg_kept_tsn = 0;	     /* An un-ACKed range starts here... */
	__u32 dbg_last_kept_tsn = 0; /* ...and finishes here.	     */

	/* 0 : The last TSN was ACKed.
	 * 1 : The last TSN was NOT ACKed (i.e. KEPT).
	 * -1: We need to initialize.
	 */
	int dbg_prt_state = -1;
#endif /* SCTP_DEBUG */

	sack_ctsn = ntohl(sack->cum_tsn_ack);

	INIT_LIST_HEAD(&tlist);

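	/* The loop below dequeues every chunk on the queue and sorts it
	 * into one of three outcomes: acked at or below the cumulative
	 * TSN (moved to q->sacked for final disposal), gap-acked above
	 * it (counted but kept on a local list, since the receiver may
	 * still renege), or not acked at all (kept, and treated as a
	 * renege if it had been gap-acked before).
	 */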
	/* The while loop will skip empty transmitted queues. */
	while (NULL != (lchunk = sctp_list_dequeue(transmitted_queue))) {
		tchunk = list_entry(lchunk, struct sctp_chunk,
				    transmitted_list);

		tsn = ntohl(tchunk->subh.data_hdr->tsn);
		if (sctp_acked(sack, tsn)) {
			/* If this queue is the retransmit queue, the
			 * retransmit timer has already reclaimed
			 * the outstanding bytes for this chunk, so only
			 * count bytes associated with a transport.
			 */
			if (transport) {
				/* If this chunk is being used for RTT
				 * measurement, calculate the RTT and update
				 * the RTO using this value.
				 *
				 * 6.3.1 C5) Karn's algorithm: RTT measurements
				 * MUST NOT be made using packets that were
				 * retransmitted (and thus for which it is
				 * ambiguous whether the reply was for the
				 * first instance of the packet or a later
				 * instance).
				 */
				if (!tchunk->tsn_gap_acked &&
				    !tchunk->resent &&
				    tchunk->rtt_in_progress) {
					rtt = jiffies - tchunk->sent_at;
					sctp_transport_update_rto(transport,
								  rtt);
				}
			}
			if (TSN_lte(tsn, sack_ctsn)) {
				/* RFC 2960 6.3.2 Retransmission Timer Rules
				 *
				 * R3) Whenever a SACK is received
				 * that acknowledges the DATA chunk
				 * with the earliest outstanding TSN
				 * for that address, restart T3-rtx
				 * timer for that address with its
				 * current RTO.
				 */
				restart_timer = 1;

				if (!tchunk->tsn_gap_acked) {
					tchunk->tsn_gap_acked = 1;
					bytes_acked += sctp_data_size(tchunk);
					/*
					 * SFR-CACC algorithm:
					 * 2) If the SACK contains gap acks
					 * and the flag CHANGEOVER_ACTIVE is
					 * set the receiver of the SACK MUST
					 * take the following action:
					 *
					 * B) For each TSN t being acked that
					 * has not been acked in any SACK so
					 * far, set cacc_saw_newack to 1 for
					 * the destination that the TSN was
					 * sent to.
					 */
					if (transport &&
					    sack->num_gap_ack_blocks &&
					    q->asoc->peer.primary_path->cacc.
					    changeover_active)
						transport->cacc.cacc_saw_newack
							= 1;
				}

				list_add_tail(&tchunk->transmitted_list,
					      &q->sacked);
			} else {
				/* RFC2960 7.2.4, sctpimpguide-05 2.8.2
				 * M2) Each time a SACK arrives reporting
				 * 'Stray DATA chunk(s)' record the highest TSN
				 * reported as newly acknowledged, call this
				 * value 'HighestTSNinSack'.  A newly
				 * acknowledged DATA chunk is one not
				 * previously acknowledged in a SACK.
				 *
				 * When the SCTP sender of data receives a SACK
				 * chunk that acknowledges, for the first time,
				 * the receipt of a DATA chunk, all the still
				 * unacknowledged DATA chunks whose TSN is
				 * older than that newly acknowledged DATA
				 * chunk, are qualified as 'Stray DATA chunks'.
				 */
				if (!tchunk->tsn_gap_acked) {
					tchunk->tsn_gap_acked = 1;
					bytes_acked += sctp_data_size(tchunk);
				}
				list_add_tail(lchunk, &tlist);
			}

#if SCTP_DEBUG
			switch (dbg_prt_state) {
			case 0:	/* last TSN was ACKed */
				if (dbg_last_ack_tsn + 1 == tsn) {
					/* This TSN belongs to the
					 * current ACK range.
					 */
					break;
				}

				if (dbg_last_ack_tsn != dbg_ack_tsn) {
					/* Display the end of the
					 * current range.
					 */
					SCTP_DEBUG_PRINTK("-%08x",
							  dbg_last_ack_tsn);
				}

				/* Start a new range. */
				SCTP_DEBUG_PRINTK(",%08x", tsn);
				dbg_ack_tsn = tsn;
				break;

			case 1:	/* The last TSN was NOT ACKed. */
				if (dbg_last_kept_tsn != dbg_kept_tsn) {
					/* Display the end of current range. */
					SCTP_DEBUG_PRINTK("-%08x",
							  dbg_last_kept_tsn);
				}

				SCTP_DEBUG_PRINTK("\n");

				/* FALL THROUGH... */
			default:
				/* This is the first-ever TSN we examined. */
				/* Start a new range of ACK-ed TSNs. */
				SCTP_DEBUG_PRINTK("ACKed: %08x", tsn);
				dbg_prt_state = 0;
				dbg_ack_tsn = tsn;
			}

			dbg_last_ack_tsn = tsn;
#endif /* SCTP_DEBUG */

		} else {
			if (tchunk->tsn_gap_acked) {
				SCTP_DEBUG_PRINTK("%s: Receiver reneged on "
						  "data TSN: 0x%x\n",
						  __FUNCTION__,
						  tsn);
				tchunk->tsn_gap_acked = 0;

				bytes_acked -= sctp_data_size(tchunk);

				/* RFC 2960 6.3.2 Retransmission Timer Rules
				 *
				 * R4) Whenever a SACK is received missing a
				 * TSN that was previously acknowledged via a
				 * Gap Ack Block, start T3-rtx for the
				 * destination address to which the DATA
				 * chunk was originally
				 * transmitted if it is not already running.
				 */
				restart_timer = 1;
			}

			list_add_tail(lchunk, &tlist);

#if SCTP_DEBUG
			/* See the above comments on ACK-ed TSNs. */
			switch (dbg_prt_state) {
			case 1:
				if (dbg_last_kept_tsn + 1 == tsn)
					break;

				if (dbg_last_kept_tsn != dbg_kept_tsn)
					SCTP_DEBUG_PRINTK("-%08x",
							  dbg_last_kept_tsn);

				SCTP_DEBUG_PRINTK(",%08x", tsn);
				dbg_kept_tsn = tsn;
				break;

			case 0:
				if (dbg_last_ack_tsn != dbg_ack_tsn)
					SCTP_DEBUG_PRINTK("-%08x",
							  dbg_last_ack_tsn);
				SCTP_DEBUG_PRINTK("\n");

				/* FALL THROUGH... */
			default:
				SCTP_DEBUG_PRINTK("KEPT: %08x", tsn);
				dbg_prt_state = 1;
				dbg_kept_tsn = tsn;
			}

			dbg_last_kept_tsn = tsn;
#endif /* SCTP_DEBUG */
		}
	}

#if SCTP_DEBUG
	/* Finish off the last range, displaying its ending TSN. */
	switch (dbg_prt_state) {
	case 0:
		if (dbg_last_ack_tsn != dbg_ack_tsn) {
			SCTP_DEBUG_PRINTK("-%08x\n", dbg_last_ack_tsn);
		} else {
			SCTP_DEBUG_PRINTK("\n");
		}
		break;

	case 1:
		if (dbg_last_kept_tsn != dbg_kept_tsn) {
			SCTP_DEBUG_PRINTK("-%08x\n", dbg_last_kept_tsn);
		} else {
			SCTP_DEBUG_PRINTK("\n");
		}
	}
#endif /* SCTP_DEBUG */
	if (transport) {
		if (bytes_acked) {
			/* 8.2. When an outstanding TSN is acknowledged,
			 * the endpoint shall clear the error counter of
			 * the destination transport address to which the
			 * DATA chunk was last sent.
			 * The association's overall error counter is
			 * also cleared.
			 */
			transport->error_count = 0;
			transport->asoc->overall_error_count = 0;

			/* Mark the destination transport address as
			 * active if it is not so marked.
			 */
			if (!transport->active) {
				sctp_assoc_control_transport(
					transport->asoc,
					transport,
					SCTP_TRANSPORT_UP,
					SCTP_RECEIVED_SACK);
			}

			sctp_transport_raise_cwnd(transport, sack_ctsn,
						  bytes_acked);

			transport->flight_size -= bytes_acked;
			q->outstanding_bytes -= bytes_acked;
		} else {
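			/* An example of the zero-window-probe test below:
			 * with next_tsn = 108 and sack_ctsn = 106, the only
			 * TSN that can still be in flight is 107, i.e. a
			 * single probe chunk sent against a closed window.
			 */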
			/* RFC 2960 6.1, sctpimpguide-06 2.15.2
			 * When a sender is doing zero window probing, it
			 * should not timeout the association if it continues
			 * to receive new packets from the receiver.  The
			 * reason is that the receiver MAY keep its window
			 * closed for an indefinite time.
			 * A sender is doing zero window probing when the
			 * receiver's advertised window is zero, and there is
			 * only one data chunk in flight to the receiver.
			 */
			if (!q->asoc->peer.rwnd &&
			    !list_empty(&tlist) &&
			    (sack_ctsn+2 == q->asoc->next_tsn)) {
				SCTP_DEBUG_PRINTK("%s: SACK received for zero "
						  "window probe: %u\n",
						  __FUNCTION__, sack_ctsn);
				q->asoc->overall_error_count = 0;
				transport->error_count = 0;
			}
		}

		/* RFC 2960 6.3.2 Retransmission Timer Rules
		 *
		 * R2) Whenever all outstanding data sent to an address have
		 * been acknowledged, turn off the T3-rtx timer of that
		 * address.
		 */
		if (!transport->flight_size) {
			if (timer_pending(&transport->T3_rtx_timer) &&
			    del_timer(&transport->T3_rtx_timer)) {
				sctp_transport_put(transport);
			}
		} else if (restart_timer) {
			if (!mod_timer(&transport->T3_rtx_timer,
				       jiffies + transport->rto))
				sctp_transport_hold(transport);
		}
	}

	list_splice(&tlist, transmitted_queue);
}

/* Mark chunks as missing; as a consequence they may get retransmitted. */
static void sctp_mark_missing(struct sctp_outq *q,
			      struct list_head *transmitted_queue,
			      struct sctp_transport *transport,
			      __u32 highest_new_tsn_in_sack,
			      int count_of_newacks)
{
	struct sctp_chunk *chunk;
	struct list_head *pos;
	__u32 tsn;
	char do_fast_retransmit = 0;
	struct sctp_transport *primary = q->asoc->peer.primary_path;

	list_for_each(pos, transmitted_queue) {

		chunk = list_entry(pos, struct sctp_chunk, transmitted_list);
		tsn = ntohl(chunk->subh.data_hdr->tsn);

		/* RFC 2960 7.2.4, sctpimpguide-05 2.8.2 M3) Examine all
		 * 'Unacknowledged TSN's', if the TSN number of an
		 * 'Unacknowledged TSN' is smaller than the 'HighestTSNinSack'
		 * value, increment the 'TSN.Missing.Report' count on that
		 * chunk if it has NOT been fast retransmitted or marked for
		 * fast retransmit already.
		 */
		if (!chunk->fast_retransmit &&
		    !chunk->tsn_gap_acked &&
		    TSN_lt(tsn, highest_new_tsn_in_sack)) {

			/* SFR-CACC may require us to skip marking
			 * this chunk as missing.
			 */
			if (!transport || !sctp_cacc_skip(primary, transport,
					    count_of_newacks, tsn)) {
				chunk->tsn_missing_report++;

				SCTP_DEBUG_PRINTK(
					"%s: TSN 0x%x missing counter: %d\n",
					__FUNCTION__, tsn,
					chunk->tsn_missing_report);
			}
		}
		/*
		 * M4) If any DATA chunk is found to have a
		 * 'TSN.Missing.Report'
		 * value larger than or equal to 4, mark that chunk for
		 * retransmission and start the fast retransmit procedure.
		 */

		if (chunk->tsn_missing_report >= 4) {
			chunk->fast_retransmit = 1;
			do_fast_retransmit = 1;
		}
	}

	if (transport) {
		if (do_fast_retransmit)
			sctp_retransmit(q, transport, SCTP_RTXR_FAST_RTX);

		SCTP_DEBUG_PRINTK("%s: transport: %p, cwnd: %d, "
				  "ssthresh: %d, flight_size: %d, pba: %d\n",
				  __FUNCTION__, transport, transport->cwnd,
				  transport->ssthresh, transport->flight_size,
				  transport->partial_bytes_acked);
	}
}

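/* Note: since each SACK increments tsn_missing_report at most once per
 * chunk, a TSN must be reported missing by four separate SACKs before
 * sctp_mark_missing() above triggers a fast retransmit, matching the
 * four-report threshold of the implementer's guide rules it quotes.
 */
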
/* Is the given TSN acked by this packet? */
static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn)
{
	int i;
	sctp_sack_variable_t *frags;
	__u16 gap;
	__u32 ctsn = ntohl(sack->cum_tsn_ack);

	if (TSN_lte(tsn, ctsn))
		goto pass;

	/* 3.3.4 Selective Acknowledgement (SACK) (3):
	 *
	 * Gap Ack Blocks:
	 *  These fields contain the Gap Ack Blocks.  They are repeated
	 *  for each Gap Ack Block up to the number of Gap Ack Blocks
	 *  defined in the Number of Gap Ack Blocks field.  All DATA
	 *  chunks with TSNs greater than or equal to (Cumulative TSN
	 *  Ack + Gap Ack Block Start) and less than or equal to
	 *  (Cumulative TSN Ack + Gap Ack Block End) of each Gap Ack
	 *  Block are assumed to have been received correctly.
	 */

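	/* For example, with ctsn = 100 and tsn = 103, gap works out to 3;
	 * a gap ack block of start = 2, end = 5 covers offsets 2 through
	 * 5, so TSN 103 counts as acked.
	 */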
	frags = sack->variable;
	gap = tsn - ctsn;
	for (i = 0; i < ntohs(sack->num_gap_ack_blocks); ++i) {
		if (TSN_lte(ntohs(frags[i].gab.start), gap) &&
		    TSN_lte(gap, ntohs(frags[i].gab.end)))
			goto pass;
	}

	return 0;
pass:
	return 1;
}