or1k/trunk/linux/linux-2.4/net/ipv4/tcp_input.c - Blame information for rev 1765

Line No. Rev Author Line
1 1275 phoenix
/*
2
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
3
 *              operating system.  INET is implemented using the  BSD Socket
4
 *              interface as the means of communication with the user level.
5
 *
6
 *              Implementation of the Transmission Control Protocol(TCP).
7
 *
8
 * Version:     $Id: tcp_input.c,v 1.1.1.1 2004-04-15 01:13:28 phoenix Exp $
9
 *
10
 * Authors:     Ross Biro, <bir7@leland.Stanford.Edu>
11
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12
 *              Mark Evans, <evansmp@uhura.aston.ac.uk>
13
 *              Corey Minyard <wf-rch!minyard@relay.EU.net>
14
 *              Florian La Roche, <flla@stud.uni-sb.de>
15
 *              Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
16
 *              Linus Torvalds, <torvalds@cs.helsinki.fi>
17
 *              Alan Cox, <gw4pts@gw4pts.ampr.org>
18
 *              Matthew Dillon, <dillon@apollo.west.oic.com>
19
 *              Arnt Gulbrandsen, <agulbra@nvg.unit.no>
20
 *              Jorge Cwik, <jorge@laser.satlink.net>
21
 */
22
 
23
/*
24
 * Changes:
25
 *              Pedro Roque     :       Fast Retransmit/Recovery.
26
 *                                      Two receive queues.
27
 *                                      Retransmit queue handled by TCP.
28
 *                                      Better retransmit timer handling.
29
 *                                      New congestion avoidance.
30
 *                                      Header prediction.
31
 *                                      Variable renaming.
32
 *
33
 *              Eric            :       Fast Retransmit.
34
 *              Randy Scott     :       MSS option defines.
35
 *              Eric Schenk     :       Fixes to slow start algorithm.
36
 *              Eric Schenk     :       Yet another double ACK bug.
37
 *              Eric Schenk     :       Delayed ACK bug fixes.
38
 *              Eric Schenk     :       Floyd style fast retrans war avoidance.
39
 *              David S. Miller :       Don't allow zero congestion window.
40
 *              Eric Schenk     :       Fix retransmitter so that it sends
41
 *                                      next packet on ack of previous packet.
42
 *              Andi Kleen      :       Moved open_request checking here
43
 *                                      and process RSTs for open_requests.
44
 *              Andi Kleen      :       Better prune_queue, and other fixes.
45
 *              Andrey Savochkin:       Fix RTT measurements in the presence of
46
 *                                      timestamps.
47
 *              Andrey Savochkin:       Check sequence numbers correctly when
48
 *                                      removing SACKs due to in sequence incoming
49
 *                                      data segments.
50
 *              Andi Kleen:             Make sure we never ack data there is not
51
 *                                      enough room for. Also make this condition
52
 *                                      a fatal error if it might still happen.
53
 *              Andi Kleen:             Add tcp_measure_rcv_mss to make
54
 *                                      connections with MSS<min(MTU,ann. MSS)
55
 *                                      work without delayed acks.
56
 *              Andi Kleen:             Process packets with PSH set in the
57
 *                                      fast path.
58
 *              J Hadi Salim:           ECN support
59
 *              Andrei Gurtov,
60
 *              Pasi Sarolahti,
61
 *              Panu Kuhlberg:          Experimental audit of TCP (re)transmission
62
 *                                      engine. Lots of bugs are found.
63
 *              Pasi Sarolahti:         F-RTO for dealing with spurious RTOs
64
 *              Angelo Dell'Aera:       TCP Westwood+ support
65
 */
66
 
67
#include <linux/config.h>
68
#include <linux/mm.h>
69
#include <linux/sysctl.h>
70
#include <net/tcp.h>
71
#include <net/inet_common.h>
72
#include <linux/ipsec.h>
73
 
74
int sysctl_tcp_timestamps = 1;
75
int sysctl_tcp_window_scaling = 1;
76
int sysctl_tcp_sack = 1;
77
int sysctl_tcp_fack = 1;
78
int sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
79
#ifdef CONFIG_INET_ECN
80
int sysctl_tcp_ecn = 1;
81
#else
82
int sysctl_tcp_ecn = 0;
83
#endif
84
int sysctl_tcp_dsack = 1;
85
int sysctl_tcp_app_win = 31;
86
int sysctl_tcp_adv_win_scale = 2;
87
 
88
int sysctl_tcp_stdurg = 0;
89
int sysctl_tcp_rfc1337 = 0;
90
int sysctl_tcp_max_orphans = NR_FILE;
91
int sysctl_tcp_frto = 0;
92
 
93
int sysctl_tcp_westwood = 0;
94
 
95
#define FLAG_DATA               0x01 /* Incoming frame contained data.          */
96
#define FLAG_WIN_UPDATE         0x02 /* Incoming ACK was a window update.       */
97
#define FLAG_DATA_ACKED         0x04 /* This ACK acknowledged new data.         */
98
#define FLAG_RETRANS_DATA_ACKED 0x08 /* "" "" some of which was retransmitted.  */
99
#define FLAG_SYN_ACKED          0x10 /* This ACK acknowledged SYN.              */
100
#define FLAG_DATA_SACKED        0x20 /* New SACK.                               */
101
#define FLAG_ECE                0x40 /* ECE in this ACK                         */
102
#define FLAG_DATA_LOST          0x80 /* SACK detected data lossage.             */
103
#define FLAG_SLOWPATH           0x100 /* Do not skip RFC checks for window update.*/
104
 
105
#define FLAG_ACKED              (FLAG_DATA_ACKED|FLAG_SYN_ACKED)
106
#define FLAG_NOT_DUP            (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
107
#define FLAG_CA_ALERT           (FLAG_DATA_SACKED|FLAG_ECE)
108
#define FLAG_FORWARD_PROGRESS   (FLAG_ACKED|FLAG_DATA_SACKED)
109
 
110
#define IsReno(tp) ((tp)->sack_ok == 0)
111
#define IsFack(tp) ((tp)->sack_ok & 2)
112
#define IsDSack(tp) ((tp)->sack_ok & 4)
113
 
114
#define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH)
115
 
116
/* Adapt the MSS value used to make delayed ack decision to the
117
 * real world.
118
 */
119
static __inline__ void tcp_measure_rcv_mss(struct tcp_opt *tp, struct sk_buff *skb)
120
{
121
        unsigned int len, lss;
122
 
123
        lss = tp->ack.last_seg_size;
124
        tp->ack.last_seg_size = 0;
125
 
126
        /* skb->len may jitter because of SACKs, even if peer
127
         * sends good full-sized frames.
128
         */
129
        len = skb->len;
130
        if (len >= tp->ack.rcv_mss) {
131
                tp->ack.rcv_mss = len;
132
        } else {
133
                /* Otherwise, we make a more careful check, taking into account
134
                 * that the SACK block is variable.
135
                 *
136
                 * "len" is invariant segment length, including TCP header.
137
                 */
138
                len += skb->data - skb->h.raw;
139
                if (len >= TCP_MIN_RCVMSS + sizeof(struct tcphdr) ||
140
                    /* If PSH is not set, packet should be
141
                     * full sized, provided peer TCP is not badly broken.
142
                     * This observation (if it is correct 8)) allows us
143
                     * to handle super-low mtu links fairly.
144
                     */
145
                    (len >= TCP_MIN_MSS + sizeof(struct tcphdr) &&
146
                     !(tcp_flag_word(skb->h.th)&TCP_REMNANT))) {
147
                        /* Subtract also invariant (if peer is RFC compliant),
148
                         * tcp header plus fixed timestamp option length.
149
                         * Resulting "len" is MSS free of SACK jitter.
150
                         */
151
                        len -= tp->tcp_header_len;
152
                        tp->ack.last_seg_size = len;
153
                        if (len == lss) {
154
                                tp->ack.rcv_mss = len;
155
                                return;
156
                        }
157
                }
158
                tp->ack.pending |= TCP_ACK_PUSHED;
159
        }
160
}
161
 
162
static void tcp_incr_quickack(struct tcp_opt *tp)
163
{
164
        unsigned quickacks = tp->rcv_wnd/(2*tp->ack.rcv_mss);
165
 
166
        if (quickacks==0)
167
                quickacks=2;
168
        if (quickacks > tp->ack.quick)
169
                tp->ack.quick = min(quickacks, TCP_MAX_QUICKACKS);
170
}
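
/*
 * Illustrative sketch, not part of the original file: the same "half the
 * receive window, counted in receive-MSS units" budget computed above,
 * with the kernel-private fields replaced by plain parameters (the
 * example_* name and the max_quickacks parameter stand in for tp->rcv_wnd,
 * tp->ack.rcv_mss and TCP_MAX_QUICKACKS).  For example, a 32768-byte
 * window with a 1460-byte rcv_mss allows 11 quick ACKs before falling
 * back to delayed-ACK behaviour.
 */
static unsigned int example_quickack_budget(unsigned int rcv_wnd,
                                            unsigned int rcv_mss,
                                            unsigned int max_quickacks)
{
        unsigned int quickacks = rcv_wnd / (2 * rcv_mss);

        if (quickacks == 0)
                quickacks = 2;
        return quickacks < max_quickacks ? quickacks : max_quickacks;
}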
171
 
172
void tcp_enter_quickack_mode(struct tcp_opt *tp)
173
{
174
        tcp_incr_quickack(tp);
175
        tp->ack.pingpong = 0;
176
        tp->ack.ato = TCP_ATO_MIN;
177
}
178
 
179
/* Send ACKs quickly, if "quick" count is not exhausted
180
 * and the session is not interactive.
181
 */
182
 
183
static __inline__ int tcp_in_quickack_mode(struct tcp_opt *tp)
184
{
185
        return (tp->ack.quick && !tp->ack.pingpong);
186
}
187
 
188
/* Buffer size and advertised window tuning.
189
 *
190
 * 1. Tuning sk->sndbuf, when connection enters established state.
191
 */
192
 
193
static void tcp_fixup_sndbuf(struct sock *sk)
194
{
195
        struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
196
        int sndmem = tp->mss_clamp+MAX_TCP_HEADER+16+sizeof(struct sk_buff);
197
 
198
        if (sk->sndbuf < 3*sndmem)
199
                sk->sndbuf = min(3*sndmem, sysctl_tcp_wmem[2]);
200
}
201
 
202
/* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
203
 *
204
 * All of tcp_full_space() is split into two parts: the "network" buffer, allocated
205
 * forward and advertised in receiver window (tp->rcv_wnd) and
206
 * "application buffer", required to isolate scheduling/application
207
 * latencies from network.
208
 * window_clamp is maximal advertised window. It can be less than
209
 * tcp_full_space(), in this case tcp_full_space() - window_clamp
210
 * is reserved for the "application" buffer. The smaller window_clamp is,
211
 * the smoother our behaviour from the viewpoint of the network, but the lower
212
 * throughput and the higher sensitivity of the connection to losses. 8)
213
 *
214
 * rcv_ssthresh is more strict window_clamp used at "slow start"
215
 * phase to predict further behaviour of this connection.
216
 * It is used for two goals:
217
 * - to enforce header prediction at sender, even when application
218
 *   requires some significant "application buffer". It is check #1.
219
 * - to prevent pruning of receive queue because of misprediction
220
 *   of receiver window. Check #2.
221
 *
222
 * The scheme does not work when sender sends good segments opening
223
 * window and then starts to feed us spaghetti. But it should work
224
 * in common situations. Otherwise, we have to rely on queue collapsing.
225
 */
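
/*
 * Illustrative sketch, not part of the original file: the buffer split
 * described above, with made-up numbers.  Whatever part of the full
 * receive space the clamp does not advertise is the "application buffer"
 * that absorbs scheduling and application latency; e.g. a full_space of
 * 87380 and a window_clamp of 65535 leave 21845 bytes reserved for the
 * application.  full_space and window_clamp are plain parameters standing
 * in for tcp_full_space(sk) and tp->window_clamp.
 */
static int example_app_buffer(int full_space, int window_clamp)
{
        if (window_clamp > full_space)          /* the clamp can never exceed   */
                window_clamp = full_space;      /* the space we can offer       */
        return full_space - window_clamp;       /* reserved for the application */
}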
226
 
227
/* Slow part of check#2. */
228
static int
229
__tcp_grow_window(struct sock *sk, struct tcp_opt *tp, struct sk_buff *skb)
230
{
231
        /* Optimize this! */
232
        int truesize = tcp_win_from_space(skb->truesize)/2;
233
        int window = tcp_full_space(sk)/2;
234
 
235
        while (tp->rcv_ssthresh <= window) {
236
                if (truesize <= skb->len)
237
                        return 2*tp->ack.rcv_mss;
238
 
239
                truesize >>= 1;
240
                window >>= 1;
241
        }
242
        return 0;
243
}
244
 
245
static __inline__ void
246
tcp_grow_window(struct sock *sk, struct tcp_opt *tp, struct sk_buff *skb)
247
{
248
        /* Check #1 */
249
        if (tp->rcv_ssthresh < tp->window_clamp &&
250
            (int)tp->rcv_ssthresh < tcp_space(sk) &&
251
            !tcp_memory_pressure) {
252
                int incr;
253
 
254
                /* Check #2. Increase window, if skb with such overhead
255
                 * will fit to rcvbuf in future.
256
                 */
257
                if (tcp_win_from_space(skb->truesize) <= skb->len)
258
                        incr = 2*tp->advmss;
259
                else
260
                        incr = __tcp_grow_window(sk, tp, skb);
261
 
262
                if (incr) {
263
                        tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr, tp->window_clamp);
264
                        tp->ack.quick |= 1;
265
                }
266
        }
267
}
268
 
269
/* 3. Tuning rcvbuf, when connection enters established state. */
270
 
271
static void tcp_fixup_rcvbuf(struct sock *sk)
272
{
273
        struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
274
        int rcvmem = tp->advmss+MAX_TCP_HEADER+16+sizeof(struct sk_buff);
275
 
276
        /* Try to select rcvbuf so that 4 mss-sized segments
277
         * will fit into the window and the corresponding skbs will fit into our rcvbuf.
278
         * (was 3; 4 is minimum to allow fast retransmit to work.)
279
         */
280
        while (tcp_win_from_space(rcvmem) < tp->advmss)
281
                rcvmem += 128;
282
        if (sk->rcvbuf < 4*rcvmem)
283
                sk->rcvbuf = min(4*rcvmem, sysctl_tcp_rmem[2]);
284
}
285
 
286
/* 4. Try to fixup all. It is done immediately after connection enters
287
 *    established state.
288
 */
289
static void tcp_init_buffer_space(struct sock *sk)
290
{
291
        struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
292
        int maxwin;
293
 
294
        if (!(sk->userlocks&SOCK_RCVBUF_LOCK))
295
                tcp_fixup_rcvbuf(sk);
296
        if (!(sk->userlocks&SOCK_SNDBUF_LOCK))
297
                tcp_fixup_sndbuf(sk);
298
 
299
        maxwin = tcp_full_space(sk);
300
 
301
        if (tp->window_clamp >= maxwin) {
302
                tp->window_clamp = maxwin;
303
 
304
                if (sysctl_tcp_app_win && maxwin>4*tp->advmss)
305
                        tp->window_clamp = max(maxwin-(maxwin>>sysctl_tcp_app_win), 4*tp->advmss);
306
        }
307
 
308
        /* Force reservation of one segment. */
309
        if (sysctl_tcp_app_win &&
310
            tp->window_clamp > 2*tp->advmss &&
311
            tp->window_clamp + tp->advmss > maxwin)
312
                tp->window_clamp = max(2*tp->advmss, maxwin-tp->advmss);
313
 
314
        tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp);
315
        tp->snd_cwnd_stamp = tcp_time_stamp;
316
}
317
 
318
/* 5. Recalculate window clamp after socket hit its memory bounds. */
319
static void tcp_clamp_window(struct sock *sk, struct tcp_opt *tp)
320
{
321
        struct sk_buff *skb;
322
        unsigned int app_win = tp->rcv_nxt - tp->copied_seq;
323
        int ofo_win = 0;
324
 
325
        tp->ack.quick = 0;
326
 
327
        skb_queue_walk(&tp->out_of_order_queue, skb) {
328
                ofo_win += skb->len;
329
        }
330
 
331
        /* If overcommit is due to out of order segments,
332
         * do not clamp window. Try to expand rcvbuf instead.
333
         */
334
        if (ofo_win) {
335
                if (sk->rcvbuf < sysctl_tcp_rmem[2] &&
336
                    !(sk->userlocks&SOCK_RCVBUF_LOCK) &&
337
                    !tcp_memory_pressure &&
338
                    atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0])
339
                        sk->rcvbuf = min(atomic_read(&sk->rmem_alloc), sysctl_tcp_rmem[2]);
340
        }
341
        if (atomic_read(&sk->rmem_alloc) > sk->rcvbuf) {
342
                app_win += ofo_win;
343
                if (atomic_read(&sk->rmem_alloc) >= 2*sk->rcvbuf)
344
                        app_win >>= 1;
345
                if (app_win > tp->ack.rcv_mss)
346
                        app_win -= tp->ack.rcv_mss;
347
                app_win = max(app_win, 2U*tp->advmss);
348
 
349
                if (!ofo_win)
350
                        tp->window_clamp = min(tp->window_clamp, app_win);
351
                tp->rcv_ssthresh = min(tp->window_clamp, 2U*tp->advmss);
352
        }
353
}
354
 
355
/* There is something which you must keep in mind when you analyze the
356
 * behavior of the tp->ato delayed ack timeout interval.  When a
357
 * connection starts up, we want to ack as quickly as possible.  The
358
 * problem is that "good" TCP's do slow start at the beginning of data
359
 * transmission.  This means that until we send the first few ACKs the
360
 * sender will sit on his end and only queue most of his data, because
361
 * he can only send snd_cwnd unacked packets at any given time.  For
362
 * each ACK we send, he increments snd_cwnd and transmits more of his
363
 * queue.  -DaveM
364
 */
365
static void tcp_event_data_recv(struct sock *sk, struct tcp_opt *tp, struct sk_buff *skb)
366
{
367
        u32 now;
368
 
369
        tcp_schedule_ack(tp);
370
 
371
        tcp_measure_rcv_mss(tp, skb);
372
 
373
        now = tcp_time_stamp;
374
 
375
        if (!tp->ack.ato) {
376
                /* The _first_ data packet received, initialize
377
                 * delayed ACK engine.
378
                 */
379
                tcp_incr_quickack(tp);
380
                tp->ack.ato = TCP_ATO_MIN;
381
        } else {
382
                int m = now - tp->ack.lrcvtime;
383
 
384
                if (m <= TCP_ATO_MIN/2) {
385
                        /* The fastest case is the first. */
386
                        tp->ack.ato = (tp->ack.ato>>1) + TCP_ATO_MIN/2;
387
                } else if (m < tp->ack.ato) {
388
                        tp->ack.ato = (tp->ack.ato>>1) + m;
389
                        if (tp->ack.ato > tp->rto)
390
                                tp->ack.ato = tp->rto;
391
                } else if (m > tp->rto) {
392
                        /* Too long gap. Apparently the sender failed to
393
                         * restart window, so that we send ACKs quickly.
394
                         */
395
                        tcp_incr_quickack(tp);
396
                        tcp_mem_reclaim(sk);
397
                }
398
        }
399
        tp->ack.lrcvtime = now;
400
 
401
        TCP_ECN_check_ce(tp, skb);
402
 
403
        if (skb->len >= 128)
404
                tcp_grow_window(sk, tp, skb);
405
}
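
/*
 * Illustrative sketch, not part of the original file: the delayed-ACK
 * "ato" update performed above, on plain integers.  ato drifts towards
 * the observed inter-packet gap m and never exceeds the RTO; a gap
 * larger than the RTO is handled by the caller (quickack mode), and a
 * gap between ato and the RTO leaves ato untouched.  ato_min stands in
 * for TCP_ATO_MIN.
 */
static unsigned int example_ato_update(unsigned int ato, unsigned int m,
                                       unsigned int ato_min, unsigned int rto)
{
        if (m <= ato_min / 2) {
                /* Back-to-back data: shrink towards the minimum. */
                ato = (ato >> 1) + ato_min / 2;
        } else if (m < ato) {
                /* Track the measured gap, bounded by the RTO. */
                ato = (ato >> 1) + m;
                if (ato > rto)
                        ato = rto;
        }
        return ato;
}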
406
 
407
/* Called to compute a smoothed rtt estimate. The data fed to this
408
 * routine either comes from timestamps, or from segments that were
409
 * known _not_ to have been retransmitted [see Karn/Partridge
410
 * Proceedings SIGCOMM 87]. The algorithm is from the SIGCOMM 88
411
 * piece by Van Jacobson.
412
 * NOTE: the next three routines used to be one big routine.
413
 * To save cycles in the RFC 1323 implementation it was better to break
414
 * it up into three procedures. -- erics
415
 */
416
static __inline__ void tcp_rtt_estimator(struct tcp_opt *tp, __u32 mrtt)
417
{
418
        long m = mrtt; /* RTT */
419
 
420
        /*      The following amusing code comes from Jacobson's
421
         *      article in SIGCOMM '88.  Note that rtt and mdev
422
         *      are scaled versions of rtt and mean deviation.
423
         *      This is designed to be as fast as possible
424
         *      m stands for "measurement".
425
         *
426
         *      In a 1990 paper the rto value is changed to:
427
         *      RTO = rtt + 4 * mdev
428
         *
429
         * Funny. This algorithm seems to be very broken.
430
         * These formulae increase RTO, when it should be decreased, increase
431
         * too slowly when it should be increased quickly, decrease too quickly,
432
         * etc. I guess in BSD RTO takes ONE value, so that it absolutely
433
         * does not matter how to _calculate_ it. Seems, it was a trap
434
         * that VJ failed to avoid. 8)
435
         */
436
        if(m == 0)
437
                m = 1;
438
        if (tp->srtt != 0) {
439
                m -= (tp->srtt >> 3);   /* m is now error in rtt est */
440
                tp->srtt += m;          /* rtt = 7/8 rtt + 1/8 new */
441
                if (m < 0) {
442
                        m = -m;         /* m is now abs(error) */
443
                        m -= (tp->mdev >> 2);   /* similar update on mdev */
444
                        /* This is similar to one of Eifel findings.
445
                         * Eifel blocks mdev updates when rtt decreases.
446
                         * This solution is a bit different: we use finer gain
447
                         * for mdev in this case (alpha*beta).
448
                         * Like Eifel it also prevents growth of rto,
449
                         * but also it limits too fast rto decreases,
450
                         * happening in pure Eifel.
451
                         */
452
                        if (m > 0)
453
                                m >>= 3;
454
                } else {
455
                        m -= (tp->mdev >> 2);   /* similar update on mdev */
456
                }
457
                tp->mdev += m;          /* mdev = 3/4 mdev + 1/4 new */
458
                if (tp->mdev > tp->mdev_max) {
459
                        tp->mdev_max = tp->mdev;
460
                        if (tp->mdev_max > tp->rttvar)
461
                                tp->rttvar = tp->mdev_max;
462
                }
463
                if (after(tp->snd_una, tp->rtt_seq)) {
464
                        if (tp->mdev_max < tp->rttvar)
465
                                tp->rttvar -= (tp->rttvar-tp->mdev_max)>>2;
466
                        tp->rtt_seq = tp->snd_nxt;
467
                        tp->mdev_max = TCP_RTO_MIN;
468
                }
469
        } else {
470
                /* no previous measure. */
471
                tp->srtt = m<<3;        /* take the measured time to be rtt */
472
                tp->mdev = m<<1;        /* make sure rto = 3*rtt */
473
                tp->mdev_max = tp->rttvar = max(tp->mdev, TCP_RTO_MIN);
474
                tp->rtt_seq = tp->snd_nxt;
475
        }
476
 
477
        tcp_westwood_update_rtt(tp, tp->srtt >> 3);
478
}
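
/*
 * Illustrative sketch, not part of the original file: the scaled EWMA
 * arithmetic used above on plain integers, so that the 1/8 gain on srtt
 * (stored <<3) and the 1/4 gain on mdev (stored <<2) are visible in
 * isolation.  The Eifel-style finer mdev gain and the rttvar/mdev_max
 * tracking are left out, and the example_* names are local to this
 * sketch.  E.g. a first sample of 100 jiffies primes srtt to 800 and
 * mdev to 200, matching the "rto = 3*rtt" initialisation above.
 */
static void example_rtt_sample(unsigned long *srtt, unsigned long *mdev,
                               unsigned long mrtt)
{
        long m = mrtt ? (long)mrtt : 1;         /* measured RTT, in jiffies       */

        if (*srtt != 0) {
                m -= (long)(*srtt >> 3);        /* error against current estimate */
                *srtt += m;                     /* srtt = 7/8 srtt + 1/8 mrtt     */
                if (m < 0)
                        m = -m;                 /* now abs(error)                 */
                m -= (long)(*mdev >> 2);
                *mdev += m;                     /* mdev = 3/4 mdev + 1/4 |err|    */
        } else {
                *srtt = m << 3;                 /* first sample primes the state  */
                *mdev = m << 1;
        }
}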
479
 
480
/* Calculate rto without backoff.  This is the second half of Van Jacobson's
481
 * routine referred to above.
482
 */
483
static __inline__ void tcp_set_rto(struct tcp_opt *tp)
484
{
485
        /* Old crap is replaced with new one. 8)
486
         *
487
         * More seriously:
488
         * 1. If rtt variance happened to be less than 50msec, it is hallucination.
489
         *    It cannot be less due to utterly erratic ACK generation made
490
         *    at least by solaris and freebsd. "Erratic ACKs" has _nothing_
491
         *    to do with delayed acks, because at cwnd>2 true delack timeout
492
         *    is invisible. Actually, Linux-2.4 also generates erratic
493
         *    ACKs in some circumstances.
494
         */
495
        tp->rto = (tp->srtt >> 3) + tp->rttvar;
496
 
497
        /* 2. Fixups made earlier cannot be right.
498
         *    If we do not estimate RTO correctly without them,
499
         *    all the algo is pure shit and should be replaced
500
         *    with a correct one. That is exactly what we pretend to do.
501
         */
502
}
503
 
504
/* NOTE: clamping at TCP_RTO_MIN is not required, current algo
505
 * guarantees that rto is higher.
506
 */
507
static __inline__ void tcp_bound_rto(struct tcp_opt *tp)
508
{
509
        if (tp->rto > TCP_RTO_MAX)
510
                tp->rto = TCP_RTO_MAX;
511
}
512
 
513
/* Save metrics learned by this TCP session.
514
   This function is called only, when TCP finishes successfully
515
   i.e. when it enters TIME-WAIT or goes from LAST-ACK to CLOSE.
516
 */
517
void tcp_update_metrics(struct sock *sk)
518
{
519
        struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
520
        struct dst_entry *dst = __sk_dst_get(sk);
521
 
522
        dst_confirm(dst);
523
 
524
        if (dst && (dst->flags&DST_HOST)) {
525
                int m;
526
 
527
                if (tp->backoff || !tp->srtt) {
528
                        /* This session failed to estimate rtt. Why?
529
                         * Probably, no packets returned in time.
530
                         * Reset our results.
531
                         */
532
                        if (!(dst->mxlock&(1<<RTAX_RTT)))
533
                                dst->rtt = 0;
534
                        return;
535
                }
536
 
537
                m = dst->rtt - tp->srtt;
538
 
539
                /* If newly calculated rtt larger than stored one,
540
                 * store new one. Otherwise, use EWMA. Remember,
541
                 * rtt overestimation is always better than underestimation.
542
                 */
543
                if (!(dst->mxlock&(1<<RTAX_RTT))) {
544
                        if (m <= 0)
545
                                dst->rtt = tp->srtt;
546
                        else
547
                                dst->rtt -= (m>>3);
548
                }
549
 
550
                if (!(dst->mxlock&(1<<RTAX_RTTVAR))) {
551
                        if (m < 0)
552
                                m = -m;
553
 
554
                        /* Scale deviation to rttvar fixed point */
555
                        m >>= 1;
556
                        if (m < tp->mdev)
557
                                m = tp->mdev;
558
 
559
                        if (m >= dst->rttvar)
560
                                dst->rttvar = m;
561
                        else
562
                                dst->rttvar -= (dst->rttvar - m)>>2;
563
                }
564
 
565
                if (tp->snd_ssthresh >= 0xFFFF) {
566
                        /* Slow start still did not finish. */
567
                        if (dst->ssthresh &&
568
                            !(dst->mxlock&(1<<RTAX_SSTHRESH)) &&
569
                            (tp->snd_cwnd>>1) > dst->ssthresh)
570
                                dst->ssthresh = (tp->snd_cwnd>>1);
571
                        if (!(dst->mxlock&(1<<RTAX_CWND)) &&
572
                            tp->snd_cwnd > dst->cwnd)
573
                                dst->cwnd = tp->snd_cwnd;
574
                } else if (tp->snd_cwnd > tp->snd_ssthresh &&
575
                           tp->ca_state == TCP_CA_Open) {
576
                        /* Cong. avoidance phase, cwnd is reliable. */
577
                        if (!(dst->mxlock&(1<<RTAX_SSTHRESH)))
578
                                dst->ssthresh = max(tp->snd_cwnd>>1, tp->snd_ssthresh);
579
                        if (!(dst->mxlock&(1<<RTAX_CWND)))
580
                                dst->cwnd = (dst->cwnd + tp->snd_cwnd)>>1;
581
                } else {
582
                        /* Else slow start did not finish, cwnd is nonsense,
583
                           ssthresh may be also invalid.
584
                         */
585
                        if (!(dst->mxlock&(1<<RTAX_CWND)))
586
                                dst->cwnd = (dst->cwnd + tp->snd_ssthresh)>>1;
587
                        if (dst->ssthresh &&
588
                            !(dst->mxlock&(1<<RTAX_SSTHRESH)) &&
589
                            tp->snd_ssthresh > dst->ssthresh)
590
                                dst->ssthresh = tp->snd_ssthresh;
591
                }
592
 
593
                if (!(dst->mxlock&(1<<RTAX_REORDERING))) {
594
                        if (dst->reordering < tp->reordering &&
595
                            tp->reordering != sysctl_tcp_reordering)
596
                                dst->reordering = tp->reordering;
597
                }
598
        }
599
}
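
/*
 * Illustrative sketch, not part of the original file: the asymmetric
 * update used above when caching RTT in the route metrics.  A larger
 * measurement replaces the cached value outright (overestimating RTT is
 * always safer than underestimating it); a smaller one only pulls the
 * cache down by 1/8 of the difference.  Both values use the same <<3
 * fixed-point scale as tp->srtt.
 */
static unsigned int example_cache_rtt(unsigned int cached_rtt, unsigned int srtt)
{
        int m = (int)cached_rtt - (int)srtt;

        if (m <= 0)
                return srtt;                    /* new estimate is larger: take it */
        return cached_rtt - (m >> 3);           /* new is smaller: decay slowly    */
}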
600
 
601
/* Increase initial CWND conservatively: if estimated
602
 * RTT is low enough (<20msec) or if we have some preset ssthresh.
603
 *
604
 * Numbers are taken from RFC2414.
605
 */
606
__u32 tcp_init_cwnd(struct tcp_opt *tp)
607
{
608
        __u32 cwnd;
609
 
610
        if (tp->mss_cache > 1460)
611
                return 2;
612
 
613
        cwnd = (tp->mss_cache > 1095) ? 3 : 4;
614
 
615
        if (!tp->srtt || (tp->snd_ssthresh >= 0xFFFF && tp->srtt > ((HZ/50)<<3)))
616
                cwnd = 2;
617
        else if (cwnd > tp->snd_ssthresh)
618
                cwnd = tp->snd_ssthresh;
619
 
620
        return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
621
}
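
/*
 * Illustrative sketch, not part of the original file: the RFC2414-style
 * initial window table used above, reduced to a pure function of the
 * cached MSS (the srtt and preset-ssthresh special cases handled above
 * are left out).  Four segments up to a 1095-byte MSS, three up to 1460
 * bytes, two above that.
 */
static unsigned int example_initial_cwnd(unsigned int mss)
{
        if (mss > 1460)
                return 2;
        return (mss > 1095) ? 3 : 4;
}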
622
 
623
/* Initialize metrics on socket. */
624
 
625
static void tcp_init_metrics(struct sock *sk)
626
{
627
        struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
628
        struct dst_entry *dst = __sk_dst_get(sk);
629
 
630
        if (dst == NULL)
631
                goto reset;
632
 
633
        dst_confirm(dst);
634
 
635
        if (dst->mxlock&(1<<RTAX_CWND))
636
                tp->snd_cwnd_clamp = dst->cwnd;
637
        if (dst->ssthresh) {
638
                tp->snd_ssthresh = dst->ssthresh;
639
                if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
640
                        tp->snd_ssthresh = tp->snd_cwnd_clamp;
641
        }
642
        if (dst->reordering && tp->reordering != dst->reordering) {
643
                tp->sack_ok &= ~2;
644
                tp->reordering = dst->reordering;
645
        }
646
 
647
        if (dst->rtt == 0)
648
                goto reset;
649
 
650
        if (!tp->srtt && dst->rtt < (TCP_TIMEOUT_INIT<<3))
651
                goto reset;
652
 
653
        /* Initial rtt is determined from SYN,SYN-ACK.
654
         * The segment is small and rtt may appear much
655
         * less than real one. Use per-dst memory
656
         * to make it more realistic.
657
         *
658
         * A bit of theory. RTT is time passed after "normal" sized packet
659
         * is sent until it is ACKed. In normal circumstances sending small
660
         * packets forces the peer to delay ACKs and the calculation is correct too.
661
         * The algorithm is adaptive and, provided we follow specs, it
662
         * NEVER underestimate RTT. BUT! If peer tries to make some clever
663
         * tricks sort of "quick acks" for time long enough to decrease RTT
664
         * to a low value, and then abruptly stops doing it and starts to delay
665
         * ACKs, wait for troubles.
666
         */
667
        if (dst->rtt > tp->srtt)
668
                tp->srtt = dst->rtt;
669
        if (dst->rttvar > tp->mdev) {
670
                tp->mdev = dst->rttvar;
671
                tp->mdev_max = tp->rttvar = max(tp->mdev, TCP_RTO_MIN);
672
        }
673
        tcp_set_rto(tp);
674
        tcp_bound_rto(tp);
675
        if (tp->rto < TCP_TIMEOUT_INIT && !tp->saw_tstamp)
676
                goto reset;
677
        tp->snd_cwnd = tcp_init_cwnd(tp);
678
        tp->snd_cwnd_stamp = tcp_time_stamp;
679
        return;
680
 
681
reset:
682
        /* Play conservative. If timestamps are not
683
         * supported, TCP will fail to recalculate correct
684
         * rtt, if initial rto is too small. FORGET ALL AND RESET!
685
         */
686
        if (!tp->saw_tstamp && tp->srtt) {
687
                tp->srtt = 0;
688
                tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_INIT;
689
                tp->rto = TCP_TIMEOUT_INIT;
690
        }
691
}
692
 
693
static void tcp_update_reordering(struct tcp_opt *tp, int metric, int ts)
694
{
695
        if (metric > tp->reordering) {
696
                tp->reordering = min(TCP_MAX_REORDERING, metric);
697
 
698
                /* This exciting event is worth remembering. 8) */
699
                if (ts)
700
                        NET_INC_STATS_BH(TCPTSReorder);
701
                else if (IsReno(tp))
702
                        NET_INC_STATS_BH(TCPRenoReorder);
703
                else if (IsFack(tp))
704
                        NET_INC_STATS_BH(TCPFACKReorder);
705
                else
706
                        NET_INC_STATS_BH(TCPSACKReorder);
707
#if FASTRETRANS_DEBUG > 1
708
                printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n",
709
                       tp->sack_ok, tp->ca_state,
710
                       tp->reordering, tp->fackets_out, tp->sacked_out,
711
                       tp->undo_marker ? tp->undo_retrans : 0);
712
#endif
713
                /* Disable FACK yet. */
714
                tp->sack_ok &= ~2;
715
        }
716
}
717
 
718
/* This procedure tags the retransmission queue when SACKs arrive.
719
 *
720
 * We have three tag bits: SACKED(S), RETRANS(R) and LOST(L).
721
 * Packets in queue with these bits set are counted in variables
722
 * sacked_out, retrans_out and lost_out, correspondingly.
723
 *
724
 * Valid combinations are:
725
 * Tag  InFlight        Description
726
 * 0    1               - orig segment is in flight.
727
 * S    0                - nothing flies, orig reached receiver.
728
 * L    0                - nothing flies, orig lost by net.
729
 * R    2               - both orig and retransmit are in flight.
730
 * L|R  1               - orig is lost, retransmit is in flight.
731
 * S|R  1               - orig reached receiver, retrans is still in flight.
732
 * (L|S|R is logically valid, it could occur when L|R is sacked,
733
 *  but it is equivalent to plain S and the code short-circuits it to S.
734
 *  L|S is logically invalid, it would mean -1 packet in flight 8))
735
 *
736
 * These 6 states form finite state machine, controlled by the following events:
737
 * 1. New ACK (+SACK) arrives. (tcp_sacktag_write_queue())
738
 * 2. Retransmission. (tcp_retransmit_skb(), tcp_xmit_retransmit_queue())
739
 * 3. Loss detection event of one of three flavors:
740
 *      A. Scoreboard estimator decided the packet is lost.
741
 *         A'. Reno "three dupacks" marks head of queue lost.
742
 *         A''. Its FACK modification, head until snd.fack is lost.
743
 *      B. SACK arrives sacking data transmitted after never retransmitted
744
 *         hole was sent out.
745
 *      C. SACK arrives sacking SND.NXT at the moment, when the
746
 *         segment was retransmitted.
747
 * 4. D-SACK added new rule: D-SACK changes any tag to S.
748
 *
749
 * It is pleasant to note, that state diagram turns out to be commutative,
750
 * so that we are allowed not to be bothered by order of our actions,
751
 * when multiple events arrive simultaneously. (see the function below).
752
 *
753
 * Reordering detection.
754
 * --------------------
755
 * Reordering metric is the maximal distance by which a packet can be displaced
756
 * in packet stream. With SACKs we can estimate it:
757
 *
758
 * 1. SACK fills old hole and the corresponding segment was not
759
 *    ever retransmitted -> reordering. Alas, we cannot use it
760
 *    when segment was retransmitted.
761
 * 2. The last flaw is solved with D-SACK. D-SACK arrives
762
 *    for retransmitted and already SACKed segment -> reordering..
763
 * Both of these heuristics are not used in Loss state, when we cannot
764
 * account for retransmits accurately.
765
 */
766
static int
767
tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_una)
768
{
769
        struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
770
        unsigned char *ptr = ack_skb->h.raw + TCP_SKB_CB(ack_skb)->sacked;
771
        struct tcp_sack_block *sp = (struct tcp_sack_block *)(ptr+2);
772
        int num_sacks = (ptr[1] - TCPOLEN_SACK_BASE)>>3;
773
        int reord = tp->packets_out;
774
        int prior_fackets;
775
        u32 lost_retrans = 0;
776
        int flag = 0;
777
        int i;
778
 
779
        if (!tp->sacked_out)
780
                tp->fackets_out = 0;
781
        prior_fackets = tp->fackets_out;
782
 
783
        for (i=0; i<num_sacks; i++, sp++) {
784
                struct sk_buff *skb;
785
                __u32 start_seq = ntohl(sp->start_seq);
786
                __u32 end_seq = ntohl(sp->end_seq);
787
                int fack_count = 0;
788
                int dup_sack = 0;
789
 
790
                /* Check for D-SACK. */
791
                if (i == 0) {
792
                        u32 ack = TCP_SKB_CB(ack_skb)->ack_seq;
793
 
794
                        if (before(start_seq, ack)) {
795
                                dup_sack = 1;
796
                                tp->sack_ok |= 4;
797
                                NET_INC_STATS_BH(TCPDSACKRecv);
798
                        } else if (num_sacks > 1 &&
799
                                   !after(end_seq, ntohl(sp[1].end_seq)) &&
800
                                   !before(start_seq, ntohl(sp[1].start_seq))) {
801
                                dup_sack = 1;
802
                                tp->sack_ok |= 4;
803
                                NET_INC_STATS_BH(TCPDSACKOfoRecv);
804
                        }
805
 
806
                        /* D-SACK for already forgotten data...
807
                         * Do dumb counting. */
808
                        if (dup_sack &&
809
                            !after(end_seq, prior_snd_una) &&
810
                            after(end_seq, tp->undo_marker))
811
                                tp->undo_retrans--;
812
 
813
                        /* Eliminate too old ACKs, but take into
814
                         * account more or less fresh ones, they can
815
                         * contain valid SACK info.
816
                         */
817
                        if (before(ack, prior_snd_una-tp->max_window))
818
                                return 0;
819
                }
820
 
821
                /* Event "B" in the comment above. */
822
                if (after(end_seq, tp->high_seq))
823
                        flag |= FLAG_DATA_LOST;
824
 
825
                for_retrans_queue(skb, sk, tp) {
826
                        u8 sacked = TCP_SKB_CB(skb)->sacked;
827
                        int in_sack;
828
 
829
                        /* The retransmission queue is always in order, so
830
                         * we can short-circuit the walk early.
831
                         */
832
                        if(!before(TCP_SKB_CB(skb)->seq, end_seq))
833
                                break;
834
 
835
                        fack_count++;
836
 
837
                        in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
838
                                !before(end_seq, TCP_SKB_CB(skb)->end_seq);
839
 
840
                        /* Account D-SACK for retransmitted packet. */
841
                        if ((dup_sack && in_sack) &&
842
                            (sacked & TCPCB_RETRANS) &&
843
                            after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker))
844
                                tp->undo_retrans--;
845
 
846
                        /* The frame is ACKed. */
847
                        if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) {
848
                                if (sacked&TCPCB_RETRANS) {
849
                                        if ((dup_sack && in_sack) &&
850
                                            (sacked&TCPCB_SACKED_ACKED))
851
                                                reord = min(fack_count, reord);
852
                                } else {
853
                                        /* If it was in a hole, we detected reordering. */
854
                                        if (fack_count < prior_fackets &&
855
                                            !(sacked&TCPCB_SACKED_ACKED))
856
                                                reord = min(fack_count, reord);
857
                                }
858
 
859
                                /* Nothing to do; acked frame is about to be dropped. */
860
                                continue;
861
                        }
862
 
863
                        if ((sacked&TCPCB_SACKED_RETRANS) &&
864
                            after(end_seq, TCP_SKB_CB(skb)->ack_seq) &&
865
                            (!lost_retrans || after(end_seq, lost_retrans)))
866
                                lost_retrans = end_seq;
867
 
868
                        if (!in_sack)
869
                                continue;
870
 
871
                        if (!(sacked&TCPCB_SACKED_ACKED)) {
872
                                if (sacked & TCPCB_SACKED_RETRANS) {
873
                                        /* If the segment is not tagged as lost,
874
                                         * we do not clear RETRANS, believing
875
                                         * that retransmission is still in flight.
876
                                         */
877
                                        if (sacked & TCPCB_LOST) {
878
                                                TCP_SKB_CB(skb)->sacked &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS);
879
                                                tp->lost_out--;
880
                                                tp->retrans_out--;
881
                                        }
882
                                } else {
883
                                        /* New sack for not retransmitted frame,
884
                                         * which was in hole. It is reordering.
885
                                         */
886
                                        if (!(sacked & TCPCB_RETRANS) &&
887
                                            fack_count < prior_fackets)
888
                                                reord = min(fack_count, reord);
889
 
890
                                        if (sacked & TCPCB_LOST) {
891
                                                TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
892
                                                tp->lost_out--;
893
                                        }
894
                                }
895
 
896
                                TCP_SKB_CB(skb)->sacked |= TCPCB_SACKED_ACKED;
897
                                flag |= FLAG_DATA_SACKED;
898
                                tp->sacked_out++;
899
 
900
                                if (fack_count > tp->fackets_out)
901
                                        tp->fackets_out = fack_count;
902
                        } else {
903
                                if (dup_sack && (sacked&TCPCB_RETRANS))
904
                                        reord = min(fack_count, reord);
905
                        }
906
 
907
                        /* D-SACK. We can detect redundant retransmission
908
                         * in S|R and plain R frames and clear it.
909
                         * undo_retrans is decreased above, L|R frames
910
                         * are accounted above as well.
911
                         */
912
                        if (dup_sack &&
913
                            (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS)) {
914
                                TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
915
                                tp->retrans_out--;
916
                        }
917
                }
918
        }
919
 
920
        /* Check for lost retransmit. This superb idea is
921
         * borrowed from "ratehalving". Event "C".
922
         * Later note: FACK people cheated me again 8),
923
         * we have to account for reordering! Ugly,
924
         * but should help.
925
         */
926
        if (lost_retrans && tp->ca_state == TCP_CA_Recovery) {
927
                struct sk_buff *skb;
928
 
929
                for_retrans_queue(skb, sk, tp) {
930
                        if (after(TCP_SKB_CB(skb)->seq, lost_retrans))
931
                                break;
932
                        if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
933
                                continue;
934
                        if ((TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) &&
935
                            after(lost_retrans, TCP_SKB_CB(skb)->ack_seq) &&
936
                            (IsFack(tp) ||
937
                             !before(lost_retrans, TCP_SKB_CB(skb)->ack_seq+tp->reordering*tp->mss_cache))) {
938
                                TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
939
                                tp->retrans_out--;
940
 
941
                                if (!(TCP_SKB_CB(skb)->sacked&(TCPCB_LOST|TCPCB_SACKED_ACKED))) {
942
                                        tp->lost_out++;
943
                                        TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
944
                                        flag |= FLAG_DATA_SACKED;
945
                                        NET_INC_STATS_BH(TCPLostRetransmit);
946
                                }
947
                        }
948
                }
949
        }
950
 
951
        tp->left_out = tp->sacked_out + tp->lost_out;
952
 
953
        if (reord < tp->fackets_out && tp->ca_state != TCP_CA_Loss)
954
                tcp_update_reordering(tp, (tp->fackets_out+1)-reord, 0);
955
 
956
#if FASTRETRANS_DEBUG > 0
957
        BUG_TRAP((int)tp->sacked_out >= 0);
958
        BUG_TRAP((int)tp->lost_out >= 0);
959
        BUG_TRAP((int)tp->retrans_out >= 0);
960
        BUG_TRAP((int)tcp_packets_in_flight(tp) >= 0);
961
#endif
962
        return flag;
963
}
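
/*
 * Illustrative sketch, not part of the original file: the in-flight
 * contribution of each tag combination from the table in the comment
 * before tcp_sacktag_write_queue() (S = SACKed, L = lost, R =
 * retransmitted).  The EX_TAG_* bit values are local to this sketch, not
 * the kernel's TCPCB_* flags, and the invalid L|S combination never
 * occurs in practice.
 */
#define EX_TAG_SACKED   0x1
#define EX_TAG_LOST     0x2
#define EX_TAG_RETRANS  0x4

static int example_tag_in_flight(int tag)
{
        int in_flight = 1;                      /* the original transmission      */

        if (tag & (EX_TAG_SACKED | EX_TAG_LOST))
                in_flight--;                    /* it arrived, or the net lost it */
        if (tag & EX_TAG_RETRANS)
                in_flight++;                    /* one retransmit copy in the air */
        return in_flight;                       /* 0, 1 or 2, matching the table  */
}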
964
 
965
/* RTO occurred, but do not yet enter loss state. Instead, transmit two new
966
 * segments to see from the next ACKs whether any data was really missing.
967
 * If the RTO was spurious, new ACKs should arrive.
968
 */
969
void tcp_enter_frto(struct sock *sk)
970
{
971
        struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
972
        struct sk_buff *skb;
973
 
974
        tp->frto_counter = 1;
975
 
976
        if (tp->ca_state <= TCP_CA_Disorder ||
977
            tp->snd_una == tp->high_seq ||
978
            (tp->ca_state == TCP_CA_Loss && !tp->retransmits)) {
979
                tp->prior_ssthresh = tcp_current_ssthresh(tp);
980
                tp->snd_ssthresh = tcp_recalc_ssthresh(tp);
981
        }
982
 
983
        /* Have to clear retransmission markers here to keep the bookkeeping
984
         * in shape, even though we are not yet in Loss state.
985
         * If something was really lost, it is eventually caught up
986
         * in tcp_enter_frto_loss.
987
         */
988
        tp->retrans_out = 0;
989
        tp->undo_marker = tp->snd_una;
990
        tp->undo_retrans = 0;
991
 
992
        for_retrans_queue(skb, sk, tp) {
993
                TCP_SKB_CB(skb)->sacked &= ~TCPCB_RETRANS;
994
        }
995
        tcp_sync_left_out(tp);
996
 
997
        tp->ca_state = TCP_CA_Open;
998
        tp->frto_highmark = tp->snd_nxt;
999
}
1000
 
1001
/* Enter Loss state after F-RTO was applied. Dupack arrived after RTO,
1002
 * which indicates that we should follow the traditional RTO recovery,
1003
 * i.e. mark everything lost and do go-back-N retransmission.
1004
 */
1005
void tcp_enter_frto_loss(struct sock *sk)
1006
{
1007
        struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
1008
        struct sk_buff *skb;
1009
        int cnt = 0;
1010
 
1011
        tp->sacked_out = 0;
1012
        tp->lost_out = 0;
1013
        tp->fackets_out = 0;
1014
 
1015
        for_retrans_queue(skb, sk, tp) {
1016
                cnt++;
1017
                TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
1018
                if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED)) {
1019
 
1020
                        /* Do not mark those segments lost that were
1021
                         * forward transmitted after RTO
1022
                         */
1023
                        if(!after(TCP_SKB_CB(skb)->end_seq,
1024
                                   tp->frto_highmark)) {
1025
                                TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
1026
                                tp->lost_out++;
1027
                        }
1028
                } else {
1029
                        tp->sacked_out++;
1030
                        tp->fackets_out = cnt;
1031
                }
1032
        }
1033
        tcp_sync_left_out(tp);
1034
 
1035
        tp->snd_cwnd = tp->frto_counter + tcp_packets_in_flight(tp)+1;
1036
        tp->snd_cwnd_cnt = 0;
1037
        tp->snd_cwnd_stamp = tcp_time_stamp;
1038
        tp->undo_marker = 0;
1039
        tp->frto_counter = 0;
1040
 
1041
        tp->reordering = min_t(unsigned int, tp->reordering,
1042
                                             sysctl_tcp_reordering);
1043
        tp->ca_state = TCP_CA_Loss;
1044
        tp->high_seq = tp->frto_highmark;
1045
        TCP_ECN_queue_cwr(tp);
1046
}
1047
 
1048
void tcp_clear_retrans(struct tcp_opt *tp)
1049
{
1050
        tp->left_out = 0;
1051
        tp->retrans_out = 0;
1052
 
1053
        tp->fackets_out = 0;
1054
        tp->sacked_out = 0;
1055
        tp->lost_out = 0;
1056
 
1057
        tp->undo_marker = 0;
1058
        tp->undo_retrans = 0;
1059
}
1060
 
1061
/* Enter Loss state. If "how" is not zero, forget all SACK information
1062
 * and reset tags completely, otherwise preserve SACKs. If receiver
1063
 * dropped its ofo queue, we will know this due to reneging detection.
1064
 */
1065
void tcp_enter_loss(struct sock *sk, int how)
1066
{
1067
        struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
1068
        struct sk_buff *skb;
1069
        int cnt = 0;
1070
 
1071
        /* Reduce ssthresh if it has not yet been reduced inside this window. */
1072
        if (tp->ca_state <= TCP_CA_Disorder ||
1073
            tp->snd_una == tp->high_seq ||
1074
            (tp->ca_state == TCP_CA_Loss && !tp->retransmits)) {
1075
                tp->prior_ssthresh = tcp_current_ssthresh(tp);
1076
 
1077
                if (!(tcp_westwood_ssthresh(tp)))
1078
                        tp->snd_ssthresh = tcp_recalc_ssthresh(tp);
1079
        }
1080
        tp->snd_cwnd = 1;
1081
        tp->snd_cwnd_cnt = 0;
1082
        tp->snd_cwnd_stamp = tcp_time_stamp;
1083
 
1084
        tcp_clear_retrans(tp);
1085
 
1086
        /* Push undo marker, if it was plain RTO and nothing
1087
         * was retransmitted. */
1088
        if (!how)
1089
                tp->undo_marker = tp->snd_una;
1090
 
1091
        for_retrans_queue(skb, sk, tp) {
1092
                cnt++;
1093
                if (TCP_SKB_CB(skb)->sacked&TCPCB_RETRANS)
1094
                        tp->undo_marker = 0;
1095
                TCP_SKB_CB(skb)->sacked &= (~TCPCB_TAGBITS)|TCPCB_SACKED_ACKED;
1096
                if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED) || how) {
1097
                        TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED;
1098
                        TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
1099
                        tp->lost_out++;
1100
                } else {
1101
                        tp->sacked_out++;
1102
                        tp->fackets_out = cnt;
1103
                }
1104
        }
1105
        tcp_sync_left_out(tp);
1106
 
1107
        tp->reordering = min_t(unsigned int, tp->reordering, sysctl_tcp_reordering);
1108
        tp->ca_state = TCP_CA_Loss;
1109
        tp->high_seq = tp->snd_nxt;
1110
        TCP_ECN_queue_cwr(tp);
1111
}
1112
 
1113
static int tcp_check_sack_reneging(struct sock *sk, struct tcp_opt *tp)
1114
{
1115
        struct sk_buff *skb;
1116
 
1117
        /* If ACK arrived pointing to a remembered SACK,
1118
         * it means that our remembered SACKs do not reflect
1119
         * real state of receiver i.e.
1120
         * receiver _host_ is heavily congested (or buggy).
1121
         * Do processing similar to RTO timeout.
1122
         */
1123
        if ((skb = skb_peek(&sk->write_queue)) != NULL &&
1124
            (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
1125
                NET_INC_STATS_BH(TCPSACKReneging);
1126
 
1127
                tcp_enter_loss(sk, 1);
1128
                tp->retransmits++;
1129
                tcp_retransmit_skb(sk, skb_peek(&sk->write_queue));
1130
                tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
1131
                return 1;
1132
        }
1133
        return 0;
1134
}
1135
 
1136
static inline int tcp_fackets_out(struct tcp_opt *tp)
1137
{
1138
        return IsReno(tp) ? tp->sacked_out+1 : tp->fackets_out;
1139
}
1140
 
1141
static inline int tcp_skb_timedout(struct tcp_opt *tp, struct sk_buff *skb)
1142
{
1143
        return (tcp_time_stamp - TCP_SKB_CB(skb)->when > tp->rto);
1144
}
1145
 
1146
static inline int tcp_head_timedout(struct sock *sk, struct tcp_opt *tp)
1147
{
1148
        return tp->packets_out && tcp_skb_timedout(tp, skb_peek(&sk->write_queue));
1149
}
1150
 
1151
/* Linux NewReno/SACK/FACK/ECN state machine.
1152
 * --------------------------------------
1153
 *
1154
 * "Open"       Normal state, no dubious events, fast path.
1155
 * "Disorder"   In all the respects it is "Open",
1156
 *              but requires a bit more attention. It is entered when
1157
 *              we see some SACKs or dupacks. It is split off from "Open"
1158
 *              mainly to move some processing from fast path to slow one.
1159
 * "CWR"        CWND was reduced due to some Congestion Notification event.
1160
 *              It can be ECN, ICMP source quench, local device congestion.
1161
 * "Recovery"   CWND was reduced, we are fast-retransmitting.
1162
 * "Loss"       CWND was reduced due to RTO timeout or SACK reneging.
1163
 *
1164
 * tcp_fastretrans_alert() is entered:
1165
 * - each incoming ACK, if state is not "Open"
1166
 * - when arrived ACK is unusual, namely:
1167
 *      * SACK
1168
 *      * Duplicate ACK.
1169
 *      * ECN ECE.
1170
 *
1171
 * Counting packets in flight is pretty simple.
1172
 *
1173
 *      in_flight = packets_out - left_out + retrans_out
1174
 *
1175
 *      packets_out is SND.NXT-SND.UNA counted in packets.
1176
 *
1177
 *      retrans_out is number of retransmitted segments.
1178
 *
1179
 *      left_out is the number of segments which left the network, but are not ACKed yet.
1180
 *
1181
 *              left_out = sacked_out + lost_out
1182
 *
1183
 *     sacked_out: Packets, which arrived to receiver out of order
1184
 *                 and hence not ACKed. With SACKs this number is simply
1185
 *                 amount of SACKed data. Even without SACKs
1186
 *                 it is easy to give pretty reliable estimate of this number,
1187
 *                 counting duplicate ACKs.
1188
 *
1189
 *       lost_out: Packets lost by network. TCP has no explicit
1190
 *                 "loss notification" feedback from network (for now).
1191
 *                 It means that this number can only be _guessed_.
1192
 *                 Actually, it is the heuristics to predict lossage that
1193
 *                 distinguishes different algorithms.
1194
 *
1195
 *      F.e. after RTO, when all the queue is considered as lost,
1196
 *      lost_out = packets_out and in_flight = retrans_out.
1197
 *
1198
 *              Essentially, we have now two algorithms counting
1199
 *              lost packets.
1200
 *
1201
 *              FACK: It is the simplest heuristics. As soon as we decided
1202
 *              that something is lost, we decide that _all_ not SACKed
1203
 *              packets until the most forward SACK are lost. I.e.
1204
 *              lost_out = fackets_out - sacked_out and left_out = fackets_out.
1205
 *              It is an absolutely correct estimate, if the network does not reorder
1206
 *              packets. And it loses any connection to reality when reordering
1207
 *              takes place. We use FACK by default until reordering
1208
 *              is suspected on the path to this destination.
1209
 *
1210
 *              NewReno: when Recovery is entered, we assume that one segment
1211
 *              is lost (classic Reno). While we are in Recovery and
1212
 *              a partial ACK arrives, we assume that one more packet
1213
 *              is lost (NewReno). These heuristics are the same in NewReno
1214
 *              and SACK.
1215
 *
1216
 *  Imagine, that's all! Forget about all this shamanism about CWND inflation
1217
 *  deflation etc. CWND is real congestion window, never inflated, changes
1218
 *  only according to classic VJ rules.
1219
 *
1220
 * Really tricky (and requiring careful tuning) part of algorithm
1221
 * is hidden in functions tcp_time_to_recover() and tcp_xmit_retransmit_queue().
1222
 * The first determines the moment _when_ we should reduce CWND and,
1223
 * hence, slow down forward transmission. In fact, it determines the moment
1224
 * when we decide that hole is caused by loss, rather than by a reorder.
1225
 *
1226
 * tcp_xmit_retransmit_queue() decides, _what_ we should retransmit to fill
1227
 * holes, caused by lost packets.
1228
 *
1229
 * And the most logically complicated part of algorithm is undo
1230
 * heuristics. We detect false retransmits due to both too early
1231
 * fast retransmit (reordering) and underestimated RTO, analyzing
1232
 * timestamps and D-SACKs. When we detect that some segments were
1233
 * retransmitted by mistake and CWND reduction was wrong, we undo
1234
 * window reduction and abort recovery phase. This logic is hidden
1235
 * inside several functions named tcp_try_undo_<something>.
1236
 */
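
/*
 * Illustrative sketch, not part of the original file: the packet
 * accounting described above, with made-up numbers.  With packets_out =
 * 10, sacked_out = 3, lost_out = 2 and retrans_out = 1 we get
 * left_out = 3 + 2 = 5 and in_flight = 10 - 5 + 1 = 6.
 */
static unsigned int example_packets_in_flight(unsigned int packets_out,
                                              unsigned int sacked_out,
                                              unsigned int lost_out,
                                              unsigned int retrans_out)
{
        unsigned int left_out = sacked_out + lost_out;

        return packets_out - left_out + retrans_out;
}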
1237
 
1238
/* This function decides when we should leave the Disorder state
1239
 * and enter Recovery phase, reducing congestion window.
1240
 *
1241
 * Main question: may we further continue forward transmission
1242
 * with the same cwnd?
1243
 */
1244
static int
1245
tcp_time_to_recover(struct sock *sk, struct tcp_opt *tp)
1246
{
1247
        /* Trick#1: The loss is proven. */
1248
        if (tp->lost_out)
1249
                return 1;
1250
 
1251
        /* Not-A-Trick#2 : Classic rule... */
1252
        if (tcp_fackets_out(tp) > tp->reordering)
1253
                return 1;
1254
 
1255
        /* Trick#3 : when we use RFC2988 timer restart, fast
1256
         * retransmit can be triggered by timeout of queue head.
1257
         */
1258
        if (tcp_head_timedout(sk, tp))
1259
                return 1;
1260
 
1261
        /* Trick#4: It is still not OK... But will it be useful to delay
1262
         * recovery more?
1263
         */
1264
        if (tp->packets_out <= tp->reordering &&
1265
            tp->sacked_out >= max_t(__u32, tp->packets_out/2, sysctl_tcp_reordering) &&
1266
            !tcp_may_send_now(sk, tp)) {
1267
                /* We have nothing to send. This connection is limited
1268
                 * either by receiver window or by application.
1269
                 */
1270
                return 1;
1271
        }
1272
 
1273
        return 0;
1274
}
1275
 
1276
/* If we receive more dupacks than expected while counting segments
1277
 * under the assumption of no reordering, interpret this as reordering.
1278
 * The only other possible reason is a bug in the receiver's TCP.
1279
 */
1280
static void tcp_check_reno_reordering(struct tcp_opt *tp, int addend)
1281
{
1282
        u32 holes;
1283
 
1284
        holes = max(tp->lost_out, 1U);
1285
        holes = min(holes, tp->packets_out);
1286
 
1287
        if (tp->sacked_out + holes > tp->packets_out) {
1288
                tp->sacked_out = tp->packets_out - holes;
1289
                tcp_update_reordering(tp, tp->packets_out+addend, 0);
1290
        }
1291
}
1292
 
1293
/* Emulate SACKs for SACKless connection: account for a new dupack. */
1294
 
1295
static void tcp_add_reno_sack(struct tcp_opt *tp)
1296
{
1297
        ++tp->sacked_out;
1298
        tcp_check_reno_reordering(tp, 0);
1299
        tcp_sync_left_out(tp);
1300
}
1301
 
1302
/* Account for ACK, ACKing some data in Reno Recovery phase. */
1303
 
1304
static void tcp_remove_reno_sacks(struct sock *sk, struct tcp_opt *tp, int acked)
1305
{
1306
        if (acked > 0) {
1307
                /* One ACK acked hole. The rest eat duplicate ACKs. */
1308
                if (acked-1 >= tp->sacked_out)
1309
                        tp->sacked_out = 0;
1310
                else
1311
                        tp->sacked_out -= acked-1;
1312
        }
1313
        tcp_check_reno_reordering(tp, acked);
1314
        tcp_sync_left_out(tp);
1315
}
1316
 
1317
static inline void tcp_reset_reno_sack(struct tcp_opt *tp)
1318
{
1319
        tp->sacked_out = 0;
1320
        tp->left_out = tp->lost_out;
1321
}
1322
 
1323
/* Mark head of queue up as lost. */
1324
static void
1325
tcp_mark_head_lost(struct sock *sk, struct tcp_opt *tp, int packets, u32 high_seq)
1326
{
1327
        struct sk_buff *skb;
1328
        int cnt = packets;
1329
 
1330
        BUG_TRAP(cnt <= tp->packets_out);
1331
 
1332
        for_retrans_queue(skb, sk, tp) {
1333
                if (--cnt < 0 || after(TCP_SKB_CB(skb)->end_seq, high_seq))
1334
                        break;
1335
                if (!(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) {
1336
                        TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
1337
                        tp->lost_out++;
1338
                }
1339
        }
1340
        tcp_sync_left_out(tp);
1341
}
1342
 
1343
/* Account newly detected lost packet(s) */
1344
 
1345
static void tcp_update_scoreboard(struct sock *sk, struct tcp_opt *tp)
1346
{
1347
        if (IsFack(tp)) {
1348
                int lost = tp->fackets_out - tp->reordering;
1349
                if (lost <= 0)
1350
                        lost = 1;
1351
                tcp_mark_head_lost(sk, tp, lost, tp->high_seq);
1352
        } else {
1353
                tcp_mark_head_lost(sk, tp, 1, tp->high_seq);
1354
        }
1355
 
1356
        /* New heuristic: it is possible only after we switched
1357
         * to restarting the timer each time something is ACKed.
1358
         * Hence, we can detect timed out packets during fast
1359
         * retransmit without falling to slow start.
1360
         */
1361
        if (tcp_head_timedout(sk, tp)) {
1362
                struct sk_buff *skb;
1363
 
1364
                for_retrans_queue(skb, sk, tp) {
1365
                        if (tcp_skb_timedout(tp, skb) &&
1366
                            !(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) {
1367
                                TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
1368
                                tp->lost_out++;
1369
                        }
1370
                }
1371
                tcp_sync_left_out(tp);
1372
        }
1373
}
1374
 
1375
/* CWND moderation, preventing bursts due to too big ACKs
1376
 * in dubious situations.
1377
 */
1378
static __inline__ void tcp_moderate_cwnd(struct tcp_opt *tp)
1379
{
1380
        tp->snd_cwnd = min(tp->snd_cwnd,
1381
                           tcp_packets_in_flight(tp)+tcp_max_burst(tp));
1382
        tp->snd_cwnd_stamp = tcp_time_stamp;
1383
}
1384
 
1385
/* Decrease cwnd each second ack. */
1386
 
1387
static void tcp_cwnd_down(struct tcp_opt *tp)
1388
{
1389
        int decr = tp->snd_cwnd_cnt + 1;
1390
        __u32 limit;
1391
 
1392
        /*
1393
         * TCP Westwood
1394
         * Here limit is evaluated as BWestimation*RTTmin (for obtaining it
1395
         * in packets we use mss_cache). If sysctl_tcp_westwood is off
1396
         * tcp_westwood_bw_rttmin() returns 0. In such a case snd_ssthresh is
1397
         * still used as usual. It also prevents other strange cases in which
1398
         * BWE*RTTmin could assume the value 0. It should not happen, but...
1399
         */
1400
 
1401
        if (!(limit = tcp_westwood_bw_rttmin(tp)))
1402
                limit = tp->snd_ssthresh/2;
1403
 
1404
        tp->snd_cwnd_cnt = decr&1;
1405
        decr >>= 1;
1406
 
1407
        if (decr && tp->snd_cwnd > limit)
1408
                tp->snd_cwnd -= decr;
1409
 
1410
        tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp)+1);
1411
        tp->snd_cwnd_stamp = tcp_time_stamp;
1412
}
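/* A standalone sketch (not part of this file) of the "decrease cwnd each
 * second ack" rule coded in tcp_cwnd_down() above: cwnd is reduced by one
 * packet for every two ACKs, never below the given limit, and finally
 * clamped to what is currently in flight plus one. All names here are
 * illustrative.
 */
static unsigned int example_cwnd_down(unsigned int cwnd,
                                      unsigned int *ack_cnt, /* like snd_cwnd_cnt */
                                      unsigned int limit,    /* e.g. ssthresh/2   */
                                      unsigned int in_flight)
{
        unsigned int decr = *ack_cnt + 1;

        *ack_cnt = decr & 1;    /* remember the odd ACK for next time */
        decr >>= 1;             /* one decrement per two ACKs         */

        if (decr && cwnd > limit)
                cwnd -= decr;

        /* clamp to what is actually outstanding, plus one */
        if (cwnd > in_flight + 1)
                cwnd = in_flight + 1;

        return cwnd;
}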
1413
 
1414
/* Nothing was retransmitted or returned timestamp is less
1415
 * than timestamp of the first retransmission.
1416
 */
1417
static __inline__ int tcp_packet_delayed(struct tcp_opt *tp)
1418
{
1419
        return !tp->retrans_stamp ||
1420
                (tp->saw_tstamp && tp->rcv_tsecr &&
1421
                 (__s32)(tp->rcv_tsecr - tp->retrans_stamp) < 0);
1422
}
1423
 
1424
/* Undo procedures. */
1425
 
1426
#if FASTRETRANS_DEBUG > 1
1427
static void DBGUNDO(struct sock *sk, struct tcp_opt *tp, const char *msg)
1428
{
1429
        printk(KERN_DEBUG "Undo %s %u.%u.%u.%u/%u c%u l%u ss%u/%u p%u\n",
1430
               msg,
1431
               NIPQUAD(sk->daddr), ntohs(sk->dport),
1432
               tp->snd_cwnd, tp->left_out,
1433
               tp->snd_ssthresh, tp->prior_ssthresh, tp->packets_out);
1434
}
1435
#else
1436
#define DBGUNDO(x...) do { } while (0)
1437
#endif
1438
 
1439
static void tcp_undo_cwr(struct tcp_opt *tp, int undo)
1440
{
1441
        if (tp->prior_ssthresh) {
1442
                tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh<<1);
1443
 
1444
                if (undo && tp->prior_ssthresh > tp->snd_ssthresh) {
1445
                        tp->snd_ssthresh = tp->prior_ssthresh;
1446
                        TCP_ECN_withdraw_cwr(tp);
1447
                }
1448
        } else {
1449
                tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh);
1450
        }
1451
        tcp_moderate_cwnd(tp);
1452
        tp->snd_cwnd_stamp = tcp_time_stamp;
1453
}
1454
 
1455
static inline int tcp_may_undo(struct tcp_opt *tp)
1456
{
1457
        return tp->undo_marker &&
1458
                (!tp->undo_retrans || tcp_packet_delayed(tp));
1459
}
1460
 
1461
/* People celebrate: "We love our President!" */
1462
static int tcp_try_undo_recovery(struct sock *sk, struct tcp_opt *tp)
1463
{
1464
        if (tcp_may_undo(tp)) {
1465
                /* Happy end! We did not retransmit anything
1466
                 * or our original transmission succeeded.
1467
                 */
1468
                DBGUNDO(sk, tp, tp->ca_state == TCP_CA_Loss ? "loss" : "retrans");
1469
                tcp_undo_cwr(tp, 1);
1470
                if (tp->ca_state == TCP_CA_Loss)
1471
                        NET_INC_STATS_BH(TCPLossUndo);
1472
                else
1473
                        NET_INC_STATS_BH(TCPFullUndo);
1474
                tp->undo_marker = 0;
1475
        }
1476
        if (tp->snd_una == tp->high_seq && IsReno(tp)) {
1477
                /* Hold old state until something *above* high_seq
1478
                 * is ACKed. For Reno it is a MUST to prevent false
1479
                 * fast retransmits (RFC2582). SACK TCP is safe. */
1480
                tcp_moderate_cwnd(tp);
1481
                return 1;
1482
        }
1483
        tp->ca_state = TCP_CA_Open;
1484
        return 0;
1485
}
1486
 
1487
/* Try to undo cwnd reduction, because D-SACKs acked all retransmitted data */
1488
static void tcp_try_undo_dsack(struct sock *sk, struct tcp_opt *tp)
1489
{
1490
        if (tp->undo_marker && !tp->undo_retrans) {
1491
                DBGUNDO(sk, tp, "D-SACK");
1492
                tcp_undo_cwr(tp, 1);
1493
                tp->undo_marker = 0;
1494
                NET_INC_STATS_BH(TCPDSACKUndo);
1495
        }
1496
}
1497
 
1498
/* Undo during fast recovery after partial ACK. */
1499
 
1500
static int tcp_try_undo_partial(struct sock *sk, struct tcp_opt *tp, int acked)
1501
{
1502
        /* Partial ACK arrived. Force Hoe's retransmit. */
1503
        int failed = IsReno(tp) || tp->fackets_out>tp->reordering;
1504
 
1505
        if (tcp_may_undo(tp)) {
1506
                /* Plain luck! The hole is filled with a delayed
1507
                 * packet, rather than with a retransmit.
1508
                 */
1509
                if (tp->retrans_out == 0)
1510
                        tp->retrans_stamp = 0;
1511
 
1512
                tcp_update_reordering(tp, tcp_fackets_out(tp)+acked, 1);
1513
 
1514
                DBGUNDO(sk, tp, "Hoe");
1515
                tcp_undo_cwr(tp, 0);
1516
                NET_INC_STATS_BH(TCPPartialUndo);
1517
 
1518
                /* So... Do not make Hoe's retransmit yet.
1519
                 * If the first packet was delayed, the rest
1520
                 * are most probably delayed as well.
1521
                 */
1522
                failed = 0;
1523
        }
1524
        return failed;
1525
}
1526
 
1527
/* Undo during loss recovery after partial ACK. */
1528
static int tcp_try_undo_loss(struct sock *sk, struct tcp_opt *tp)
1529
{
1530
        if (tcp_may_undo(tp)) {
1531
                struct sk_buff *skb;
1532
                for_retrans_queue(skb, sk, tp) {
1533
                        TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
1534
                }
1535
                DBGUNDO(sk, tp, "partial loss");
1536
                tp->lost_out = 0;
1537
                tp->left_out = tp->sacked_out;
1538
                tcp_undo_cwr(tp, 1);
1539
                NET_INC_STATS_BH(TCPLossUndo);
1540
                tp->retransmits = 0;
1541
                tp->undo_marker = 0;
1542
                if (!IsReno(tp))
1543
                        tp->ca_state = TCP_CA_Open;
1544
                return 1;
1545
        }
1546
        return 0;
1547
}
1548
 
1549
static __inline__ void tcp_complete_cwr(struct tcp_opt *tp)
1550
{
1551
        if (!(tcp_westwood_complete_cwr(tp)))
1552
                tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
1553
        tp->snd_cwnd_stamp = tcp_time_stamp;
1554
}
1555
 
1556
static void tcp_try_to_open(struct sock *sk, struct tcp_opt *tp, int flag)
1557
{
1558
        tp->left_out = tp->sacked_out;
1559
 
1560
        if (tp->retrans_out == 0)
1561
                tp->retrans_stamp = 0;
1562
 
1563
        if (flag&FLAG_ECE)
1564
                tcp_enter_cwr(tp);
1565
 
1566
        if (tp->ca_state != TCP_CA_CWR) {
1567
                int state = TCP_CA_Open;
1568
 
1569
                if (tp->left_out ||
1570
                    tp->retrans_out ||
1571
                    tp->undo_marker)
1572
                        state = TCP_CA_Disorder;
1573
 
1574
                if (tp->ca_state != state) {
1575
                        tp->ca_state = state;
1576
                        tp->high_seq = tp->snd_nxt;
1577
                }
1578
                tcp_moderate_cwnd(tp);
1579
        } else {
1580
                tcp_cwnd_down(tp);
1581
        }
1582
}
1583
 
1584
/* Process an event which can update packets-in-flight non-trivially.
1585
 * The main goal of this function is to calculate a new estimate for left_out,
1586
 * taking into account both packets sitting in the receiver's buffer and
1587
 * packets lost by the network.
1588
 *
1589
 * Besides that, it does CWND reduction when packet loss is detected
1590
 * and changes the state of the machine.
1591
 *
1592
 * It does _not_ decide what to send; that is done in
1593
 * tcp_xmit_retransmit_queue().
1594
 */
1595
static void
1596
tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
1597
                      int prior_packets, int flag)
1598
{
1599
        struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
1600
        int is_dupack = (tp->snd_una == prior_snd_una && !(flag&FLAG_NOT_DUP));
1601
 
1602
        /* Some technical things:
1603
         * 1. Reno does not count dupacks (sacked_out) automatically. */
1604
        if (!tp->packets_out)
1605
                tp->sacked_out = 0;
1606
        /* 2. SACK counts snd_fack in packets inaccurately. */
1607
        if (tp->sacked_out == 0)
1608
                tp->fackets_out = 0;
1609
 
1610
        /* Now state machine starts.
1611
         * A. ECE, hence prohibit cwnd undoing, the reduction is required. */
1612
        if (flag&FLAG_ECE)
1613
                tp->prior_ssthresh = 0;
1614
 
1615
        /* B. In all the states check for reneging SACKs. */
1616
        if (tp->sacked_out && tcp_check_sack_reneging(sk, tp))
1617
                return;
1618
 
1619
        /* C. Process data loss notification, provided it is valid. */
1620
        if ((flag&FLAG_DATA_LOST) &&
1621
            before(tp->snd_una, tp->high_seq) &&
1622
            tp->ca_state != TCP_CA_Open &&
1623
            tp->fackets_out > tp->reordering) {
1624
                tcp_mark_head_lost(sk, tp, tp->fackets_out-tp->reordering, tp->high_seq);
1625
                NET_INC_STATS_BH(TCPLoss);
1626
        }
1627
 
1628
        /* D. Synchronize left_out to current state. */
1629
        tcp_sync_left_out(tp);
1630
 
1631
        /* E. Check state exit conditions. State can be terminated
1632
         *    when high_seq is ACKed. */
1633
        if (tp->ca_state == TCP_CA_Open) {
1634
                if (!sysctl_tcp_frto)
1635
                        BUG_TRAP(tp->retrans_out == 0);
1636
                tp->retrans_stamp = 0;
1637
        } else if (!before(tp->snd_una, tp->high_seq)) {
1638
                switch (tp->ca_state) {
1639
                case TCP_CA_Loss:
1640
                        tp->retransmits = 0;
1641
                        if (tcp_try_undo_recovery(sk, tp))
1642
                                return;
1643
                        break;
1644
 
1645
                case TCP_CA_CWR:
1646
                        /* CWR is to be held until something *above* high_seq
1647
                         * is ACKed, for the CWR bit to reach the receiver. */
1648
                        if (tp->snd_una != tp->high_seq) {
1649
                                tcp_complete_cwr(tp);
1650
                                tp->ca_state = TCP_CA_Open;
1651
                        }
1652
                        break;
1653
 
1654
                case TCP_CA_Disorder:
1655
                        tcp_try_undo_dsack(sk, tp);
1656
                        if (!tp->undo_marker ||
1657
                            /* For SACK case do not Open to allow to undo
1658
                             * catching for all duplicate ACKs. */
1659
                            IsReno(tp) || tp->snd_una != tp->high_seq) {
1660
                                tp->undo_marker = 0;
1661
                                tp->ca_state = TCP_CA_Open;
1662
                        }
1663
                        break;
1664
 
1665
                case TCP_CA_Recovery:
1666
                        if (IsReno(tp))
1667
                                tcp_reset_reno_sack(tp);
1668
                        if (tcp_try_undo_recovery(sk, tp))
1669
                                return;
1670
                        tcp_complete_cwr(tp);
1671
                        break;
1672
                }
1673
        }
1674
 
1675
        /* F. Process state. */
1676
        switch (tp->ca_state) {
1677
        case TCP_CA_Recovery:
1678
                if (prior_snd_una == tp->snd_una) {
1679
                        if (IsReno(tp) && is_dupack)
1680
                                tcp_add_reno_sack(tp);
1681
                } else {
1682
                        int acked = prior_packets - tp->packets_out;
1683
                        if (IsReno(tp))
1684
                                tcp_remove_reno_sacks(sk, tp, acked);
1685
                        is_dupack = tcp_try_undo_partial(sk, tp, acked);
1686
                }
1687
                break;
1688
        case TCP_CA_Loss:
1689
                if (flag&FLAG_DATA_ACKED)
1690
                        tp->retransmits = 0;
1691
                if (!tcp_try_undo_loss(sk, tp)) {
1692
                        tcp_moderate_cwnd(tp);
1693
                        tcp_xmit_retransmit_queue(sk);
1694
                        return;
1695
                }
1696
                if (tp->ca_state != TCP_CA_Open)
1697
                        return;
1698
                /* Loss is undone; fall through to processing in Open state. */
1699
        default:
1700
                if (IsReno(tp)) {
1701
                        if (tp->snd_una != prior_snd_una)
1702
                                tcp_reset_reno_sack(tp);
1703
                        if (is_dupack)
1704
                                tcp_add_reno_sack(tp);
1705
                }
1706
 
1707
                if (tp->ca_state == TCP_CA_Disorder)
1708
                        tcp_try_undo_dsack(sk, tp);
1709
 
1710
                if (!tcp_time_to_recover(sk, tp)) {
1711
                        tcp_try_to_open(sk, tp, flag);
1712
                        return;
1713
                }
1714
 
1715
                /* Otherwise enter Recovery state */
1716
 
1717
                if (IsReno(tp))
1718
                        NET_INC_STATS_BH(TCPRenoRecovery);
1719
                else
1720
                        NET_INC_STATS_BH(TCPSackRecovery);
1721
 
1722
                tp->high_seq = tp->snd_nxt;
1723
                tp->prior_ssthresh = 0;
1724
                tp->undo_marker = tp->snd_una;
1725
                tp->undo_retrans = tp->retrans_out;
1726
 
1727
                if (tp->ca_state < TCP_CA_CWR) {
1728
                        if (!(flag&FLAG_ECE))
1729
                                tp->prior_ssthresh = tcp_current_ssthresh(tp);
1730
                        tp->snd_ssthresh = tcp_recalc_ssthresh(tp);
1731
                        TCP_ECN_queue_cwr(tp);
1732
                }
1733
 
1734
                tp->snd_cwnd_cnt = 0;
1735
                tp->ca_state = TCP_CA_Recovery;
1736
        }
1737
 
1738
        if (is_dupack || tcp_head_timedout(sk, tp))
1739
                tcp_update_scoreboard(sk, tp);
1740
        tcp_cwnd_down(tp);
1741
        tcp_xmit_retransmit_queue(sk);
1742
}
1743
 
1744
/* Read draft-ietf-tcplw-high-performance before mucking
1745
 * with this code. (Supersedes RFC1323)
1746
 */
1747
static void tcp_ack_saw_tstamp(struct tcp_opt *tp, int flag)
1748
{
1749
        __u32 seq_rtt;
1750
 
1751
        /* RTTM Rule: A TSecr value received in a segment is used to
1752
         * update the averaged RTT measurement only if the segment
1753
         * acknowledges some new data, i.e., only if it advances the
1754
         * left edge of the send window.
1755
         *
1756
         * See draft-ietf-tcplw-high-performance-00, section 3.3.
1757
         * 1998/04/10 Andrey V. Savochkin <saw@msu.ru>
1758
         *
1759
         * Changed: reset backoff as soon as we see the first valid sample.
1760
         * If we do not, we get strongly overestimated rto. With timestamps
1761
         * samples are accepted even from very old segments: f.e., when rtt=1
1762
         * increases to 8, we retransmit 5 times and after 8 seconds delayed
1763
         * answer arrives, rto becomes 120 seconds! If at least one of the segments
1764
         * in window is lost... Voila.                          --ANK (010210)
1765
         */
1766
        seq_rtt = tcp_time_stamp - tp->rcv_tsecr;
1767
        tcp_rtt_estimator(tp, seq_rtt);
1768
        tcp_set_rto(tp);
1769
        tp->backoff = 0;
1770
        tcp_bound_rto(tp);
1771
}
1772
 
1773
static void tcp_ack_no_tstamp(struct tcp_opt *tp, u32 seq_rtt, int flag)
1774
{
1775
        /* We don't have a timestamp. Can only use
1776
         * packets that are not retransmitted to determine
1777
         * rtt estimates. Also, we must not reset the
1778
         * backoff for rto until we get a non-retransmitted
1779
         * packet. This allows us to deal with a situation
1780
         * where the network delay has increased suddenly.
1781
         * I.e. Karn's algorithm. (SIGCOMM '87, p5.)
1782
         */
1783
 
1784
        if (flag & FLAG_RETRANS_DATA_ACKED)
1785
                return;
1786
 
1787
        tcp_rtt_estimator(tp, seq_rtt);
1788
        tcp_set_rto(tp);
1789
        tp->backoff = 0;
1790
        tcp_bound_rto(tp);
1791
}
1792
 
1793
static __inline__ void
1794
tcp_ack_update_rtt(struct tcp_opt *tp, int flag, s32 seq_rtt)
1795
{
1796
        /* Note that peer MAY send zero echo. In this case it is ignored. (rfc1323) */
1797
        if (tp->saw_tstamp && tp->rcv_tsecr)
1798
                tcp_ack_saw_tstamp(tp, flag);
1799
        else if (seq_rtt >= 0)
1800
                tcp_ack_no_tstamp(tp, seq_rtt, flag);
1801
}
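/* A self-contained sketch (not from this file) of the two RTT sampling rules
 * used above: with timestamps the sample is now - TSecr (taken only on ACKs
 * of new data), and without timestamps Karn's algorithm forbids samples from
 * retransmitted segments. The names and the -1 "no sample" convention are
 * illustrative.
 */
static long example_rtt_sample(unsigned long now,
                               unsigned long tsecr,     /* echoed timestamp, 0 if none            */
                               long seq_rtt,            /* measured send->ack time, -1 if unknown */
                               int retrans_data_acked)  /* nonzero if the ACK covers a retransmit */
{
        if (tsecr)                      /* RTTM rule: use the echoed timestamp      */
                return (long)(now - tsecr);

        if (retrans_data_acked)         /* Karn: ambiguous sample, discard it       */
                return -1;

        return seq_rtt;                 /* plain sample from a fresh transmission   */
}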
1802
 
1803
/* This is Jacobson's slow start and congestion avoidance.
1804
 * SIGCOMM '88, p. 328.
1805
 */
1806
static __inline__ void tcp_cong_avoid(struct tcp_opt *tp)
1807
{
1808
        if (tp->snd_cwnd <= tp->snd_ssthresh) {
1809
                /* In "safe" area, increase. */
1810
                if (tp->snd_cwnd < tp->snd_cwnd_clamp)
1811
                        tp->snd_cwnd++;
1812
        } else {
1813
                /* In dangerous area, increase slowly.
1814
                 * In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd
1815
                 */
1816
                if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
1817
                        if (tp->snd_cwnd < tp->snd_cwnd_clamp)
1818
                                tp->snd_cwnd++;
1819
                        tp->snd_cwnd_cnt=0;
1820
                } else
1821
                        tp->snd_cwnd_cnt++;
1822
        }
1823
        tp->snd_cwnd_stamp = tcp_time_stamp;
1824
}
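/* A standalone sketch (not part of this file) of the Jacobson rule coded
 * above: below ssthresh, cwnd grows by one segment per ACK (slow start);
 * above it, cwnd grows by one segment per roughly cwnd ACKs, i.e. about one
 * segment per RTT (congestion avoidance). Names are illustrative.
 */
static void example_cong_avoid(unsigned int *cwnd, unsigned int *cwnd_cnt,
                               unsigned int ssthresh, unsigned int clamp)
{
        if (*cwnd <= ssthresh) {
                /* slow start: exponential growth */
                if (*cwnd < clamp)
                        (*cwnd)++;
        } else if (++(*cwnd_cnt) >= *cwnd) {
                /* congestion avoidance: +1 every cwnd ACKs */
                *cwnd_cnt = 0;
                if (*cwnd < clamp)
                        (*cwnd)++;
        }
}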
1825
 
1826
/* Restart timer after forward progress on connection.
1827
 * RFC2988 recommends to restart timer to now+rto.
1828
 */
1829
 
1830
static __inline__ void tcp_ack_packets_out(struct sock *sk, struct tcp_opt *tp)
1831
{
1832
        if (tp->packets_out==0) {
1833
                tcp_clear_xmit_timer(sk, TCP_TIME_RETRANS);
1834
        } else {
1835
                tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
1836
        }
1837
}
1838
 
1839
/* Remove acknowledged frames from the retransmission queue. */
1840
static int tcp_clean_rtx_queue(struct sock *sk)
1841
{
1842
        struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
1843
        struct sk_buff *skb;
1844
        __u32 now = tcp_time_stamp;
1845
        int acked = 0;
1846
        __s32 seq_rtt = -1;
1847
 
1848
        while((skb=skb_peek(&sk->write_queue)) && (skb != tp->send_head)) {
1849
                struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
1850
                __u8 sacked = scb->sacked;
1851
 
1852
                /* If our packet is before the ack sequence we can
1853
                 * discard it as it's confirmed to have arrived at
1854
                 * the other end.
1855
                 */
1856
                if (after(scb->end_seq, tp->snd_una))
1857
                        break;
1858
 
1859
                /* Initial outgoing SYN's get put onto the write_queue
1860
                 * just like anything else we transmit.  It is not
1861
                 * true data, and if we misinform our callers that
1862
                 * this ACK acks real data, we will erroneously exit
1863
                 * connection startup slow start one packet too
1864
                 * quickly.  This is severely frowned upon behavior.
1865
                 */
1866
                if(!(scb->flags & TCPCB_FLAG_SYN)) {
1867
                        acked |= FLAG_DATA_ACKED;
1868
                } else {
1869
                        acked |= FLAG_SYN_ACKED;
1870
                        tp->retrans_stamp = 0;
1871
                }
1872
 
1873
                if (sacked) {
1874
                        if(sacked & TCPCB_RETRANS) {
1875
                                if(sacked & TCPCB_SACKED_RETRANS)
1876
                                        tp->retrans_out--;
1877
                                acked |= FLAG_RETRANS_DATA_ACKED;
1878
                                seq_rtt = -1;
1879
                        } else if (seq_rtt < 0)
1880
                                seq_rtt = now - scb->when;
1881
                        if(sacked & TCPCB_SACKED_ACKED)
1882
                                tp->sacked_out--;
1883
                        if(sacked & TCPCB_LOST)
1884
                                tp->lost_out--;
1885
                        if(sacked & TCPCB_URG) {
1886
                                if (tp->urg_mode &&
1887
                                    !before(scb->end_seq, tp->snd_up))
1888
                                        tp->urg_mode = 0;
1889
                        }
1890
                } else if (seq_rtt < 0)
1891
                        seq_rtt = now - scb->when;
1892
                if(tp->fackets_out)
1893
                        tp->fackets_out--;
1894
                tp->packets_out--;
1895
                __skb_unlink(skb, skb->list);
1896
                tcp_free_skb(sk, skb);
1897
        }
1898
 
1899
        if (acked&FLAG_ACKED) {
1900
                tcp_ack_update_rtt(tp, acked, seq_rtt);
1901
                tcp_ack_packets_out(sk, tp);
1902
        }
1903
 
1904
#if FASTRETRANS_DEBUG > 0
1905
        BUG_TRAP((int)tp->sacked_out >= 0);
1906
        BUG_TRAP((int)tp->lost_out >= 0);
1907
        BUG_TRAP((int)tp->retrans_out >= 0);
1908
        if (tp->packets_out==0 && tp->sack_ok) {
1909
                if (tp->lost_out) {
1910
                        printk(KERN_DEBUG "Leak l=%u %d\n", tp->lost_out, tp->ca_state);
1911
                        tp->lost_out = 0;
1912
                }
1913
                if (tp->sacked_out) {
1914
                        printk(KERN_DEBUG "Leak s=%u %d\n", tp->sacked_out, tp->ca_state);
1915
                        tp->sacked_out = 0;
1916
                }
1917
                if (tp->retrans_out) {
1918
                        printk(KERN_DEBUG "Leak r=%u %d\n", tp->retrans_out, tp->ca_state);
1919
                        tp->retrans_out = 0;
1920
                }
1921
        }
1922
#endif
1923
        return acked;
1924
}
1925
 
1926
static void tcp_ack_probe(struct sock *sk)
1927
{
1928
        struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
1929
 
1930
        /* Was it a usable window open? */
1931
 
1932
        if (!after(TCP_SKB_CB(tp->send_head)->end_seq, tp->snd_una + tp->snd_wnd)) {
1933
                tp->backoff = 0;
1934
                tcp_clear_xmit_timer(sk, TCP_TIME_PROBE0);
1935
                /* Socket must be woken up by a subsequent tcp_data_snd_check().
1936
                 * This function is not for random use!
1937
                 */
1938
        } else {
1939
                tcp_reset_xmit_timer(sk, TCP_TIME_PROBE0,
1940
                                     min(tp->rto << tp->backoff, TCP_RTO_MAX));
1941
        }
1942
}
1943
 
1944
static __inline__ int tcp_ack_is_dubious(struct tcp_opt *tp, int flag)
1945
{
1946
        return (!(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) ||
1947
                tp->ca_state != TCP_CA_Open);
1948
}
1949
 
1950
static __inline__ int tcp_may_raise_cwnd(struct tcp_opt *tp, int flag)
1951
{
1952
        return (!(flag & FLAG_ECE) || tp->snd_cwnd < tp->snd_ssthresh) &&
1953
                !((1<<tp->ca_state)&(TCPF_CA_Recovery|TCPF_CA_CWR));
1954
}
1955
 
1956
/* Check that window update is acceptable.
1957
 * The function assumes that snd_una<=ack<=snd_next.
1958
 */
1959
static __inline__ int
1960
tcp_may_update_window(struct tcp_opt *tp, u32 ack, u32 ack_seq, u32 nwin)
1961
{
1962
        return (after(ack, tp->snd_una) ||
1963
                after(ack_seq, tp->snd_wl1) ||
1964
                (ack_seq == tp->snd_wl1 && nwin > tp->snd_wnd));
1965
}
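/* A minimal sketch (not part of this file) of the acceptability test above,
 * spelled out with an explicit wrap-safe "after" comparison on 32-bit
 * sequence numbers. Names are illustrative.
 */
static int example_seq_after(unsigned int a, unsigned int b)
{
        return (int)(b - a) < 0;        /* true if a is logically later than b */
}

/* Accept the window update if the ACK advances snd_una, or carries a newer
 * sequence than the last update (snd_wl1), or is the same segment but
 * advertises a larger window.
 */
static int example_may_update_window(unsigned int ack, unsigned int snd_una,
                                     unsigned int ack_seq, unsigned int snd_wl1,
                                     unsigned int nwin, unsigned int snd_wnd)
{
        return example_seq_after(ack, snd_una) ||
               example_seq_after(ack_seq, snd_wl1) ||
               (ack_seq == snd_wl1 && nwin > snd_wnd);
}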
1966
 
1967
/* Update our send window.
1968
 *
1969
 * Window update algorithm, described in RFC793/RFC1122 (used in linux-2.2
1970
 * and in FreeBSD; NetBSD's is even worse) is wrong.
1971
 */
1972
static int tcp_ack_update_window(struct sock *sk, struct tcp_opt *tp,
1973
                                 struct sk_buff *skb, u32 ack, u32 ack_seq)
1974
{
1975
        int flag = 0;
1976
        u32 nwin = ntohs(skb->h.th->window);
1977
 
1978
        if (likely(!skb->h.th->syn))
1979
                nwin <<= tp->snd_wscale;
1980
 
1981
        if (tcp_may_update_window(tp, ack, ack_seq, nwin)) {
1982
                flag |= FLAG_WIN_UPDATE;
1983
                tcp_update_wl(tp, ack, ack_seq);
1984
 
1985
                if (tp->snd_wnd != nwin) {
1986
                        tp->snd_wnd = nwin;
1987
 
1988
                        /* Note, it is the only place, where
1989
                         * fast path is recovered for sending TCP.
1990
                         */
1991
                        tcp_fast_path_check(sk, tp);
1992
 
1993
                        if (nwin > tp->max_window) {
1994
                                tp->max_window = nwin;
1995
                                tcp_sync_mss(sk, tp->pmtu_cookie);
1996
                        }
1997
                }
1998
        }
1999
 
2000
        tp->snd_una = ack;
2001
 
2002
        return flag;
2003
}
2004
 
2005
static void tcp_process_frto(struct sock *sk, u32 prior_snd_una)
2006
{
2007
        struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
2008
 
2009
        tcp_sync_left_out(tp);
2010
 
2011
        if (tp->snd_una == prior_snd_una ||
2012
            !before(tp->snd_una, tp->frto_highmark)) {
2013
                /* RTO was caused by loss, start retransmitting in
2014
                 * go-back-N slow start
2015
                 */
2016
                tcp_enter_frto_loss(sk);
2017
                return;
2018
        }
2019
 
2020
        if (tp->frto_counter == 1) {
2021
                /* First ACK after RTO advances the window: allow two new
2022
                 * segments out.
2023
                 */
2024
                tp->snd_cwnd = tcp_packets_in_flight(tp) + 2;
2025
        } else {
2026
                /* Also the second ACK after RTO advances the window.
2027
                 * The RTO was likely spurious. Reduce cwnd and continue
2028
                 * in congestion avoidance
2029
                 */
2030
                tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
2031
                tcp_moderate_cwnd(tp);
2032
        }
2033
 
2034
        /* F-RTO affects the two new ACKs following the RTO.
2035
         * At the latest on the third ACK, TCP behavior is back to normal.
2036
         */
2037
        tp->frto_counter = (tp->frto_counter + 1) % 3;
2038
}
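/* A schematic sketch (not part of this file) of the F-RTO decision made
 * above, tracking only the counter: it is assumed to be called only while
 * F-RTO is active. The first ACK after the RTO that advances the window
 * allows two new segments out, the second concludes the RTO was likely
 * spurious and falls back to congestion avoidance, and an ACK that does not
 * advance the window at any point means real loss. All names are illustrative.
 */
enum example_frto_action {
        EXAMPLE_FRTO_LOSS,              /* go-back-N slow start                 */
        EXAMPLE_FRTO_SEND_TWO_NEW,      /* first new ACK: probe with 2 segments */
        EXAMPLE_FRTO_SPURIOUS           /* second new ACK: keep the old rate    */
};

static enum example_frto_action
example_frto_step(unsigned int *frto_counter, int ack_advanced_window)
{
        if (!ack_advanced_window) {
                *frto_counter = 0;
                return EXAMPLE_FRTO_LOSS;
        }

        if (*frto_counter == 1) {
                *frto_counter = 2;
                return EXAMPLE_FRTO_SEND_TWO_NEW;
        }

        *frto_counter = 0;              /* back to normal on the next ACK */
        return EXAMPLE_FRTO_SPURIOUS;
}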
2039
 
2040
/*
2041
 * TCP Westwood+
2042
 */
2043
 
2044
/*
2045
 * @westwood_do_filter
2046
 * Low-pass filter. Implemented using constant coefficients.
2047
 */
2048
 
2049
static inline __u32 westwood_do_filter(__u32 a, __u32 b)
2050
{
2051
        return (((7 * a) + b) >> 3);
2052
}
2053
 
2054
static void westwood_filter(struct sock *sk, __u32 delta)
2055
{
2056
        struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
2057
 
2058
        tp->westwood.bw_ns_est =
2059
                westwood_do_filter(tp->westwood.bw_ns_est,
2060
                                   tp->westwood.bk / delta);
2061
        tp->westwood.bw_est =
2062
                westwood_do_filter(tp->westwood.bw_est,
2063
                                   tp->westwood.bw_ns_est);
2064
}
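/* A standalone sketch (not from this file) of the constant-coefficient
 * low-pass filter used above: each new bandwidth sample is blended into the
 * running estimate with weight 1/8 (new = 7/8 * old + 1/8 * sample),
 * implemented with shifts just like westwood_do_filter(). Names are
 * illustrative.
 */
static unsigned int example_lowpass(unsigned int old_est, unsigned int sample)
{
        return (7 * old_est + sample) >> 3;
}

/* The two-stage filtering above in miniature: the data delivered over the
 * last RTT window gives an instantaneous sample, which is first smoothed
 * into a "noise-suppressed" estimate and then into the long-term estimate.
 */
static void example_westwood_update(unsigned int *bw_ns_est, unsigned int *bw_est,
                                    unsigned int bytes_acked, unsigned int interval)
{
        unsigned int sample = interval ? bytes_acked / interval : 0;

        *bw_ns_est = example_lowpass(*bw_ns_est, sample);
        *bw_est = example_lowpass(*bw_est, *bw_ns_est);
}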
2065
 
2066
/* @westwood_update_rttmin
2067
 * It is used to update RTTmin. In this case we MUST NOT use
2068
 * WESTWOOD_RTT_MIN minimum bound since we could be on a LAN!
2069
 */
2070
 
2071
static inline __u32 westwood_update_rttmin(struct sock *sk)
2072
{
2073
        struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
2074
        __u32 rttmin = tp->westwood.rtt_min;
2075
 
2076
        if (tp->westwood.rtt == 0)
2077
                return rttmin;
2078
 
2079
        if (tp->westwood.rtt < tp->westwood.rtt_min || !rttmin)
2080
                rttmin = tp->westwood.rtt;
2081
 
2082
        return rttmin;
2083
}
2084
 
2085
/*
2086
 * @westwood_acked
2087
 * Evaluate increases for dk.
2088
 */
2089
 
2090
static __u32 westwood_acked(struct sock *sk)
2091
{
2092
        struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
2093
 
2094
        return ((tp->snd_una) - (tp->westwood.snd_una));
2095
}
2096
 
2097
/*
2098
 * @westwood_new_window
2099
 * It evaluates if we are receiving data inside the same RTT window as
2100
 * when we started.
2101
 * Return value:
2102
 * It returns 0 if we are still evaluating samples in the same RTT
2103
 * window, 1 if the sample has to be considered in the next window.
2104
 */
2105
 
2106
static int westwood_new_window(struct sock *sk)
2107
{
2108
        struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
2109
        __u32 left_bound;
2110
        __u32 rtt;
2111
        int ret = 0;
2112
 
2113
        left_bound = tp->westwood.rtt_win_sx;
2114
        rtt = max(tp->westwood.rtt, (__u32)TCP_WESTWOOD_RTT_MIN);
2115
 
2116
        /*
2117
         * An RTT-window has passed. Be careful: if RTT is less than
2118
         * 50ms we don't filter but continue 'building the sample'.
2119
         * This minimum limit was chosen because estimates over such small
2120
         * time intervals are better avoided.
2121
         * Obviously, on a LAN we will reasonably always have
2122
         * right_bound = left_bound + WESTWOOD_RTT_MIN
2123
         */
2124
 
2125
        if ((left_bound + rtt) < tcp_time_stamp)
2126
                ret = 1;
2127
 
2128
        return ret;
2129
}
2130
 
2131
/*
2132
 * @westwood_update_window
2133
 * It updates RTT evaluation window if it is the right moment to do
2134
 * it. If so it calls filter for evaluating bandwidth.
2135
 */
2136
 
2137
static void __westwood_update_window(struct sock *sk, __u32 now)
2138
{
2139
        struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
2140
        __u32 delta = now - tp->westwood.rtt_win_sx;
2141
 
2142
        if (!delta)
2143
                return;
2144
 
2145
        if (tp->westwood.rtt)
2146
                westwood_filter(sk, delta);
2147
 
2148
        tp->westwood.bk = 0;
2149
        tp->westwood.rtt_win_sx = tcp_time_stamp;
2150
}
2151
 
2152
static void westwood_update_window(struct sock *sk, __u32 now)
2153
{
2154
        if (westwood_new_window(sk))
2155
                __westwood_update_window(sk, now);
2156
}
2157
 
2158
/*
2159
 * @__tcp_westwood_fast_bw
2160
 * It is called when we are in the fast path, in particular when
2161
 * header prediction is successful. In that case the update is in fact
2162
 * straightforward and doesn't need any particular care.
2163
 */
2164
 
2165
void __tcp_westwood_fast_bw(struct sock *sk, struct sk_buff *skb)
2166
{
2167
        struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
2168
 
2169
        westwood_update_window(sk, tcp_time_stamp);
2170
 
2171
        tp->westwood.bk += westwood_acked(sk);
2172
        tp->westwood.snd_una = tp->snd_una;
2173
        tp->westwood.rtt_min = westwood_update_rttmin(sk);
2174
}
2175
 
2176
/*
2177
 * @westwood_mss
2178
 * This function was inserted just to make it possible to evaluate
2179
 * which value of MSS works better; it is not obvious which of the
2180
 * candidate MSS values to use. Only testing will tell!
2181
 */
2182
 
2183
static inline __u32 westwood_mss(struct tcp_opt *tp)
2184
{
2185
        return ((__u32)(tp->mss_cache));
2186
}
2187
 
2188
/*
2189
 * @tcp_westwood_dupack_update
2190
 * It updates accounted and cumul_ack when receiving a dupack.
2191
 */
2192
 
2193
static void westwood_dupack_update(struct sock *sk)
2194
{
2195
        struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
2196
 
2197
        tp->westwood.accounted += westwood_mss(tp);
2198
        tp->westwood.cumul_ack = westwood_mss(tp);
2199
}
2200
 
2201
static inline int westwood_may_change_cumul(struct tcp_opt *tp)
2202
{
2203
        return ((tp->westwood.cumul_ack) > westwood_mss(tp));
2204
}
2205
 
2206
static inline void westwood_partial_update(struct tcp_opt *tp)
2207
{
2208
        tp->westwood.accounted -= tp->westwood.cumul_ack;
2209
        tp->westwood.cumul_ack = westwood_mss(tp);
2210
}
2211
 
2212
static inline void westwood_complete_update(struct tcp_opt *tp)
2213
{
2214
        tp->westwood.cumul_ack -= tp->westwood.accounted;
2215
        tp->westwood.accounted = 0;
2216
}
2217
 
2218
/*
2219
 * @westwood_acked_count
2220
 * This function evaluates cumul_ack for evaluating dk in case of
2221
 * delayed or partial acks.
2222
 */
2223
 
2224
static __u32 westwood_acked_count(struct sock *sk)
2225
{
2226
        struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
2227
 
2228
        tp->westwood.cumul_ack = westwood_acked(sk);
2229
 
2230
        /* If cumul_ack is 0 this is a dupack since it's not moving
2231
         * tp->snd_una.
2232
         */
2233
        if (!(tp->westwood.cumul_ack))
2234
                westwood_dupack_update(sk);
2235
 
2236
        if (westwood_may_change_cumul(tp)) {
2237
                /* Partial or delayed ack */
2238
                if ((tp->westwood.accounted) >= (tp->westwood.cumul_ack))
2239
                        westwood_partial_update(tp);
2240
                else
2241
                        westwood_complete_update(tp);
2242
        }
2243
 
2244
        tp->westwood.snd_una = tp->snd_una;
2245
 
2246
        return tp->westwood.cumul_ack;
2247
}
2248
 
2249
/*
2250
 * @__tcp_westwood_slow_bw
2251
 * It is called when something is going wrong... even if there could
2252
 * be no problem at all! In fact a simple delayed packet may trigger a
2253
 * dupack. But we need to be careful in such a case.
2254
 */
2255
 
2256
void __tcp_westwood_slow_bw(struct sock *sk, struct sk_buff *skb)
2257
{
2258
        struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
2259
 
2260
        westwood_update_window(sk, tcp_time_stamp);
2261
 
2262
        tp->westwood.bk += westwood_acked_count(sk);
2263
        tp->westwood.rtt_min = westwood_update_rttmin(sk);
2264
}
2265
 
2266
/* TCP Westwood+ routines end here */
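/* A schematic sketch (not part of this file) of how the Westwood estimate is
 * consumed: the bandwidth estimate times the minimum RTT gives the amount of
 * data the pipe can hold, and dividing by the MSS turns it into a packet
 * limit like the one tcp_cwnd_down() falls back to when available (see the
 * comment there). Names and units here are illustrative.
 */
static unsigned int example_westwood_bw_rttmin(unsigned int bw_est,   /* bytes per tick */
                                               unsigned int rtt_min,  /* ticks          */
                                               unsigned int mss)      /* bytes          */
{
        if (!bw_est || !rtt_min || !mss)
                return 0;       /* caller falls back to ssthresh/2 */

        return (bw_est * rtt_min) / mss;
}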
2267
 
2268
/* This routine deals with incoming acks, but not outgoing ones. */
2269
static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
2270
{
2271
        struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
2272
        u32 prior_snd_una = tp->snd_una;
2273
        u32 ack_seq = TCP_SKB_CB(skb)->seq;
2274
        u32 ack = TCP_SKB_CB(skb)->ack_seq;
2275
        u32 prior_in_flight;
2276
        int prior_packets;
2277
 
2278
        /* If the ack is newer than sent or older than previous acks
2279
         * then we can probably ignore it.
2280
         */
2281
        if (after(ack, tp->snd_nxt))
2282
                goto uninteresting_ack;
2283
 
2284
        if (before(ack, prior_snd_una))
2285
                goto old_ack;
2286
 
2287
        if (!(flag&FLAG_SLOWPATH) && after(ack, prior_snd_una)) {
2288
                /* Window is constant, pure forward advance.
2289
                 * No more checks are required.
2290
                 * Note, we use the fact that SND.UNA>=SND.WL2.
2291
                 */
2292
                tcp_update_wl(tp, ack, ack_seq);
2293
                tp->snd_una = ack;
2294
                tcp_westwood_fast_bw(sk, skb);
2295
                flag |= FLAG_WIN_UPDATE;
2296
 
2297
                NET_INC_STATS_BH(TCPHPAcks);
2298
        } else {
2299
                if (ack_seq != TCP_SKB_CB(skb)->end_seq)
2300
                        flag |= FLAG_DATA;
2301
                else
2302
                        NET_INC_STATS_BH(TCPPureAcks);
2303
 
2304
                flag |= tcp_ack_update_window(sk, tp, skb, ack, ack_seq);
2305
 
2306
                if (TCP_SKB_CB(skb)->sacked)
2307
                        flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
2308
 
2309
                if (TCP_ECN_rcv_ecn_echo(tp, skb->h.th))
2310
                        flag |= FLAG_ECE;
2311
 
2312
                tcp_westwood_slow_bw(sk, skb);
2313
        }
2314
 
2315
        /* We passed data and got it acked, remove any soft error
2316
         * log. Something worked...
2317
         */
2318
        sk->err_soft = 0;
2319
        tp->rcv_tstamp = tcp_time_stamp;
2320
        if ((prior_packets = tp->packets_out) == 0)
2321
                goto no_queue;
2322
 
2323
        prior_in_flight = tcp_packets_in_flight(tp);
2324
 
2325
        /* See if we can take anything off of the retransmit queue. */
2326
        flag |= tcp_clean_rtx_queue(sk);
2327
 
2328
        if (tp->frto_counter)
2329
                tcp_process_frto(sk, prior_snd_una);
2330
 
2331
        if (tcp_ack_is_dubious(tp, flag)) {
2332
                /* Advance CWND, if state allows this. */
2333
                if ((flag&FLAG_DATA_ACKED) && prior_in_flight >= tp->snd_cwnd &&
2334
                    tcp_may_raise_cwnd(tp, flag))
2335
                        tcp_cong_avoid(tp);
2336
                tcp_fastretrans_alert(sk, prior_snd_una, prior_packets, flag);
2337
        } else {
2338
                if ((flag&FLAG_DATA_ACKED) && prior_in_flight >= tp->snd_cwnd)
2339
                        tcp_cong_avoid(tp);
2340
        }
2341
 
2342
        if ((flag & FLAG_FORWARD_PROGRESS) || !(flag&FLAG_NOT_DUP))
2343
                dst_confirm(sk->dst_cache);
2344
 
2345
        return 1;
2346
 
2347
no_queue:
2348
        tp->probes_out = 0;
2349
 
2350
        /* If this ack opens up a zero window, clear backoff.  It was
2351
         * being used to time the probes, and is probably far higher than
2352
         * it needs to be for normal retransmission.
2353
         */
2354
        if (tp->send_head)
2355
                tcp_ack_probe(sk);
2356
        return 1;
2357
 
2358
old_ack:
2359
        if (TCP_SKB_CB(skb)->sacked)
2360
                tcp_sacktag_write_queue(sk, skb, prior_snd_una);
2361
 
2362
uninteresting_ack:
2363
        SOCK_DEBUG(sk, "Ack %u out of %u:%u\n", ack, tp->snd_una, tp->snd_nxt);
2364
        return 0;
2365
}
2366
 
2367
 
2368
/* Look for tcp options. Normally only called on SYN and SYNACK packets.
2369
 * But, this can also be called on packets in the established flow when
2370
 * the fast version below fails.
2371
 */
2372
void tcp_parse_options(struct sk_buff *skb, struct tcp_opt *tp, int estab)
2373
{
2374
        unsigned char *ptr;
2375
        struct tcphdr *th = skb->h.th;
2376
        int length=(th->doff*4)-sizeof(struct tcphdr);
2377
 
2378
        ptr = (unsigned char *)(th + 1);
2379
        tp->saw_tstamp = 0;
2380
 
2381
        while(length>0) {
2382
                int opcode=*ptr++;
2383
                int opsize;
2384
 
2385
                switch (opcode) {
2386
                        case TCPOPT_EOL:
2387
                                return;
2388
                        case TCPOPT_NOP:        /* Ref: RFC 793 section 3.1 */
2389
                                length--;
2390
                                continue;
2391
                        default:
2392
                                opsize=*ptr++;
2393
                                if (opsize < 2) /* "silly options" */
2394
                                        return;
2395
                                if (opsize > length)
2396
                                        return; /* don't parse partial options */
2397
                                switch(opcode) {
2398
                                case TCPOPT_MSS:
2399
                                        if(opsize==TCPOLEN_MSS && th->syn && !estab) {
2400
                                                u16 in_mss = ntohs(*(__u16 *)ptr);
2401
                                                if (in_mss) {
2402
                                                        if (tp->user_mss && tp->user_mss < in_mss)
2403
                                                                in_mss = tp->user_mss;
2404
                                                        tp->mss_clamp = in_mss;
2405
                                                }
2406
                                        }
2407
                                        break;
2408
                                case TCPOPT_WINDOW:
2409
                                        if(opsize==TCPOLEN_WINDOW && th->syn && !estab)
2410
                                                if (sysctl_tcp_window_scaling) {
2411
                                                        tp->wscale_ok = 1;
2412
                                                        tp->snd_wscale = *(__u8 *)ptr;
2413
                                                        if(tp->snd_wscale > 14) {
2414
                                                                if(net_ratelimit())
2415
                                                                        printk("tcp_parse_options: Illegal window "
2416
                                                                               "scaling value %d >14 received.",
2417
                                                                               tp->snd_wscale);
2418
                                                                tp->snd_wscale = 14;
2419
                                                        }
2420
                                                }
2421
                                        break;
2422
                                case TCPOPT_TIMESTAMP:
2423
                                        if(opsize==TCPOLEN_TIMESTAMP) {
2424
                                                if ((estab && tp->tstamp_ok) ||
2425
                                                    (!estab && sysctl_tcp_timestamps)) {
2426
                                                        tp->saw_tstamp = 1;
2427
                                                        tp->rcv_tsval = ntohl(*(__u32 *)ptr);
2428
                                                        tp->rcv_tsecr = ntohl(*(__u32 *)(ptr+4));
2429
                                                }
2430
                                        }
2431
                                        break;
2432
                                case TCPOPT_SACK_PERM:
2433
                                        if(opsize==TCPOLEN_SACK_PERM && th->syn && !estab) {
2434
                                                if (sysctl_tcp_sack) {
2435
                                                        tp->sack_ok = 1;
2436
                                                        tcp_sack_reset(tp);
2437
                                                }
2438
                                        }
2439
                                        break;
2440
 
2441
                                case TCPOPT_SACK:
2442
                                        if((opsize >= (TCPOLEN_SACK_BASE + TCPOLEN_SACK_PERBLOCK)) &&
2443
                                           !((opsize - TCPOLEN_SACK_BASE) % TCPOLEN_SACK_PERBLOCK) &&
2444
                                           tp->sack_ok) {
2445
                                                TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th;
2446
                                        }
2447
                                };
2448
                                ptr+=opsize-2;
2449
                                length-=opsize;
2450
                };
2451
        }
2452
}
2453
 
2454
/* Fast parse options. This hopes to only see timestamps.
2455
 * If it is wrong it falls back on tcp_parse_options().
2456
 */
2457
static __inline__ int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th, struct tcp_opt *tp)
2458
{
2459
        if (th->doff == sizeof(struct tcphdr)>>2) {
2460
                tp->saw_tstamp = 0;
2461
                return 0;
2462
        } else if (tp->tstamp_ok &&
2463
                   th->doff == (sizeof(struct tcphdr)>>2)+(TCPOLEN_TSTAMP_ALIGNED>>2)) {
2464
                __u32 *ptr = (__u32 *)(th + 1);
2465
                if (*ptr == ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
2466
                                  | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) {
2467
                        tp->saw_tstamp = 1;
2468
                        ++ptr;
2469
                        tp->rcv_tsval = ntohl(*ptr);
2470
                        ++ptr;
2471
                        tp->rcv_tsecr = ntohl(*ptr);
2472
                        return 1;
2473
                }
2474
        }
2475
        tcp_parse_options(skb, tp, 1);
2476
        return 1;
2477
}
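/* A self-contained sketch (not part of this file) of the aligned timestamp
 * layout the fast path above relies on: senders are expected to pad the
 * timestamp option to 12 bytes as NOP, NOP, kind=8, len=10, TSval, TSecr,
 * so a single 32-bit compare recognises it. Constants are written out
 * explicitly; the helper name is illustrative.
 */
static int example_parse_aligned_tstamp(const unsigned char *opt, /* the 12 bytes after the TCP header */
                                        unsigned int *tsval,
                                        unsigned int *tsecr)
{
        /* NOP (1), NOP (1), TCPOPT_TIMESTAMP (8), TCPOLEN_TIMESTAMP (10) */
        if (opt[0] != 1 || opt[1] != 1 || opt[2] != 8 || opt[3] != 10)
                return 0;

        /* both timestamps are transmitted in network byte order */
        *tsval = ((unsigned int)opt[4] << 24) | ((unsigned int)opt[5] << 16) |
                 ((unsigned int)opt[6] << 8)  |  (unsigned int)opt[7];
        *tsecr = ((unsigned int)opt[8] << 24) | ((unsigned int)opt[9] << 16) |
                 ((unsigned int)opt[10] << 8) |  (unsigned int)opt[11];
        return 1;
}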
2478
 
2479
extern __inline__ void
2480
tcp_store_ts_recent(struct tcp_opt *tp)
2481
{
2482
        tp->ts_recent = tp->rcv_tsval;
2483
        tp->ts_recent_stamp = xtime.tv_sec;
2484
}
2485
 
2486
extern __inline__ void
2487
tcp_replace_ts_recent(struct tcp_opt *tp, u32 seq)
2488
{
2489
        if (tp->saw_tstamp && !after(seq, tp->rcv_wup)) {
2490
                /* PAWS bug workaround wrt. ACK frames, the PAWS discard
2491
                 * extra check below makes sure this can only happen
2492
                 * for pure ACK frames.  -DaveM
2493
                 *
2494
                 * Not only, also it occurs for expired timestamps.
2495
                 */
2496
 
2497
                if((s32)(tp->rcv_tsval - tp->ts_recent) >= 0 ||
2498
                   xtime.tv_sec >= tp->ts_recent_stamp + TCP_PAWS_24DAYS)
2499
                        tcp_store_ts_recent(tp);
2500
        }
2501
}
2502
 
2503
/* Sorry, PAWS as specified is broken wrt. pure-ACKs -DaveM
2504
 *
2505
 * It is not fatal. If this ACK does _not_ change critical state (seqs, window)
2506
 * it can pass through stack. So, the following predicate verifies that
2507
 * this segment is not used for anything but congestion avoidance or
2508
 * fast retransmit. Moreover, we are even able to eliminate most of such
2509
 * second order effects, if we apply some small "replay" window (~RTO)
2510
 * to timestamp space.
2511
 *
2512
 * All these measures still do not guarantee that we reject wrapped ACKs
2513
 * on networks with high bandwidth, when sequence space is recycled quickly,
2514
 * but it guarantees that such events will be very rare and do not affect
2515
 * connection seriously. This doesn't look nice, but alas, PAWS is really
2516
 * buggy extension.
2517
 *
2518
 * [ Later note. Even worse! It is buggy for segments _with_ data. RFC
2519
 * states that events when retransmit arrives after original data are rare.
2520
 * It is a blatant lie. VJ forgot about fast retransmit! 8)8) It is
2521
 * the biggest problem on large power networks even with minor reordering.
2522
 * OK, let's give it small replay window. If peer clock is even 1hz, it is safe
2523
 * up to bandwidth of 18Gigabit/sec. 8) ]
2524
 */
2525
 
2526
static int tcp_disordered_ack(struct tcp_opt *tp, struct sk_buff *skb)
2527
{
2528
        struct tcphdr *th = skb->h.th;
2529
        u32 seq = TCP_SKB_CB(skb)->seq;
2530
        u32 ack = TCP_SKB_CB(skb)->ack_seq;
2531
 
2532
        return (/* 1. Pure ACK with correct sequence number. */
2533
                (th->ack && seq == TCP_SKB_CB(skb)->end_seq && seq == tp->rcv_nxt) &&
2534
 
2535
                /* 2. ... and duplicate ACK. */
2536
                ack == tp->snd_una &&
2537
 
2538
                /* 3. ... and does not update window. */
2539
                !tcp_may_update_window(tp, ack, seq, ntohs(th->window)<<tp->snd_wscale) &&
2540
 
2541
                /* 4. ... and sits in replay window. */
2542
                (s32)(tp->ts_recent - tp->rcv_tsval) <= (tp->rto*1024)/HZ);
2543
}
2544
 
2545
extern __inline__ int tcp_paws_discard(struct tcp_opt *tp, struct sk_buff *skb)
2546
{
2547
        return ((s32)(tp->ts_recent - tp->rcv_tsval) > TCP_PAWS_WINDOW &&
2548
                xtime.tv_sec < tp->ts_recent_stamp + TCP_PAWS_24DAYS &&
2549
                !tcp_disordered_ack(tp, skb));
2550
}
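/* A minimal sketch (not from this file) of the PAWS test above: a segment is
 * a candidate for discard when its timestamp is older than ts_recent by more
 * than the allowed replay window (compared with signed 32-bit arithmetic so
 * timestamp wrap-around is handled), and ts_recent itself is not yet stale
 * (older than ~24 days). Names and the window parameter are illustrative.
 */
static int example_paws_reject(unsigned int rcv_tsval,   /* timestamp of the segment   */
                               unsigned int ts_recent,   /* last validated timestamp   */
                               long ts_recent_age,       /* seconds since it was saved */
                               int replay_window)        /* e.g. 1, as in TCP_PAWS_WINDOW */
{
        return (int)(ts_recent - rcv_tsval) > replay_window &&
               ts_recent_age < 60 * 60 * 24 * 24;        /* 24 days */
}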
2551
 
2552
/* Check segment sequence number for validity.
2553
 *
2554
 * Segment controls are considered valid, if the segment
2555
 * fits to the window after truncation to the window. Acceptability
2556
 * of data (and SYN, FIN, of course) is checked separately.
2557
 * See tcp_data_queue(), for example.
2558
 *
2559
 * Also, controls (RST is main one) are accepted using RCV.WUP instead
2560
 * of RCV.NXT. Peer still did not advance his SND.UNA when we
2561
 * delayed ACK, so that his SND.UNA <= our RCV.WUP.
2562
 * (borrowed from freebsd)
2563
 */
2564
 
2565
static inline int tcp_sequence(struct tcp_opt *tp, u32 seq, u32 end_seq)
2566
{
2567
        return  !before(end_seq, tp->rcv_wup) &&
2568
                !after(seq, tp->rcv_nxt + tcp_receive_window(tp));
2569
}
2570
 
2571
/* When we get a reset we do this. */
2572
static void tcp_reset(struct sock *sk)
2573
{
2574
        /* We want the right error as BSD sees it (and indeed as we do). */
2575
        switch (sk->state) {
2576
                case TCP_SYN_SENT:
2577
                        sk->err = ECONNREFUSED;
2578
                        break;
2579
                case TCP_CLOSE_WAIT:
2580
                        sk->err = EPIPE;
2581
                        break;
2582
                case TCP_CLOSE:
2583
                        return;
2584
                default:
2585
                        sk->err = ECONNRESET;
2586
        }
2587
 
2588
        if (!sk->dead)
2589
                sk->error_report(sk);
2590
 
2591
        tcp_done(sk);
2592
}
2593
 
2594
/*
2595
 *      Process the FIN bit. This now behaves as it is supposed to work
2596
 *      and the FIN takes effect when it is validly part of sequence
2597
 *      space. Not before when we get holes.
2598
 *
2599
 *      If we are ESTABLISHED, a received fin moves us to CLOSE-WAIT
2600
 *      (and thence onto LAST-ACK and finally, CLOSE, we never enter
2601
 *      TIME-WAIT)
2602
 *
2603
 *      If we are in FINWAIT-1, a received FIN indicates simultaneous
2604
 *      close and we go into CLOSING (and later onto TIME-WAIT)
2605
 *
2606
 *      If we are in FINWAIT-2, a received FIN moves us to TIME-WAIT.
2607
 */
2608
static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th)
2609
{
2610
        struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
2611
 
2612
        tcp_schedule_ack(tp);
2613
 
2614
        sk->shutdown |= RCV_SHUTDOWN;
2615
        sk->done = 1;
2616
 
2617
        switch(sk->state) {
2618
                case TCP_SYN_RECV:
2619
                case TCP_ESTABLISHED:
2620
                        /* Move to CLOSE_WAIT */
2621
                        tcp_set_state(sk, TCP_CLOSE_WAIT);
2622
                        tp->ack.pingpong = 1;
2623
                        break;
2624
 
2625
                case TCP_CLOSE_WAIT:
2626
                case TCP_CLOSING:
2627
                        /* Received a retransmission of the FIN, do
2628
                         * nothing.
2629
                         */
2630
                        break;
2631
                case TCP_LAST_ACK:
2632
                        /* RFC793: Remain in the LAST-ACK state. */
2633
                        break;
2634
 
2635
                case TCP_FIN_WAIT1:
2636
                        /* This case occurs when a simultaneous close
2637
                         * happens, we must ack the received FIN and
2638
                         * enter the CLOSING state.
2639
                         */
2640
                        tcp_send_ack(sk);
2641
                        tcp_set_state(sk, TCP_CLOSING);
2642
                        break;
2643
                case TCP_FIN_WAIT2:
2644
                        /* Received a FIN -- send ACK and enter TIME_WAIT. */
2645
                        tcp_send_ack(sk);
2646
                        tcp_time_wait(sk, TCP_TIME_WAIT, 0);
2647
                        break;
2648
                default:
2649
                        /* Only TCP_LISTEN and TCP_CLOSE are left; in these
2650
                         * cases we should never reach this piece of code.
2651
                         */
2652
                        printk("tcp_fin: Impossible, sk->state=%d\n", sk->state);
2653
                        break;
2654
        };
2655
 
2656
        /* It _is_ possible that we have something out-of-order _after_ the FIN.
2657
         * Probably we should reset in this case. For now, drop them.
2658
         */
2659
        __skb_queue_purge(&tp->out_of_order_queue);
2660
        if (tp->sack_ok)
2661
                tcp_sack_reset(tp);
2662
        tcp_mem_reclaim(sk);
2663
 
2664
        if (!sk->dead) {
2665
                sk->state_change(sk);
2666
 
2667
                /* Do not send POLL_HUP for half duplex close. */
2668
                if (sk->shutdown == SHUTDOWN_MASK || sk->state == TCP_CLOSE)
2669
                        sk_wake_async(sk, 1, POLL_HUP);
2670
                else
2671
                        sk_wake_async(sk, 1, POLL_IN);
2672
        }
2673
}
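
/* Sketch (not in the original): the state transitions described in the
 * comment above, reduced to a pure next-state function.  States not listed
 * (CLOSE_WAIT, CLOSING, LAST_ACK) keep their state on a retransmitted FIN;
 * LISTEN and CLOSE should never get here.
 */
static inline int tcp_fin_next_state_sketch(int state)
{
        switch (state) {
        case TCP_SYN_RECV:
        case TCP_ESTABLISHED:
                return TCP_CLOSE_WAIT;          /* remote side closed first */
        case TCP_FIN_WAIT1:
                return TCP_CLOSING;             /* simultaneous close */
        case TCP_FIN_WAIT2:
                return TCP_TIME_WAIT;           /* orderly close completes */
        default:
                return state;                   /* no state change */
        }
}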
2674
 
2675
static __inline__ int
2676
tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, u32 end_seq)
2677
{
2678
        if (!after(seq, sp->end_seq) && !after(sp->start_seq, end_seq)) {
2679
                if (before(seq, sp->start_seq))
2680
                        sp->start_seq = seq;
2681
                if (after(end_seq, sp->end_seq))
2682
                        sp->end_seq = end_seq;
2683
                return 1;
2684
        }
2685
        return 0;
2686
}
2687
 
2688
static __inline__ void tcp_dsack_set(struct tcp_opt *tp, u32 seq, u32 end_seq)
2689
{
2690
        if (tp->sack_ok && sysctl_tcp_dsack) {
2691
                if (before(seq, tp->rcv_nxt))
2692
                        NET_INC_STATS_BH(TCPDSACKOldSent);
2693
                else
2694
                        NET_INC_STATS_BH(TCPDSACKOfoSent);
2695
 
2696
                tp->dsack = 1;
2697
                tp->duplicate_sack[0].start_seq = seq;
2698
                tp->duplicate_sack[0].end_seq = end_seq;
2699
                tp->eff_sacks = min(tp->num_sacks+1, 4-tp->tstamp_ok);
2700
        }
2701
}
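
/* Sketch (not from the original) of the option-space arithmetic behind the
 * "4 - tp->tstamp_ok" bound used above and below: 40 bytes of TCP option
 * space, 12 of which go to the aligned timestamp option when it is in use,
 * and a SACK option costs 2 bytes of kind/length plus 8 bytes per block.
 */
static inline int tcp_max_sack_blocks_sketch(int tstamp_ok)
{
        int space = 40 - (tstamp_ok ? 12 : 0);  /* option bytes left */

        return (space - 2) / 8;                 /* 4 blocks, or 3 with timestamps */
}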
2702
 
2703
static __inline__ void tcp_dsack_extend(struct tcp_opt *tp, u32 seq, u32 end_seq)
2704
{
2705
        if (!tp->dsack)
2706
                tcp_dsack_set(tp, seq, end_seq);
2707
        else
2708
                tcp_sack_extend(tp->duplicate_sack, seq, end_seq);
2709
}
2710
 
2711
static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb)
2712
{
2713
        struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
2714
 
2715
        if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
2716
            before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
2717
                NET_INC_STATS_BH(DelayedACKLost);
2718
                tcp_enter_quickack_mode(tp);
2719
 
2720
                if (tp->sack_ok && sysctl_tcp_dsack) {
2721
                        u32 end_seq = TCP_SKB_CB(skb)->end_seq;
2722
 
2723
                        if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))
2724
                                end_seq = tp->rcv_nxt;
2725
                        tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, end_seq);
2726
                }
2727
        }
2728
 
2729
        tcp_send_ack(sk);
2730
}
2731
 
2732
/* These routines update the SACK block as out-of-order packets arrive or
2733
 * in-order packets close up the sequence space.
2734
 */
2735
static void tcp_sack_maybe_coalesce(struct tcp_opt *tp)
2736
{
2737
        int this_sack;
2738
        struct tcp_sack_block *sp = &tp->selective_acks[0];
2739
        struct tcp_sack_block *swalk = sp+1;
2740
 
2741
        /* See if the recent change to the first SACK eats into
2742
         * or hits the sequence space of other SACK blocks; if so, coalesce.
2743
         */
2744
        for (this_sack = 1; this_sack < tp->num_sacks; ) {
2745
                if (tcp_sack_extend(sp, swalk->start_seq, swalk->end_seq)) {
2746
                        int i;
2747
 
2748
                        /* Zap SWALK, by moving every further SACK up by one slot.
2749
                         * Decrease num_sacks.
2750
                         */
2751
                        tp->num_sacks--;
2752
                        tp->eff_sacks = min(tp->num_sacks+tp->dsack, 4-tp->tstamp_ok);
2753
                        for(i=this_sack; i < tp->num_sacks; i++)
2754
                                sp[i] = sp[i+1];
2755
                        continue;
2756
                }
2757
                this_sack++, swalk++;
2758
        }
2759
}
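
/* Worked example (sketch, not part of the original): the primitive the
 * coalescing loop above is built on.  Starting from a reported block
 * [1000,2000), a range that touches or overlaps it grows the block in
 * place, while a disjoint range is left for its own SACK slot.
 */
static inline void tcp_sack_extend_demo_sketch(void)
{
        struct tcp_sack_block sp;

        sp.start_seq = 1000;
        sp.end_seq = 2000;
        tcp_sack_extend(&sp, 1500, 2500);       /* overlaps: block becomes [1000,2500) */
        tcp_sack_extend(&sp, 3000, 3500);       /* disjoint: returns 0, block unchanged */
}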
2760
 
2761
static __inline__ void tcp_sack_swap(struct tcp_sack_block *sack1, struct tcp_sack_block *sack2)
2762
{
2763
        __u32 tmp;
2764
 
2765
        tmp = sack1->start_seq;
2766
        sack1->start_seq = sack2->start_seq;
2767
        sack2->start_seq = tmp;
2768
 
2769
        tmp = sack1->end_seq;
2770
        sack1->end_seq = sack2->end_seq;
2771
        sack2->end_seq = tmp;
2772
}
2773
 
2774
static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
2775
{
2776
        struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
2777
        struct tcp_sack_block *sp = &tp->selective_acks[0];
2778
        int cur_sacks = tp->num_sacks;
2779
        int this_sack;
2780
 
2781
        if (!cur_sacks)
2782
                goto new_sack;
2783
 
2784
        for (this_sack=0; this_sack<cur_sacks; this_sack++, sp++) {
2785
                if (tcp_sack_extend(sp, seq, end_seq)) {
2786
                        /* Rotate this_sack to the first one. */
2787
                        for (; this_sack>0; this_sack--, sp--)
2788
                                tcp_sack_swap(sp, sp-1);
2789
                        if (cur_sacks > 1)
2790
                                tcp_sack_maybe_coalesce(tp);
2791
                        return;
2792
                }
2793
        }
2794
 
2795
        /* Could not find an adjacent existing SACK, so build a new one,
2796
         * put it at the front, and shift everyone else down.  We
2797
         * always know there is at least one SACK present already here.
2798
         *
2799
         * If the sack array is full, forget about the last one.
2800
         */
2801
        if (this_sack >= 4) {
2802
                this_sack--;
2803
                tp->num_sacks--;
2804
                sp--;
2805
        }
2806
        for(; this_sack > 0; this_sack--, sp--)
2807
                *sp = *(sp-1);
2808
 
2809
new_sack:
2810
        /* Build the new head SACK, and we're done. */
2811
        sp->start_seq = seq;
2812
        sp->end_seq = end_seq;
2813
        tp->num_sacks++;
2814
        tp->eff_sacks = min(tp->num_sacks+tp->dsack, 4-tp->tstamp_ok);
2815
}
2816
 
2817
/* RCV.NXT advances, some SACKs should be eaten. */
2818
 
2819
static void tcp_sack_remove(struct tcp_opt *tp)
2820
{
2821
        struct tcp_sack_block *sp = &tp->selective_acks[0];
2822
        int num_sacks = tp->num_sacks;
2823
        int this_sack;
2824
 
2825
        /* Empty ofo queue, hence, all the SACKs are eaten. Clear. */
2826
        if (skb_queue_len(&tp->out_of_order_queue) == 0) {
2827
                tp->num_sacks = 0;
2828
                tp->eff_sacks = tp->dsack;
2829
                return;
2830
        }
2831
 
2832
        for(this_sack = 0; this_sack < num_sacks; ) {
2833
                /* Check if the start of the sack is covered by RCV.NXT. */
2834
                if (!before(tp->rcv_nxt, sp->start_seq)) {
2835
                        int i;
2836
 
2837
                        /* RCV.NXT must cover all the block! */
2838
                        BUG_TRAP(!before(tp->rcv_nxt, sp->end_seq));
2839
 
2840
                        /* Zap this SACK by moving forward any other SACKs. */
2841
                        for (i=this_sack+1; i < num_sacks; i++)
2842
                                tp->selective_acks[i-1] = tp->selective_acks[i];
2843
                        num_sacks--;
2844
                        continue;
2845
                }
2846
                this_sack++;
2847
                sp++;
2848
        }
2849
        if (num_sacks != tp->num_sacks) {
2850
                tp->num_sacks = num_sacks;
2851
                tp->eff_sacks = min(tp->num_sacks+tp->dsack, 4-tp->tstamp_ok);
2852
        }
2853
}
2854
 
2855
/* This one checks to see if we can put data from the
2856
 * out_of_order queue into the receive_queue.
2857
 */
2858
static void tcp_ofo_queue(struct sock *sk)
2859
{
2860
        struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
2861
        __u32 dsack_high = tp->rcv_nxt;
2862
        struct sk_buff *skb;
2863
 
2864
        while ((skb = skb_peek(&tp->out_of_order_queue)) != NULL) {
2865
                if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
2866
                        break;
2867
 
2868
                if (before(TCP_SKB_CB(skb)->seq, dsack_high)) {
2869
                        __u32 dsack = dsack_high;
2870
                        if (before(TCP_SKB_CB(skb)->end_seq, dsack_high))
2871
                                dsack_high = TCP_SKB_CB(skb)->end_seq;
2872
                        tcp_dsack_extend(tp, TCP_SKB_CB(skb)->seq, dsack);
2873
                }
2874
 
2875
                if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
2876
                        SOCK_DEBUG(sk, "ofo packet was already received\n");
2877
                        __skb_unlink(skb, skb->list);
2878
                        __kfree_skb(skb);
2879
                        continue;
2880
                }
2881
                SOCK_DEBUG(sk, "ofo requeuing : rcv_next %X seq %X - %X\n",
2882
                           tp->rcv_nxt, TCP_SKB_CB(skb)->seq,
2883
                           TCP_SKB_CB(skb)->end_seq);
2884
 
2885
                __skb_unlink(skb, skb->list);
2886
                __skb_queue_tail(&sk->receive_queue, skb);
2887
                tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
2888
                if(skb->h.th->fin)
2889
                        tcp_fin(skb, sk, skb->h.th);
2890
        }
2891
}
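
/* Sketch (not in the original): the condition under which the loop above
 * moves a queued out-of-order skb into the receive queue -- it must start
 * at or before RCV.NXT and still contribute at least one new byte; anything
 * wholly below RCV.NXT is dropped as already received.
 */
static inline int tcp_ofo_skb_usable_sketch(u32 rcv_nxt, u32 seq, u32 end_seq)
{
        return !after(seq, rcv_nxt) && after(end_seq, rcv_nxt);
}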
2892
 
2893
static inline int tcp_rmem_schedule(struct sock *sk, struct sk_buff *skb)
2894
{
2895
        return (int)skb->truesize <= sk->forward_alloc ||
2896
                tcp_mem_schedule(sk, skb->truesize, 1);
2897
}
2898
 
2899
static int tcp_prune_queue(struct sock *sk);
2900
 
2901
static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
2902
{
2903
        struct tcphdr *th = skb->h.th;
2904
        struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
2905
        int eaten = -1;
2906
 
2907
        if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq)
2908
                goto drop;
2909
 
2910
        th = skb->h.th;
2911
        __skb_pull(skb, th->doff*4);
2912
 
2913
        TCP_ECN_accept_cwr(tp, skb);
2914
 
2915
        if (tp->dsack) {
2916
                tp->dsack = 0;
2917
                tp->eff_sacks = min_t(unsigned int, tp->num_sacks, 4-tp->tstamp_ok);
2918
        }
2919
 
2920
        /*  Queue data for delivery to the user.
2921
         *  Packets in sequence go to the receive queue.
2922
         *  Out of sequence packets to the out_of_order_queue.
2923
         */
2924
        if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) {
2925
                if (tcp_receive_window(tp) == 0)
2926
                        goto out_of_window;
2927
 
2928
                /* Ok. In sequence. In window. */
2929
                if (tp->ucopy.task == current &&
2930
                    tp->copied_seq == tp->rcv_nxt &&
2931
                    tp->ucopy.len &&
2932
                    sk->lock.users &&
2933
                    !tp->urg_data) {
2934
                        int chunk = min_t(unsigned int, skb->len, tp->ucopy.len);
2935
 
2936
                        __set_current_state(TASK_RUNNING);
2937
 
2938
                        local_bh_enable();
2939
                        if (!skb_copy_datagram_iovec(skb, 0, tp->ucopy.iov, chunk)) {
2940
                                tp->ucopy.len -= chunk;
2941
                                tp->copied_seq += chunk;
2942
                                eaten = (chunk == skb->len && !th->fin);
2943
                        }
2944
                        local_bh_disable();
2945
                }
2946
 
2947
                if (eaten <= 0) {
2948
queue_and_out:
2949
                        if (eaten < 0 &&
2950
                            (atomic_read(&sk->rmem_alloc) > sk->rcvbuf ||
2951
                             !tcp_rmem_schedule(sk, skb))) {
2952
                                if (tcp_prune_queue(sk) < 0 || !tcp_rmem_schedule(sk, skb))
2953
                                        goto drop;
2954
                        }
2955
                        tcp_set_owner_r(skb, sk);
2956
                        __skb_queue_tail(&sk->receive_queue, skb);
2957
                }
2958
                tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
2959
                if(skb->len)
2960
                        tcp_event_data_recv(sk, tp, skb);
2961
                if(th->fin)
2962
                        tcp_fin(skb, sk, th);
2963
 
2964
                if (skb_queue_len(&tp->out_of_order_queue)) {
2965
                        tcp_ofo_queue(sk);
2966
 
2967
                        /* RFC 2581, section 4.2: SHOULD send an immediate ACK
2968
                         * when a gap in the queue is filled.
2969
                         */
2970
                        if (skb_queue_len(&tp->out_of_order_queue) == 0)
2971
                                tp->ack.pingpong = 0;
2972
                }
2973
 
2974
                if(tp->num_sacks)
2975
                        tcp_sack_remove(tp);
2976
 
2977
                tcp_fast_path_check(sk, tp);
2978
 
2979
                if (eaten > 0) {
2980
                        __kfree_skb(skb);
2981
                } else if (!sk->dead)
2982
                        sk->data_ready(sk, 0);
2983
                return;
2984
        }
2985
 
2986
        if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
2987
                /* A retransmit, 2nd most common case.  Force an immediate ack. */
2988
                NET_INC_STATS_BH(DelayedACKLost);
2989
                tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
2990
 
2991
out_of_window:
2992
                tcp_enter_quickack_mode(tp);
2993
                tcp_schedule_ack(tp);
2994
drop:
2995
                __kfree_skb(skb);
2996
                return;
2997
        }
2998
 
2999
        /* Out of window, e.g. a zero window probe. */
3000
        if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt+tcp_receive_window(tp)))
3001
                goto out_of_window;
3002
 
3003
        tcp_enter_quickack_mode(tp);
3004
 
3005
        if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
3006
                /* Partial packet, seq < rcv_next < end_seq */
3007
                SOCK_DEBUG(sk, "partial packet: rcv_next %X seq %X - %X\n",
3008
                           tp->rcv_nxt, TCP_SKB_CB(skb)->seq,
3009
                           TCP_SKB_CB(skb)->end_seq);
3010
 
3011
                tcp_dsack_set(tp, TCP_SKB_CB(skb)->seq, tp->rcv_nxt);
3012
 
3013
                /* If the window is closed, drop the tail of the packet, but only
3014
                 * after remembering the D-SACK for its head, set on the previous line.
3015
                 */
3016
                if (!tcp_receive_window(tp))
3017
                        goto out_of_window;
3018
                goto queue_and_out;
3019
        }
3020
 
3021
        TCP_ECN_check_ce(tp, skb);
3022
 
3023
        if (atomic_read(&sk->rmem_alloc) > sk->rcvbuf ||
3024
            !tcp_rmem_schedule(sk, skb)) {
3025
                if (tcp_prune_queue(sk) < 0 || !tcp_rmem_schedule(sk, skb))
3026
                        goto drop;
3027
        }
3028
 
3029
        /* Disable header prediction. */
3030
        tp->pred_flags = 0;
3031
        tcp_schedule_ack(tp);
3032
 
3033
        SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n",
3034
                   tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
3035
 
3036
        tcp_set_owner_r(skb, sk);
3037
 
3038
        if (skb_peek(&tp->out_of_order_queue) == NULL) {
3039
                /* Initial out of order segment, build 1 SACK. */
3040
                if(tp->sack_ok) {
3041
                        tp->num_sacks = 1;
3042
                        tp->dsack = 0;
3043
                        tp->eff_sacks = 1;
3044
                        tp->selective_acks[0].start_seq = TCP_SKB_CB(skb)->seq;
3045
                        tp->selective_acks[0].end_seq = TCP_SKB_CB(skb)->end_seq;
3046
                }
3047
                __skb_queue_head(&tp->out_of_order_queue,skb);
3048
        } else {
3049
                struct sk_buff *skb1=tp->out_of_order_queue.prev;
3050
                u32 seq = TCP_SKB_CB(skb)->seq;
3051
                u32 end_seq = TCP_SKB_CB(skb)->end_seq;
3052
 
3053
                if (seq == TCP_SKB_CB(skb1)->end_seq) {
3054
                        __skb_append(skb1, skb);
3055
 
3056
                        if (tp->num_sacks == 0 ||
3057
                            tp->selective_acks[0].end_seq != seq)
3058
                                goto add_sack;
3059
 
3060
                        /* Common case: data arrive in order after hole. */
3061
                        tp->selective_acks[0].end_seq = end_seq;
3062
                        return;
3063
                }
3064
 
3065
                /* Find place to insert this segment. */
3066
                do {
3067
                        if (!after(TCP_SKB_CB(skb1)->seq, seq))
3068
                                break;
3069
                } while ((skb1=skb1->prev) != (struct sk_buff*)&tp->out_of_order_queue);
3070
 
3071
                /* Does this skb overlap the previous one? */
3072
                if (skb1 != (struct sk_buff*)&tp->out_of_order_queue &&
3073
                    before(seq, TCP_SKB_CB(skb1)->end_seq)) {
3074
                        if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
3075
                                /* All the bits are present. Drop. */
3076
                                __kfree_skb(skb);
3077
                                tcp_dsack_set(tp, seq, end_seq);
3078
                                goto add_sack;
3079
                        }
3080
                        if (after(seq, TCP_SKB_CB(skb1)->seq)) {
3081
                                /* Partial overlap. */
3082
                                tcp_dsack_set(tp, seq, TCP_SKB_CB(skb1)->end_seq);
3083
                        } else {
3084
                                skb1 = skb1->prev;
3085
                        }
3086
                }
3087
                __skb_insert(skb, skb1, skb1->next, &tp->out_of_order_queue);
3088
 
3089
                /* And clean segments covered by new one as whole. */
3090
                while ((skb1 = skb->next) != (struct sk_buff*)&tp->out_of_order_queue &&
3091
                       after(end_seq, TCP_SKB_CB(skb1)->seq)) {
3092
                       if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
3093
                               tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, end_seq);
3094
                               break;
3095
                       }
3096
                       __skb_unlink(skb1, skb1->list);
3097
                       tcp_dsack_extend(tp, TCP_SKB_CB(skb1)->seq, TCP_SKB_CB(skb1)->end_seq);
3098
                       __kfree_skb(skb1);
3099
                }
3100
 
3101
add_sack:
3102
                if (tp->sack_ok)
3103
                        tcp_sack_new_ofo_skb(sk, seq, end_seq);
3104
        }
3105
}
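
/* Sketch (not part of the original): a summary of the classification that
 * tcp_data_queue() above performs on an arriving segment, with "window"
 * standing for tcp_receive_window(tp).  The strings are purely illustrative.
 */
static inline const char *tcp_data_queue_class_sketch(u32 rcv_nxt, u32 window,
                                                      u32 seq, u32 end_seq)
{
        if (seq == rcv_nxt)
                return window ? "in sequence: receive queue" :
                                "zero window: drop and ack";
        if (!after(end_seq, rcv_nxt))
                return "retransmit of old data: drop, ack with D-SACK";
        if (!before(seq, rcv_nxt + window))
                return "beyond the window: drop and ack";
        if (before(seq, rcv_nxt))
                return "partial overlap: remember D-SACK for the head, then queue";
        return "out of order: out_of_order_queue and SACK";
}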
3106
 
3107
/* Collapse contiguous sequence of skbs head..tail with
3108
 * sequence numbers start..end.
3109
 * Segments with FIN/SYN are not collapsed (only because this
3110
 * simplifies code)
3111
 */
3112
static void
3113
tcp_collapse(struct sock *sk, struct sk_buff *head,
3114
             struct sk_buff *tail, u32 start, u32 end)
3115
{
3116
        struct sk_buff *skb;
3117
 
3118
        /* First, check that the queue is collapsible and find
3119
         * the point where collapsing can be useful. */
3120
        for (skb = head; skb != tail; ) {
3121
                /* No new bits? It is possible on ofo queue. */
3122
                if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
3123
                        struct sk_buff *next = skb->next;
3124
                        __skb_unlink(skb, skb->list);
3125
                        __kfree_skb(skb);
3126
                        NET_INC_STATS_BH(TCPRcvCollapsed);
3127
                        skb = next;
3128
                        continue;
3129
                }
3130
 
3131
                /* The first skb to collapse is:
3132
                 * - not SYN/FIN and
3133
                 * - bloated or contains data before "start" or
3134
                 *   overlaps the next one.
3135
                 */
3136
                if (!skb->h.th->syn && !skb->h.th->fin &&
3137
                    (tcp_win_from_space(skb->truesize) > skb->len ||
3138
                     before(TCP_SKB_CB(skb)->seq, start) ||
3139
                     (skb->next != tail &&
3140
                      TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb->next)->seq)))
3141
                        break;
3142
 
3143
                /* Decided to skip this, advance start seq. */
3144
                start = TCP_SKB_CB(skb)->end_seq;
3145
                skb = skb->next;
3146
        }
3147
        if (skb == tail || skb->h.th->syn || skb->h.th->fin)
3148
                return;
3149
 
3150
        while (before(start, end)) {
3151
                struct sk_buff *nskb;
3152
                int header = skb_headroom(skb);
3153
                int copy = (PAGE_SIZE - sizeof(struct sk_buff) -
3154
                            sizeof(struct skb_shared_info) - header - 31)&~15;
3155
 
3156
                /* Too big header? This can happen with IPv6. */
3157
                if (copy < 0)
3158
                        return;
3159
                if (end-start < copy)
3160
                        copy = end-start;
3161
                nskb = alloc_skb(copy+header, GFP_ATOMIC);
3162
                if (!nskb)
3163
                        return;
3164
                skb_reserve(nskb, header);
3165
                memcpy(nskb->head, skb->head, header);
3166
                nskb->nh.raw = nskb->head + (skb->nh.raw-skb->head);
3167
                nskb->h.raw = nskb->head + (skb->h.raw-skb->head);
3168
                nskb->mac.raw = nskb->head + (skb->mac.raw-skb->head);
3169
                memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
3170
                TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
3171
                __skb_insert(nskb, skb->prev, skb, skb->list);
3172
                tcp_set_owner_r(nskb, sk);
3173
 
3174
                /* Copy data, releasing collapsed skbs. */
3175
                while (copy > 0) {
3176
                        int offset = start - TCP_SKB_CB(skb)->seq;
3177
                        int size = TCP_SKB_CB(skb)->end_seq - start;
3178
 
3179
                        if (offset < 0) BUG();
3180
                        if (size > 0) {
3181
                                size = min(copy, size);
3182
                                if (skb_copy_bits(skb, offset, skb_put(nskb, size), size))
3183
                                        BUG();
3184
                                TCP_SKB_CB(nskb)->end_seq += size;
3185
                                copy -= size;
3186
                                start += size;
3187
                        }
3188
                        if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
3189
                                struct sk_buff *next = skb->next;
3190
                                __skb_unlink(skb, skb->list);
3191
                                __kfree_skb(skb);
3192
                                NET_INC_STATS_BH(TCPRcvCollapsed);
3193
                                skb = next;
3194
                                if (skb == tail || skb->h.th->syn || skb->h.th->fin)
3195
                                        return;
3196
                        }
3197
                }
3198
        }
3199
}
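
/* Sketch (not from the original): the "bloated" criterion used above when
 * choosing the first skb to collapse -- an skb is a candidate when the
 * receive-window credit charged for its true size exceeds the payload it
 * actually carries.
 */
static inline int tcp_skb_bloated_sketch(struct sk_buff *skb)
{
        return tcp_win_from_space(skb->truesize) > skb->len;
}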
3200
 
3201
/* Collapse ofo queue. Algorithm: select contiguous sequence of skbs
3202
 * and tcp_collapse() them until all the queue is collapsed.
3203
 */
3204
static void tcp_collapse_ofo_queue(struct sock *sk)
3205
{
3206
        struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
3207
        struct sk_buff *skb = skb_peek(&tp->out_of_order_queue);
3208
        struct sk_buff *head;
3209
        u32 start, end;
3210
 
3211
        if (skb == NULL)
3212
                return;
3213
 
3214
        start = TCP_SKB_CB(skb)->seq;
3215
        end = TCP_SKB_CB(skb)->end_seq;
3216
        head = skb;
3217
 
3218
        for (;;) {
3219
                skb = skb->next;
3220
 
3221
                /* The segment is terminated when we see a gap or when
3222
                 * we reach the end of the queue. */
3223
                if (skb == (struct sk_buff *)&tp->out_of_order_queue ||
3224
                    after(TCP_SKB_CB(skb)->seq, end) ||
3225
                    before(TCP_SKB_CB(skb)->end_seq, start)) {
3226
                        tcp_collapse(sk, head, skb, start, end);
3227
                        head = skb;
3228
                        if (skb == (struct sk_buff *)&tp->out_of_order_queue)
3229
                                break;
3230
                        /* Start new segment */
3231
                        start = TCP_SKB_CB(skb)->seq;
3232
                        end = TCP_SKB_CB(skb)->end_seq;
3233
                } else {
3234
                        if (before(TCP_SKB_CB(skb)->seq, start))
3235
                                start = TCP_SKB_CB(skb)->seq;
3236
                        if (after(TCP_SKB_CB(skb)->end_seq, end))
3237
                                end = TCP_SKB_CB(skb)->end_seq;
3238
                }
3239
        }
3240
}
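
/* Sketch (not in the original): the test deciding whether the next queued
 * skb still belongs to the contiguous run being accumulated above, i.e.
 * whether it neither starts after the current end nor ends before the
 * current start.
 */
static inline int tcp_ofo_run_continues_sketch(u32 start, u32 end,
                                               u32 seq, u32 end_seq)
{
        return !after(seq, end) && !before(end_seq, start);
}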
3241
 
3242
/* Reduce allocated memory if we can, trying to get
3243
 * the socket within its memory limits again.
3244
 *
3245
 * Return less than zero if we should start dropping frames
3246
 * until the socket owning process reads some of the data
3247
 * to stabilize the situation.
3248
 */
3249
static int tcp_prune_queue(struct sock *sk)
3250
{
3251
        struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
3252
 
3253
        SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq);
3254
 
3255
        NET_INC_STATS_BH(PruneCalled);
3256
 
3257
        if (atomic_read(&sk->rmem_alloc) >= sk->rcvbuf)
3258
                tcp_clamp_window(sk, tp);
3259
        else if (tcp_memory_pressure)
3260
                tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U*tp->advmss);
3261
 
3262
        tcp_collapse_ofo_queue(sk);
3263
        tcp_collapse(sk, sk->receive_queue.next,
3264
                     (struct sk_buff*)&sk->receive_queue,
3265
                     tp->copied_seq, tp->rcv_nxt);
3266
        tcp_mem_reclaim(sk);
3267
 
3268
        if (atomic_read(&sk->rmem_alloc) <= sk->rcvbuf)
3269
                return 0;
3270
 
3271
        /* Collapsing did not help; destructive actions follow.
3272
         * This must never occur. */
3273
 
3274
        /* First, purge the out_of_order queue. */
3275
        if (skb_queue_len(&tp->out_of_order_queue)) {
3276
                net_statistics[smp_processor_id()*2].OfoPruned += skb_queue_len(&tp->out_of_order_queue);
3277
                __skb_queue_purge(&tp->out_of_order_queue);
3278
 
3279
                /* Reset SACK state.  A conforming SACK implementation will
3280
                 * do the same at a timeout-based retransmit.  When a connection
3281
                 * is in a sad state like this, we care only about the integrity
3282
                 * of the connection, not performance.
3283
                 */
3284
                if(tp->sack_ok)
3285
                        tcp_sack_reset(tp);
3286
                tcp_mem_reclaim(sk);
3287
        }
3288
 
3289
        if(atomic_read(&sk->rmem_alloc) <= sk->rcvbuf)
3290
                return 0;
3291
 
3292
        /* If we are really being abused, tell the caller to silently
3293
         * drop receive data on the floor.  It will get retransmitted
3294
         * and hopefully then we'll have sufficient space.
3295
         */
3296
        NET_INC_STATS_BH(RcvPruned);
3297
 
3298
        /* Massive buffer overcommit. */
3299
        tp->pred_flags = 0;
3300
        return -1;
3301
}
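
/* Sketch (not part of the original) of how callers such as tcp_data_queue()
 * use the verdict above: try to account the skb, prune once if that fails,
 * and drop the segment only if memory is still unavailable afterwards.
 */
static inline int tcp_try_rmem_sketch(struct sock *sk, struct sk_buff *skb)
{
        if (atomic_read(&sk->rmem_alloc) > sk->rcvbuf ||
            !tcp_rmem_schedule(sk, skb)) {
                if (tcp_prune_queue(sk) < 0 || !tcp_rmem_schedule(sk, skb))
                        return 0;               /* caller should drop the skb */
        }
        return 1;                               /* safe to queue the skb */
}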
3302
 
3303
 
3304
/* RFC 2861, slow part. Adjust cwnd after it has not been full for one RTO.
3305
 * As additional protection, we do not touch cwnd while in a retransmission phase,
3306
 * or if the application has hit its sndbuf limit recently.
3307
 */
3308
void tcp_cwnd_application_limited(struct sock *sk)
3309
{
3310
        struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
3311
 
3312
        if (tp->ca_state == TCP_CA_Open &&
3313
            sk->socket && !test_bit(SOCK_NOSPACE, &sk->socket->flags)) {
3314
                /* Limited by application or receiver window. */
3315
                u32 win_used = max(tp->snd_cwnd_used, 2U);
3316
                if (win_used < tp->snd_cwnd) {
3317
                        tp->snd_ssthresh = tcp_current_ssthresh(tp);
3318
                        tp->snd_cwnd = (tp->snd_cwnd+win_used)>>1;
3319
                }
3320
                tp->snd_cwnd_used = 0;
3321
        }
3322
        tp->snd_cwnd_stamp = tcp_time_stamp;
3323
}
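
/* Sketch (not from the original): the cwnd adjustment applied above when the
 * window has gone unused for an RTO.  E.g. with snd_cwnd = 40 and only 10
 * segments ever in flight, the new cwnd becomes (40 + 10) / 2 = 25.
 */
static inline u32 tcp_cwnd_app_limited_sketch(u32 snd_cwnd, u32 snd_cwnd_used)
{
        u32 win_used = max(snd_cwnd_used, 2U);

        return win_used < snd_cwnd ? (snd_cwnd + win_used) >> 1 : snd_cwnd;
}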
3324
 
3325
 
3326
/* When an incoming ACK allows us to free an skb from the write_queue,
3327
 * we remember this event in the tp->queue_shrunk flag and wake up the socket
3328
 * on exit from the TCP input handler.
3329
 */
3330
static void tcp_new_space(struct sock *sk)
3331
{
3332
        struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
3333
 
3334
        if (tp->packets_out < tp->snd_cwnd &&
3335
            !(sk->userlocks&SOCK_SNDBUF_LOCK) &&
3336
            !tcp_memory_pressure &&
3337
            atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) {
3338
                int sndmem, demanded;
3339
 
3340
                sndmem = tp->mss_clamp+MAX_TCP_HEADER+16+sizeof(struct sk_buff);
3341
                demanded = max_t(unsigned int, tp->snd_cwnd, tp->reordering+1);
3342
                sndmem *= 2*demanded;
3343
                if (sndmem > sk->sndbuf)
3344
                        sk->sndbuf = min(sndmem, sysctl_tcp_wmem[2]);
3345
                tp->snd_cwnd_stamp = tcp_time_stamp;
3346
        }
3347
 
3348
        sk->write_space(sk);
3349
}
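
/* Sketch (not in the original) of the send-buffer target computed above:
 * per-skb cost times two full windows, where the window is the larger of
 * the congestion window and the reordering degree plus one.
 */
static inline int tcp_sndbuf_target_sketch(unsigned int mss_clamp,
                                           unsigned int snd_cwnd,
                                           unsigned int reordering)
{
        int sndmem = mss_clamp + MAX_TCP_HEADER + 16 + sizeof(struct sk_buff);
        int demanded = max_t(unsigned int, snd_cwnd, reordering + 1);

        return 2 * demanded * sndmem;
}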
3350
 
3351
static inline void tcp_check_space(struct sock *sk)
3352
{
3353
        struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
3354
 
3355
        if (tp->queue_shrunk) {
3356
                tp->queue_shrunk = 0;
3357
                if (sk->socket && test_bit(SOCK_NOSPACE, &sk->socket->flags))
3358
                        tcp_new_space(sk);
3359
        }
3360
}
3361
 
3362
static void __tcp_data_snd_check(struct sock *sk, struct sk_buff *skb)
3363
{
3364
        struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
3365
 
3366
        if (after(TCP_SKB_CB(skb)->end_seq, tp->snd_una + tp->snd_wnd) ||
3367
            tcp_packets_in_flight(tp) >= tp->snd_cwnd ||
3368
            tcp_write_xmit(sk, tp->nonagle))
3369
                tcp_check_probe_timer(sk, tp);
3370
}
3371
 
3372
static __inline__ void tcp_data_snd_check(struct sock *sk)
3373
{
3374
        struct sk_buff *skb = sk->tp_pinfo.af_tcp.send_head;
3375
 
3376
        if (skb != NULL)
3377
                __tcp_data_snd_check(sk, skb);
3378
        tcp_check_space(sk);
3379
}
3380
 
3381
/*
3382
 * Check if sending an ack is needed.
3383
 */
3384
static __inline__ void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
3385
{
3386
        struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
3387
 
3388
            /* More than one full frame received... */
3389
        if (((tp->rcv_nxt - tp->rcv_wup) > tp->ack.rcv_mss
3390
             /* ... and right edge of window advances far enough.
3391
              * (tcp_recvmsg() will send ACK otherwise). Or...
3392
              */
3393
             && __tcp_select_window(sk) >= tp->rcv_wnd) ||
3394
            /* We ACK each frame or... */
3395
            tcp_in_quickack_mode(tp) ||
3396
            /* We have out of order data. */
3397
            (ofo_possible &&
3398
             skb_peek(&tp->out_of_order_queue) != NULL)) {
3399
                /* Then ack it now */
3400
                tcp_send_ack(sk);
3401
        } else {
3402
                /* Else, send delayed ack. */
3403
                tcp_send_delayed_ack(sk);
3404
        }
3405
}
3406
 
3407
static __inline__ void tcp_ack_snd_check(struct sock *sk)
3408
{
3409
        struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
3410
        if (!tcp_ack_scheduled(tp)) {
3411
                /* We sent a data segment already. */
3412
                return;
3413
        }
3414
        __tcp_ack_snd_check(sk, 1);
3415
}
3416
 
3417
/*
3418
 *      This routine is only called when we have urgent data
3419
 *      signalled. Its the 'slow' part of tcp_urg. It could be
3420
 *      signalled. It's the 'slow' part of tcp_urg. It could be
3421
 *      place. We handle URGent data wrong. We have to - as
3422
 *      BSD still doesn't use the correction from RFC961.
3423
 *      For 1003.1g we should support a new option TCP_STDURG to permit
3424
 *      either form (or just set the sysctl tcp_stdurg).
3425
 */
3426
 
3427
static void tcp_check_urg(struct sock * sk, struct tcphdr * th)
3428
{
3429
        struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
3430
        u32 ptr = ntohs(th->urg_ptr);
3431
 
3432
        if (ptr && !sysctl_tcp_stdurg)
3433
                ptr--;
3434
        ptr += ntohl(th->seq);
3435
 
3436
        /* Ignore urgent data that we've already seen and read. */
3437
        if (after(tp->copied_seq, ptr))
3438
                return;
3439
 
3440
        /* Do not replay urg ptr.
3441
         *
3442
         * NOTE: an interesting situation not covered by the specs.
3443
         * A misbehaving sender may send an urg ptr pointing into a segment
3444
         * which we already have in the ofo queue. We are not able to fetch
3445
         * such data and will stay in TCP_URG_NOTYET until it is eaten
3446
         * by recvmsg(). It seems we are not obliged to handle such wicked
3447
         * situations. But it is worth thinking about the possibility of
3448
         * DoSes using some hypothetical application level deadlock.
3449
         */
3450
        if (before(ptr, tp->rcv_nxt))
3451
                return;
3452
 
3453
        /* Do we already have a newer (or duplicate) urgent pointer? */
3454
        if (tp->urg_data && !after(ptr, tp->urg_seq))
3455
                return;
3456
 
3457
        /* Tell the world about our new urgent pointer. */
3458
        if (sk->proc != 0) {
3459
                if (sk->proc > 0)
3460
                        kill_proc(sk->proc, SIGURG, 1);
3461
                else
3462
                        kill_pg(-sk->proc, SIGURG, 1);
3463
                sk_wake_async(sk, 3, POLL_PRI);
3464
        }
3465
 
3466
        /* We may be adding urgent data when the last byte read was
3467
         * urgent. To do this requires some care. We cannot just ignore
3468
         * tp->copied_seq since we would read the last urgent byte again
3469
         * as data, nor can we alter copied_seq until this data arrives
3470
         * or we break the semantics of SIOCATMARK (and thus sockatmark()).
3471
         *
3472
         * NOTE. Double Dutch. Rendering to plain English: the author of the comment
3473
         * above did something like  send("A", MSG_OOB); send("B", MSG_OOB);
3474
         * and expected both A and B to disappear from the stream. This is _wrong_.
3475
         * Though this happens in BSD with high probability, it is only occasional.
3476
         * Any application relying on this is buggy. Note also that the fix "works"
3477
         * only in this artificial test. Insert some normal data between A and B and we
3478
         * will diverge from BSD again. Verdict: it is better to remove this so as to
3479
         * trap buggy users.
3480
         */
3481
        if (tp->urg_seq == tp->copied_seq && tp->urg_data &&
3482
            !sk->urginline &&
3483
            tp->copied_seq != tp->rcv_nxt) {
3484
                struct sk_buff *skb = skb_peek(&sk->receive_queue);
3485
                tp->copied_seq++;
3486
                if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) {
3487
                        __skb_unlink(skb, skb->list);
3488
                        __kfree_skb(skb);
3489
                }
3490
        }
3491
 
3492
        tp->urg_data = TCP_URG_NOTYET;
3493
        tp->urg_seq = ptr;
3494
 
3495
        /* Disable header prediction. */
3496
        tp->pred_flags = 0;
3497
}
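
/* Sketch (not part of the original): the two interpretations of the urgent
 * pointer handled above.  By default (BSD style) the pointer is treated as
 * pointing just past the urgent byte, so it is decremented; with the
 * tcp_stdurg sysctl set, it is treated as pointing at the urgent byte itself.
 * "urg_ptr" here is assumed to be in host byte order already.
 */
static inline u32 tcp_urg_seq_sketch(u32 seg_seq, u32 urg_ptr, int stdurg)
{
        if (urg_ptr && !stdurg)
                urg_ptr--;                      /* BSD: step back onto the urgent byte */
        return seg_seq + urg_ptr;
}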
3498
 
3499
/* This is the 'fast' part of urgent handling. */
3500
static inline void tcp_urg(struct sock *sk, struct sk_buff *skb, struct tcphdr *th)
3501
{
3502
        struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
3503
 
3504
        /* Check if we get a new urgent pointer - normally not. */
3505
        if (th->urg)
3506
                tcp_check_urg(sk,th);
3507
 
3508
        /* Do we wait for any urgent data? - normally not... */
3509
        if (tp->urg_data == TCP_URG_NOTYET) {
3510
                u32 ptr = tp->urg_seq - ntohl(th->seq) + (th->doff*4) - th->syn;
3511
 
3512
                /* Is the urgent pointer pointing into this packet? */
3513
                if (ptr < skb->len) {
3514
                        u8 tmp;
3515
                        if (skb_copy_bits(skb, ptr, &tmp, 1))
3516
                                BUG();
3517
                        tp->urg_data = TCP_URG_VALID | tmp;
3518
                        if (!sk->dead)
3519
                                sk->data_ready(sk,0);
3520
                }
3521
        }
3522
}
3523
 
3524
static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen)
3525
{
3526
        struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
3527
        int chunk = skb->len - hlen;
3528
        int err;
3529
 
3530
        local_bh_enable();
3531
        if (skb->ip_summed==CHECKSUM_UNNECESSARY)
3532
                err = skb_copy_datagram_iovec(skb, hlen, tp->ucopy.iov, chunk);
3533
        else
3534
                err = skb_copy_and_csum_datagram_iovec(skb, hlen, tp->ucopy.iov);
3535
 
3536
        if (!err) {
3537
                tp->ucopy.len -= chunk;
3538
                tp->copied_seq += chunk;
3539
        }
3540
 
3541
        local_bh_disable();
3542
        return err;
3543
}
3544
 
3545
static int __tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb)
3546
{
3547
        int result;
3548
 
3549
        if (sk->lock.users) {
3550
                local_bh_enable();
3551
                result = __tcp_checksum_complete(skb);
3552
                local_bh_disable();
3553
        } else {
3554
                result = __tcp_checksum_complete(skb);
3555
        }
3556
        return result;
3557
}
3558
 
3559
static __inline__ int
3560
tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb)
3561
{
3562
        return skb->ip_summed != CHECKSUM_UNNECESSARY &&
3563
                __tcp_checksum_complete_user(sk, skb);
3564
}
3565
 
3566
/*
3567
 *      TCP receive function for the ESTABLISHED state.
3568
 *
3569
 *      It is split into a fast path and a slow path. The fast path is
3570
 *      disabled when:
3571
 *      - A zero window was announced by us - zero window probing
3572
 *        is only handled properly in the slow path.
3573
 *      - Out of order segments arrived.
3574
 *      - Urgent data is expected.
3575
 *      - There is no buffer space left
3576
 *      - Unexpected TCP flags/window values/header lengths are received
3577
 *        (detected by checking the TCP header against pred_flags)
3578
 *      - Data is sent in both directions. Fast path only supports pure senders
3579
 *        or pure receivers (this means either the sequence number or the ack
3580
 *        value must stay constant)
3581
 *      - Unexpected TCP option.
3582
 *
3583
 *      When these conditions are not satisfied it drops into a standard
3584
 *      receive procedure patterned after RFC793 to handle all cases.
3585
 *      The first three cases are guaranteed by proper pred_flags setting,
3586
 *      the rest is checked inline. Fast processing is turned on in
3587
 *      tcp_data_queue when everything is OK.
3588
 */
3589
int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
3590
                        struct tcphdr *th, unsigned len)
3591
{
3592
        struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
3593
 
3594
        /*
3595
         *      Header prediction.
3596
         *      The code loosely follows the one in the famous
3597
         *      "30 instruction TCP receive" Van Jacobson mail.
3598
         *
3599
 *      Van's trick is to deposit buffers into the socket queue
3600
 *      on a device interrupt, then call the tcp_recv function
3601
 *      in the receive process context to checksum and copy
3602
 *      the buffer to user space. Smart...
3603
         *
3604
         *      Our current scheme is not silly either but we take the
3605
         *      extra cost of the net_bh soft interrupt processing...
3606
         *      We do checksum and copy also but from device to kernel.
3607
         */
3608
 
3609
        tp->saw_tstamp = 0;
3610
 
3611
        /*      pred_flags is 0xS?10 << 16 + snd_wnd
3612
 *      if header prediction is to be made.
3613
 *      'S' will always be tp->tcp_header_len >> 2
3614
 *      '?' will be 0 for the fast path, otherwise pred_flags is 0 to
3615
 *      turn it off (when there are holes in the receive
3616
 *      space, for instance).
3617
 *      For example, with timestamps negotiated the header is 32 bytes, so 'S'
 *      is 8 and pred_flags is 0x8010 << 16 plus snd_wnd.
 *      The PSH flag is ignored.
3618
         */
3619
 
3620
        if ((tcp_flag_word(th) & TCP_HP_BITS) == tp->pred_flags &&
3621
                TCP_SKB_CB(skb)->seq == tp->rcv_nxt) {
3622
                int tcp_header_len = tp->tcp_header_len;
3623
 
3624
                /* Timestamp header prediction: tcp_header_len
3625
                 * is automatically equal to th->doff*4 due to pred_flags
3626
                 * match.
3627
                 */
3628
 
3629
                /* Check timestamp */
3630
                if (tcp_header_len == sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) {
3631
                        __u32 *ptr = (__u32 *)(th + 1);
3632
 
3633
                        /* No? Slow path! */
3634
                        if (*ptr != ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
3635
                                           | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP))
3636
                                goto slow_path;
3637
 
3638
                        tp->saw_tstamp = 1;
3639
                        ++ptr;
3640
                        tp->rcv_tsval = ntohl(*ptr);
3641
                        ++ptr;
3642
                        tp->rcv_tsecr = ntohl(*ptr);
3643
 
3644
                        /* If PAWS failed, check it more carefully in slow path */
3645
                        if ((s32)(tp->rcv_tsval - tp->ts_recent) < 0)
3646
                                goto slow_path;
3647
 
3648
                        /* DO NOT update ts_recent here, if checksum fails
3649
                         * and timestamp was corrupted part, it will result
3650
                         * in a hung connection since we will drop all
3651
                         * future packets due to the PAWS test.
3652
                         */
3653
                }
3654
 
3655
                if (len <= tcp_header_len) {
3656
                        /* Bulk data transfer: sender */
3657
                        if (len == tcp_header_len) {
3658
                                /* Predicted packet is in window by definition.
3659
                                 * seq == rcv_nxt and rcv_wup <= rcv_nxt.
3660
                                 * Hence, check seq<=rcv_wup reduces to:
3661
                                 */
3662
                                if (tcp_header_len ==
3663
                                    (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) &&
3664
                                    tp->rcv_nxt == tp->rcv_wup)
3665
                                        tcp_store_ts_recent(tp);
3666
                                /* We know that such packets are checksummed
3667
                                 * on entry.
3668
                                 */
3669
                                tcp_ack(sk, skb, 0);
3670
                                __kfree_skb(skb);
3671
                                tcp_data_snd_check(sk);
3672
                                return 0;
3673
                        } else { /* Header too small */
3674
                                TCP_INC_STATS_BH(TcpInErrs);
3675
                                goto discard;
3676
                        }
3677
                } else {
3678
                        int eaten = 0;
3679
 
3680
                        if (tp->ucopy.task == current &&
3681
                            tp->copied_seq == tp->rcv_nxt &&
3682
                            len - tcp_header_len <= tp->ucopy.len &&
3683
                            sk->lock.users) {
3684
                                __set_current_state(TASK_RUNNING);
3685
 
3686
                                if (!tcp_copy_to_iovec(sk, skb, tcp_header_len)) {
3687
                                        /* Predicted packet is in window by definition.
3688
                                         * seq == rcv_nxt and rcv_wup <= rcv_nxt.
3689
                                         * Hence, check seq<=rcv_wup reduces to:
3690
                                         */
3691
                                        if (tcp_header_len ==
3692
                                            (sizeof(struct tcphdr) +
3693
                                             TCPOLEN_TSTAMP_ALIGNED) &&
3694
                                            tp->rcv_nxt == tp->rcv_wup)
3695
                                                tcp_store_ts_recent(tp);
3696
 
3697
                                        __skb_pull(skb, tcp_header_len);
3698
                                        tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
3699
                                        NET_INC_STATS_BH(TCPHPHitsToUser);
3700
                                        eaten = 1;
3701
                                }
3702
                        }
3703
                        if (!eaten) {
3704
                                if (tcp_checksum_complete_user(sk, skb))
3705
                                        goto csum_error;
3706
 
3707
                                /* Predicted packet is in window by definition.
3708
                                 * seq == rcv_nxt and rcv_wup <= rcv_nxt.
3709
                                 * Hence, check seq<=rcv_wup reduces to:
3710
                                 */
3711
                                if (tcp_header_len ==
3712
                                    (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) &&
3713
                                    tp->rcv_nxt == tp->rcv_wup)
3714
                                        tcp_store_ts_recent(tp);
3715
 
3716
                                if ((int)skb->truesize > sk->forward_alloc)
3717
                                        goto step5;
3718
 
3719
                                NET_INC_STATS_BH(TCPHPHits);
3720
 
3721
                                /* Bulk data transfer: receiver */
3722
                                __skb_pull(skb,tcp_header_len);
3723
                                __skb_queue_tail(&sk->receive_queue, skb);
3724
                                tcp_set_owner_r(skb, sk);
3725
                                tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
3726
                        }
3727
 
3728
                        tcp_event_data_recv(sk, tp, skb);
3729
 
3730
                        if (TCP_SKB_CB(skb)->ack_seq != tp->snd_una) {
3731
                                /* Well, only one small jumplet in fast path... */
3732
                                tcp_ack(sk, skb, FLAG_DATA);
3733
                                tcp_data_snd_check(sk);
3734
                                if (!tcp_ack_scheduled(tp))
3735
                                        goto no_ack;
3736
                        }
3737
 
3738
                        if (eaten) {
3739
                                if (tcp_in_quickack_mode(tp)) {
3740
                                        tcp_send_ack(sk);
3741
                                } else {
3742
                                        tcp_send_delayed_ack(sk);
3743
                                }
3744
                        } else {
3745
                                __tcp_ack_snd_check(sk, 0);
3746
                        }
3747
 
3748
no_ack:
3749
                        if (eaten)
3750
                                __kfree_skb(skb);
3751
                        else
3752
                                sk->data_ready(sk, 0);
3753
                        return 0;
3754
                }
3755
        }
3756
 
3757
slow_path:
3758
        if (len < (th->doff<<2) || tcp_checksum_complete_user(sk, skb))
3759
                goto csum_error;
3760
 
3761
        /*
3762
         * RFC1323: H1. Apply PAWS check first.
3763
         */
3764
        if (tcp_fast_parse_options(skb, th, tp) && tp->saw_tstamp &&
3765
            tcp_paws_discard(tp, skb)) {
3766
                if (!th->rst) {
3767
                        NET_INC_STATS_BH(PAWSEstabRejected);
3768
                        tcp_send_dupack(sk, skb);
3769
                        goto discard;
3770
                }
3771
                /* Resets are accepted even if PAWS failed.
3772
 
3773
                   ts_recent update must be made after we are sure
3774
                   that the packet is in window.
3775
                 */
3776
        }
3777
 
3778
        /*
3779
         *      Standard slow path.
3780
         */
3781
 
3782
        if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) {
3783
                /* RFC793, page 37: "In all states except SYN-SENT, all reset
3784
                 * (RST) segments are validated by checking their SEQ-fields."
3785
                 * And page 69: "If an incoming segment is not acceptable,
3786
                 * an acknowledgment should be sent in reply (unless the RST bit
3787
                 * is set, if so drop the segment and return)".
3788
                 */
3789
                if (!th->rst)
3790
                        tcp_send_dupack(sk, skb);
3791
                goto discard;
3792
        }
3793
 
3794
        if(th->rst) {
3795
                tcp_reset(sk);
3796
                goto discard;
3797
        }
3798
 
3799
        tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
3800
 
3801
        if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
3802
                TCP_INC_STATS_BH(TcpInErrs);
3803
                NET_INC_STATS_BH(TCPAbortOnSyn);
3804
                tcp_reset(sk);
3805
                return 1;
3806
        }
3807
 
3808
step5:
3809
        if(th->ack)
3810
                tcp_ack(sk, skb, FLAG_SLOWPATH);
3811
 
3812
        /* Process urgent data. */
3813
        tcp_urg(sk, skb, th);
3814
 
3815
        /* step 7: process the segment text */
3816
        tcp_data_queue(sk, skb);
3817
 
3818
        tcp_data_snd_check(sk);
3819
        tcp_ack_snd_check(sk);
3820
        return 0;
3821
 
3822
csum_error:
3823
        TCP_INC_STATS_BH(TcpInErrs);
3824
 
3825
discard:
3826
        __kfree_skb(skb);
3827
        return 0;
3828
}
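
/* Sketch (not from the original): the aligned timestamp option layout that
 * the fast path above insists on -- a single 32-bit word of
 * NOP, NOP, TIMESTAMP, length, followed by TSval and TSecr.  Anything else
 * falls back to the slow path and full option parsing.
 */
static inline int tcp_tstamp_word_ok_sketch(__u32 *ptr)
{
        return *ptr == ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
                             (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
}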
3829
 
3830
static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
3831
                                         struct tcphdr *th, unsigned len)
3832
{
3833
        struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
3834
        int saved_clamp = tp->mss_clamp;
3835
 
3836
        tcp_parse_options(skb, tp, 0);
3837
 
3838
        if (th->ack) {
3839
                /* rfc793:
3840
                 * "If the state is SYN-SENT then
3841
                 *    first check the ACK bit
3842
                 *      If the ACK bit is set
3843
                 *        If SEG.ACK =< ISS, or SEG.ACK > SND.NXT, send
3844
                 *        a reset (unless the RST bit is set, if so drop
3845
                 *        the segment and return)"
3846
                 *
3847
                 *  We do not send data with SYN, so that RFC-correct
3848
                 *  test reduces to:
3849
                 */
3850
                if (TCP_SKB_CB(skb)->ack_seq != tp->snd_nxt)
3851
                        goto reset_and_undo;
3852
 
3853
                if (tp->saw_tstamp && tp->rcv_tsecr &&
3854
                    !between(tp->rcv_tsecr, tp->retrans_stamp, tcp_time_stamp)) {
3855
                        NET_INC_STATS_BH(PAWSActiveRejected);
3856
                        goto reset_and_undo;
3857
                }
3858
 
3859
                /* Now ACK is acceptable.
3860
                 *
3861
                 * "If the RST bit is set
3862
                 *    If the ACK was acceptable then signal the user "error:
3863
                 *    connection reset", drop the segment, enter CLOSED state,
3864
                 *    delete TCB, and return."
3865
                 */
3866
 
3867
                if (th->rst) {
3868
                        tcp_reset(sk);
3869
                        goto discard;
3870
                }
3871
 
3872
                /* rfc793:
3873
                 *   "fifth, if neither of the SYN or RST bits is set then
3874
                 *    drop the segment and return."
3875
                 *
3876
                 *    See note below!
3877
                 *                                        --ANK(990513)
3878
                 */
3879
                if (!th->syn)
3880
                        goto discard_and_undo;
3881
 
3882
                /* rfc793:
3883
                 *   "If the SYN bit is on ...
3884
                 *    are acceptable then ...
3885
                 *    (our SYN has been ACKed), change the connection
3886
                 *    state to ESTABLISHED..."
3887
                 */
3888
 
3889
                TCP_ECN_rcv_synack(tp, th);
3890
 
3891
                tp->snd_wl1 = TCP_SKB_CB(skb)->seq;
3892
                tcp_ack(sk, skb, FLAG_SLOWPATH);
3893
 
3894
                /* Ok.. it's good. Set up sequence numbers and
3895
                 * move to established.
3896
                 */
3897
                tp->rcv_nxt = TCP_SKB_CB(skb)->seq+1;
3898
                tp->rcv_wup = TCP_SKB_CB(skb)->seq+1;
3899
 
3900
                /* RFC1323: The window in SYN & SYN/ACK segments is
3901
                 * never scaled.
3902
                 */
3903
                tp->snd_wnd = ntohs(th->window);
3904
                tcp_init_wl(tp, TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(skb)->seq);
3905
 
3906
                if (tp->wscale_ok == 0) {
3907
                        tp->snd_wscale = tp->rcv_wscale = 0;
3908
                        tp->window_clamp = min(tp->window_clamp, 65535U);
3909
                }
3910
 
3911
                if (tp->saw_tstamp) {
3912
                        tp->tstamp_ok = 1;
3913
                        tp->tcp_header_len =
3914
                                sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
3915
                        tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
3916
                        tcp_store_ts_recent(tp);
3917
                } else {
3918
                        tp->tcp_header_len = sizeof(struct tcphdr);
3919
                }

                if (tp->sack_ok && sysctl_tcp_fack)
                        tp->sack_ok |= 2;

                tcp_sync_mss(sk, tp->pmtu_cookie);
                tcp_initialize_rcv_mss(sk);
                tcp_init_metrics(sk);
                tcp_init_buffer_space(sk);

                if (sk->keepopen)
                        tcp_reset_keepalive_timer(sk, keepalive_time_when(tp));

                if (tp->snd_wscale == 0)
                        __tcp_fast_path_on(tp, tp->snd_wnd);
                else
                        tp->pred_flags = 0;

                /* Remember, tcp_poll() does not lock socket!
                 * Change state from SYN-SENT only after copied_seq
                 * is initialized. */
                tp->copied_seq = tp->rcv_nxt;
                mb();
                tcp_set_state(sk, TCP_ESTABLISHED);

                if(!sk->dead) {
                        sk->state_change(sk);
                        sk_wake_async(sk, 0, POLL_OUT);
                }

                if (tp->write_pending || tp->defer_accept || tp->ack.pingpong) {
                        /* Save one ACK. Data will be ready after
                         * several ticks, if write_pending is set.
                         *
                         * It may be deleted, but with this feature tcpdumps
                         * look so _wonderfully_ clever, that I was not able
                         * to stand against the temptation 8)     --ANK
                         */
                        tcp_schedule_ack(tp);
                        tp->ack.lrcvtime = tcp_time_stamp;
                        tp->ack.ato = TCP_ATO_MIN;
                        tcp_incr_quickack(tp);
                        tcp_enter_quickack_mode(tp);
                        tcp_reset_xmit_timer(sk, TCP_TIME_DACK, TCP_DELACK_MAX);

discard:
                        __kfree_skb(skb);
                        return 0;
                } else {
                        tcp_send_ack(sk);
                }
                return -1;
        }

        /* No ACK in the segment */

        if (th->rst) {
                /* rfc793:
                 * "If the RST bit is set
                 *
                 *      Otherwise (no ACK) drop the segment and return."
                 */

                goto discard_and_undo;
        }

        /* PAWS check. */
        if (tp->ts_recent_stamp && tp->saw_tstamp && tcp_paws_check(tp, 0))
                goto discard_and_undo;
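        /* PAWS (RFC 1323): an ACK-less segment carrying a timestamp older
         * than the one recorded in ts_recent is silently dropped.
         */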

        if (th->syn) {
                /* We see a SYN without an ACK: this is an attempt at a
                 * simultaneous connect with crossed SYNs.
                 * In particular, it can be a connect to self.
                 */
                tcp_set_state(sk, TCP_SYN_RECV);

                if (tp->saw_tstamp) {
                        tp->tstamp_ok = 1;
                        tcp_store_ts_recent(tp);
                        tp->tcp_header_len =
                                sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
                } else {
                        tp->tcp_header_len = sizeof(struct tcphdr);
                }

                tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
                tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;

                /* RFC1323: The window in SYN & SYN/ACK segments is
                 * never scaled.
                 */
                tp->snd_wnd = ntohs(th->window);
                tp->snd_wl1 = TCP_SKB_CB(skb)->seq;
                tp->max_window = tp->snd_wnd;

                tcp_sync_mss(sk, tp->pmtu_cookie);
                tcp_initialize_rcv_mss(sk);

                TCP_ECN_rcv_syn(tp, th);

                tcp_send_synack(sk);
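                /* Simultaneous open: answer the crossed SYN with our own
                 * SYN-ACK and wait in SYN_RECV for a segment that ACKs our
                 * SYN, at which point tcp_rcv_state_process() will move the
                 * socket to ESTABLISHED.
                 */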
#if 0
                /* Note: we could accept data and URG from this segment.
                 * There is no obstacle to doing so.
                 *
                 * However, if we ignore data in ACK-less segments sometimes,
                 * we have no reason to accept it at other times.
                 * Also, it seems the code doing it in step6 of tcp_rcv_state_process
                 * is not flawless. So, discard the packet for sanity.
                 * Uncomment this return to process the data.
                 */
                return -1;
#else
                goto discard;
#endif
        }
        /* "fifth, if neither of the SYN or RST bits is set then
         * drop the segment and return."
         */

discard_and_undo:
        tcp_clear_options(tp);
        tp->mss_clamp = saved_clamp;
        goto discard;

reset_and_undo:
        tcp_clear_options(tp);
        tp->mss_clamp = saved_clamp;
        return 1;
}

/*
 *      This function implements the receiving procedure of RFC 793 for
 *      all states except ESTABLISHED and TIME_WAIT.
 *      It's called from both tcp_v4_rcv and tcp_v6_rcv and should be
 *      address independent.
 */
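
/*
 *      Return convention: 0 means the segment was consumed here (queued or
 *      freed); a non-zero return asks the caller to send a reset.
 */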

int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                          struct tcphdr *th, unsigned len)
{
        struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
        int queued = 0;

        tp->saw_tstamp = 0;

        switch (sk->state) {
        case TCP_CLOSE:
                goto discard;

        case TCP_LISTEN:
                if(th->ack)
                        return 1;

                if(th->rst)
                        goto discard;

                if(th->syn) {
                        if(tp->af_specific->conn_request(sk, skb) < 0)
                                return 1;

                        tcp_init_westwood(sk);
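                        /* conn_request() has queued an open_request and sent
                         * our SYN-ACK; a full socket is created only when the
                         * final ACK of the handshake arrives.
                         */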

                        /* Now we have several options: In theory there is
                         * nothing else in the frame. KA9Q has an option to
                         * send data with the syn, BSD accepts data with the
                         * syn up to the [to be] advertised window and
                         * Solaris 2.1 gives you a protocol error. For now
                         * we just ignore it, that fits the spec precisely
                         * and avoids incompatibilities. It would be nice in
                         * future to drop through and process the data.
                         *
                         * Now that TTCP is starting to be used we ought to
                         * queue this data.
                         * But, this leaves one open to an easy denial of
                         * service attack, and SYN cookies can't defend
                         * against this problem. So, we drop the data
                         * in the interest of security over speed.
                         */
                        goto discard;
                }
                goto discard;

        case TCP_SYN_SENT:
                tcp_init_westwood(sk);

                queued = tcp_rcv_synsent_state_process(sk, skb, th, len);
                if (queued >= 0)
                        return queued;

                /* Do step6 onward by hand. */
                tcp_urg(sk, skb, th);
                __kfree_skb(skb);
                tcp_data_snd_check(sk);
                return 0;
        }

        if (tcp_fast_parse_options(skb, th, tp) && tp->saw_tstamp &&
            tcp_paws_discard(tp, skb)) {
                if (!th->rst) {
                        NET_INC_STATS_BH(PAWSEstabRejected);
                        tcp_send_dupack(sk, skb);
                        goto discard;
                }
                /* Reset is accepted even if it did not pass PAWS. */
        }
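        /* A duplicate ACK is sent for PAWS-rejected segments so the peer
         * learns our current expected sequence number and timestamp.
         */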

        /* step 1: check sequence number */
        if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) {
                if (!th->rst)
                        tcp_send_dupack(sk, skb);
                goto discard;
        }

        /* step 2: check RST bit */
        if(th->rst) {
                tcp_reset(sk);
                goto discard;
        }

        tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);

        /* step 3: check security and precedence [ignored] */

        /*      step 4:
         *
         *      Check for a SYN in window.
         */
        if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
                NET_INC_STATS_BH(TCPAbortOnSyn);
                tcp_reset(sk);
                return 1;
        }
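        /* Per RFC 793, a SYN inside the window on a synchronized connection
         * is an error: the connection is aborted and a reset is sent.
         */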

        /* step 5: check the ACK field */
        if (th->ack) {
                int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH);

                switch(sk->state) {
                case TCP_SYN_RECV:
                        if (acceptable) {
                                tp->copied_seq = tp->rcv_nxt;
                                mb();
                                tcp_set_state(sk, TCP_ESTABLISHED);
                                sk->state_change(sk);

                                /* Note that this wakeup is only for the
                                 * marginal crossed-SYN case. Passively opened
                                 * sockets are not woken up, because
                                 * sk->sleep == NULL and sk->socket == NULL.
                                 */
                                if (sk->socket) {
                                        sk_wake_async(sk,0,POLL_OUT);
                                }

                                tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
                                tp->snd_wnd = ntohs(th->window) << tp->snd_wscale;
                                tcp_init_wl(tp, TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(skb)->seq);
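                                /* Unlike the SYN exchange, the window in this
                                 * final ACK of the handshake is subject to the
                                 * negotiated scale factor.
                                 */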

                                /* tcp_ack considers this ACK as duplicate
                                 * and does not calculate rtt.
                                 * Fix it at least with timestamps.
                                 */
                                if (tp->saw_tstamp && tp->rcv_tsecr && !tp->srtt)
                                        tcp_ack_saw_tstamp(tp, 0);

                                if (tp->tstamp_ok)
                                        tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;

                                tcp_init_metrics(sk);
                                tcp_initialize_rcv_mss(sk);
                                tcp_init_buffer_space(sk);
                                tcp_fast_path_on(tp);
                        } else {
                                return 1;
                        }
                        break;

                case TCP_FIN_WAIT1:
                        if (tp->snd_una == tp->write_seq) {
                                tcp_set_state(sk, TCP_FIN_WAIT2);
                                sk->shutdown |= SEND_SHUTDOWN;
                                dst_confirm(sk->dst_cache);

                                if (!sk->dead) {
                                        /* Wake up lingering close() */
                                        sk->state_change(sk);
                                } else {
                                        int tmo;

                                        if (tp->linger2 < 0 ||
                                            (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
                                             after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) {
                                                tcp_done(sk);
                                                NET_INC_STATS_BH(TCPAbortOnData);
                                                return 1;
                                        }

                                        tmo = tcp_fin_time(tp);
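                                        /* tcp_fin_time() yields the FIN-WAIT-2
                                         * lifetime: tp->linger2 if set, else
                                         * the tcp_fin_timeout sysctl, floored
                                         * at a few RTOs.
                                         */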
                                        if (tmo > TCP_TIMEWAIT_LEN) {
                                                tcp_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN);
                                        } else if (th->fin || sk->lock.users) {
                                                /* Bad case. We could lose such a FIN otherwise.
                                                 * It is not a big problem, but it looks confusing
                                                 * and it is not such a rare event. We can still
                                                 * lose it now, if it spins in bh_lock_sock(), but
                                                 * that is really a marginal case.
                                                 */
                                                tcp_reset_keepalive_timer(sk, tmo);
                                        } else {
                                                tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
                                                goto discard;
                                        }
                                }
                        }
                        break;

                case TCP_CLOSING:
                        if (tp->snd_una == tp->write_seq) {
                                tcp_time_wait(sk, TCP_TIME_WAIT, 0);
                                goto discard;
                        }
                        break;

                case TCP_LAST_ACK:
                        if (tp->snd_una == tp->write_seq) {
                                tcp_update_metrics(sk);
                                tcp_done(sk);
                                goto discard;
                        }
                        break;
                }
        } else
                goto discard;

        /* step 6: check the URG bit */
        tcp_urg(sk, skb, th);

        /* step 7: process the segment text */
        switch (sk->state) {
        case TCP_CLOSE_WAIT:
        case TCP_CLOSING:
        case TCP_LAST_ACK:
                if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
                        break;
        case TCP_FIN_WAIT1:
        case TCP_FIN_WAIT2:
                /* RFC 793 says to queue data in these states,
                 * RFC 1122 says we MUST send a reset.
                 * BSD 4.4 also does reset.
                 */
                if (sk->shutdown & RCV_SHUTDOWN) {
                        if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
                            after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
                                NET_INC_STATS_BH(TCPAbortOnData);
                                tcp_reset(sk);
                                return 1;
                        }
                }
                /* Fall through */
        case TCP_ESTABLISHED:
                tcp_data_queue(sk, skb);
                queued = 1;
                break;
        }

        /* tcp_data could move socket to TIME-WAIT */
        if (sk->state != TCP_CLOSE) {
                tcp_data_snd_check(sk);
                tcp_ack_snd_check(sk);
        }
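        /* If tcp_data_queue() took ownership of the skb (queued != 0) it must
         * not be freed here; otherwise fall through and discard it.
         */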
4291
 
4292
        if (!queued) {
4293
discard:
4294
                __kfree_skb(skb);
4295
        }
4296
        return 0;
4297
}
