/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Implementation of the Transmission Control Protocol(TCP).
 *
 * Version:     @(#)tcp_input.c 1.0.16  05/25/93
 *
 * Authors:     Ross Biro, <bir7@leland.Stanford.Edu>
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Mark Evans, <evansmp@uhura.aston.ac.uk>
 *              Corey Minyard <wf-rch!minyard@relay.EU.net>
 *              Florian La Roche, <flla@stud.uni-sb.de>
 *              Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *              Linus Torvalds, <torvalds@cs.helsinki.fi>
 *              Alan Cox, <gw4pts@gw4pts.ampr.org>
 *              Matthew Dillon, <dillon@apollo.west.oic.com>
 *              Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *              Jorge Cwik, <jorge@laser.satlink.net>
 *
 * Fixes:       Eric Schenk     : avoid multiple retransmissions in one
 *                              : round trip timeout.
 *              Eric Schenk     : tcp rst and syn cookies to deal
 *                              : with synflooding attacks.
 *              Eric Schenk     : If a simultaneous close occurred, and the
 *                                connection was over an asymmetric route, we
 *                                would lose badly if we dropped our outgoing
 *                                FIN because the route disappeared on us.
 *                                We now handle this case correctly.
 *              Eric Schenk     : Handle the case where a route changes, and
 *                                thus the outgoing device does as well, when
 *                                skb's are on the retransmit queue which still
 *                                refer to the old obsolete destination.
 *              Elliot Poger    : Added support for SO_BINDTODEVICE.
 *      Juan Jose Ciarlante     : Added sock dynamic source address rewriting
 *              Alan Cox        : Clear reserved fields - bug reported by
 *                                J Hadi Salim
 */

#include <linux/config.h>
#include <net/tcp.h>
#include <linux/ip_fw.h>
#include <linux/firewall.h>
#include <linux/interrupt.h>
#ifdef CONFIG_RST_COOKIES
#include <linux/random.h>
#endif

/*
 * RFC 1122 says:
 *
 * "the suggested [SWS] avoidance algorithm for the receiver is to keep
 *  RCV.NXT + RCV.WIN fixed until:
 *  RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)"
 *
 * We do BSD style SWS avoidance -- note that RFC1122 only says we
 * must do silly window avoidance, it does not require that we use
 * the suggested algorithm. Following BSD avoids breaking header
 * prediction.
 *
 * The "rcvbuf" and "rmem_alloc" values are shifted by 1, because
 * they also contain buffer handling overhead etc, so the window
 * we actually use is essentially based on only half those values.
 */
int tcp_new_window(struct sock * sk)
{
        unsigned long window = sk->window;
        unsigned long minwin, maxwin;
        unsigned long free_space;

        /* Get minimum and maximum window values.. */
        minwin = sk->mss;
        if (!minwin)
                minwin = sk->mtu;
        if (!minwin) {
                printk(KERN_DEBUG "tcp_new_window: mss fell to 0.\n");
                minwin = 1;
        }
        maxwin = sk->window_clamp;
        if (!maxwin)
                maxwin = MAX_WINDOW;

        if (minwin > maxwin/2)
                minwin = maxwin/2;

        /* Get current rcvbuf size.. */
        free_space = sk->rcvbuf/2;
        if (free_space < minwin) {
                sk->rcvbuf = minwin*2;
                free_space = minwin;
        }

        /* Check rcvbuf against used and minimum window */
        free_space -= sk->rmem_alloc/2;
        if ((long)(free_space - minwin) < 0)             /* SWS avoidance */
                return 0;

        /* Try to avoid the divide and multiply if we can */
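        /* For example (illustrative numbers only): with minwin = 1460 and
         * free_space = 8192, the recomputation below yields
         * (8192/1460)*1460 = 7300, i.e. free_space rounded down to a
         * whole number of minimum-sized segments.
         */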
        if (window <= free_space - minwin || window > free_space)
                window = (free_space/minwin)*minwin;

        if (window > maxwin)
                window = maxwin;
        return window;
}

/*
 *      Get rid of any delayed acks, we sent one already..
 */
static __inline__ void clear_delayed_acks(struct sock * sk)
{
        sk->ack_timed = 0;
        sk->ack_backlog = 0;
        sk->bytes_rcv = 0;
        del_timer(&sk->delack_timer);
}

/*
 *      This is the main buffer sending routine. We queue the buffer
 *      having checked that it seems sane.
 */

void tcp_send_skb(struct sock *sk, struct sk_buff *skb)
{
        int size;
        struct tcphdr * th = skb->h.th;

        /*
         *      Length of packet (not counting length of pre-tcp headers)
         */

        size = skb->len - ((unsigned char *) th - skb->data);

        /*
         *      Sanity check it..
         */

        if (size < sizeof(struct tcphdr) || size > skb->len)
        {
                printk(KERN_ERR "tcp_send_skb: bad skb (skb = %p, data = %p, th = %p, len = %lu)\n",
                        skb, skb->data, th, skb->len);
                kfree_skb(skb, FREE_WRITE);
                return;
        }

        /*
         *      If we have queued a header size packet.. (these crash a few
         *      tcp stacks if ack is not set)
         */

        if (size == sizeof(struct tcphdr))
        {
                /* If it's got a syn or fin it's notionally included in the size..*/
                if(!th->syn && !th->fin)
                {
                        printk(KERN_ERR "tcp_send_skb: attempt to queue a bogon.\n");
                        kfree_skb(skb,FREE_WRITE);
                        return;
                }
        }

        /*
         * Jacobson recommends this in the appendix of his SIGCOMM'88 paper.
         * The idea is to do a slow start again if we haven't been doing
         * anything for a long time, in which case we have no reason to
         * believe that our congestion window is still correct.
         */
        if (sk->send_head == 0 && (jiffies - sk->idletime) > sk->rto) {
                sk->cong_window = 1;
                sk->cong_count = 0;
        }

        /*
         *      Actual processing.
         */

        tcp_statistics.TcpOutSegs++;
        skb->seq = ntohl(th->seq);
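        /* end_seq is the first sequence number after this frame: the
         * payload length (size less the TCP header) plus one if FIN is
         * set, since a FIN consumes one unit of sequence space.
         */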
        skb->end_seq = skb->seq + size - 4*th->doff + th->fin;

        /*
         *      We must queue if
         *
         *      a) The right edge of this frame exceeds the window
         *      b) We are retransmitting (Nagle's rule)
         *      c) We have too many packets 'in flight'
         */

        if (after(skb->end_seq, sk->window_seq) ||
            (sk->retransmits && sk->ip_xmit_timeout == TIME_WRITE) ||
             sk->packets_out >= sk->cong_window)
        {
                /* checksum will be supplied by tcp_write_xmit.  So
                 * we shouldn't need to set it at all.  I'm being paranoid */
                th->check = 0;
                if (skb->next != NULL)
                {
                        printk(KERN_ERR "tcp_send_partial: next != NULL\n");
                        skb_unlink(skb);
                }
                skb_queue_tail(&sk->write_queue, skb);

                if (before(sk->window_seq, sk->write_queue.next->end_seq) &&
                    sk->send_head == NULL && sk->ack_backlog == 0)
                        tcp_reset_xmit_timer(sk, TIME_PROBE0, sk->rto);
        }
        else
        {
                /*
                 *      This is going straight out
                 */
                clear_delayed_acks(sk);
                th->ack_seq = htonl(sk->acked_seq);
                th->window = htons(tcp_select_window(sk));

                tcp_send_check(th, sk->saddr, sk->daddr, size, skb);

                sk->sent_seq = sk->write_seq;

                /*
                 *      This is mad. The tcp retransmit queue is put together
                 *      by the ip layer. This causes half the problems with
                 *      unroutable FIN's and other things.
                 */

                sk->prot->queue_xmit(sk, skb->dev, skb, 0);

                /*
                 *      Set for next retransmit based on expected ACK time
                 *      of the first packet in the resend queue.
                 *      This is no longer a window behind.
                 */

                tcp_reset_xmit_timer(sk, TIME_WRITE, sk->rto);
        }
}

/*
 *      Locking problems lead us to a messy situation where we can have
 *      multiple partially complete buffers queued up. This is really bad
 *      as we don't want to be sending partial buffers. Fix this with
 *      a semaphore or similar to lock tcp_write per socket.
 *
 *      These routines are pretty self descriptive.
 */

struct sk_buff * tcp_dequeue_partial(struct sock * sk)
{
        struct sk_buff * skb;
        unsigned long flags;

        save_flags(flags);
        cli();
        skb = sk->partial;
        if (skb) {
                sk->partial = NULL;
                del_timer(&sk->partial_timer);
        }
        restore_flags(flags);
        return skb;
}

/*
 *      Empty the partial queue
 */

void tcp_send_partial(struct sock *sk)
{
        struct sk_buff *skb;

        if (sk == NULL)
                return;
        while ((skb = tcp_dequeue_partial(sk)) != NULL)
                tcp_send_skb(sk, skb);
}

/*
 *      Queue a partial frame
 */
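/* Note that at most one partial buffer is ever outstanding per socket:
 * if a partial frame is already pending when a new one is queued, the
 * old one is transmitted immediately (see the tail of this routine).
 */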

void tcp_enqueue_partial(struct sk_buff * skb, struct sock * sk)
{
        struct sk_buff * tmp;
        unsigned long flags;

        save_flags(flags);
        cli();
        tmp = sk->partial;
        if (tmp)
                del_timer(&sk->partial_timer);
        sk->partial = skb;
        init_timer(&sk->partial_timer);
        /*
         *      Wait up to 30 seconds for the buffer to fill.
         *      ( I have no idea why this timer is here!
         *        It seems to be silliness for interactive response. Linus?
         *      )
         */
        sk->partial_timer.expires = jiffies+30*HZ;
        sk->partial_timer.function = (void (*)(unsigned long)) tcp_send_partial;
        sk->partial_timer.data = (unsigned long) sk;
        add_timer(&sk->partial_timer);
        restore_flags(flags);
        if (tmp)
                tcp_send_skb(sk, tmp);
}

/*
 *      This routine takes stuff off of the write queue,
 *      and puts it in the xmit queue. This happens as incoming acks
 *      open up the remote window for us.
 */

void tcp_write_xmit(struct sock *sk)
{
        struct sk_buff *skb;

        /*
         *      The bytes will have to remain here. In time closedown will
         *      empty the write queue and all will be happy
         */

        if(sk->zapped)
                return;

        /*
         *      Anything on the transmit queue that fits the window can
         *      be added providing we are not
         *
         *      a) retransmitting (Nagle's rule)
         *      b) exceeding our congestion window.
         */

        while((skb = skb_peek(&sk->write_queue)) != NULL &&
                !after(skb->end_seq, sk->window_seq) &&
                (sk->retransmits == 0 ||
                 sk->ip_xmit_timeout != TIME_WRITE ||
                 !after(skb->end_seq, sk->rcv_ack_seq))
                && sk->packets_out < sk->cong_window)
        {
                IS_SKB(skb);
                skb_unlink(skb);

                /*
                 *      See if we really need to send the whole packet.
                 */

                if (before(skb->end_seq, sk->rcv_ack_seq +1)) {
                        /*
                         *      This is acked data. We can discard it.
                         *      This implies the packet was sent out
                         *      of the write queue by a zero window probe.
                         */

                        sk->retransmits = 0;
                        kfree_skb(skb, FREE_WRITE);
                        if (!sk->dead)
                                sk->write_space(sk);
                } else {
                        struct tcphdr *th;
                        struct iphdr *iph;
                        int size;

                        iph = skb->ip_hdr;
                        th = (struct tcphdr *)(((char *)iph) +(iph->ihl << 2));

                        /* See if we need to shrink the leading packet on
                         * the retransmit queue. Strictly speaking, we
                         * should never need to do this, but some buggy TCP
                         * implementations get confused if you send them
                         * a packet that contains both old and new data. (Feh!)
                         * Soooo, we have this ugliness here.
                         */
                        if (after(sk->rcv_ack_seq,skb->seq+th->syn))
                                tcp_shrink_skb(sk,skb,sk->rcv_ack_seq);

                        size = skb->len - (((unsigned char *) th) - skb->data);
#ifndef CONFIG_NO_PATH_MTU_DISCOVERY
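                        /* If the frame no longer fits our MTU estimate,
                         * clear the Don't Fragment bit so it can be
                         * fragmented in transit rather than dropped.
                         */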
                        if (size > sk->mtu - sizeof(struct iphdr))
                        {
                                iph->frag_off &= ~htons(IP_DF);
                                ip_send_check(iph);
                        }
#endif

/*
 * Put in the ack seq and window at this point rather than earlier,
 * in order to keep them monotonic.  We really want to avoid taking
 * back window allocations.  That's legal, but RFC1122 says it's frowned on.
 * Ack and window will in general have changed since this packet was put
 * on the write queue.
 */
                        th->ack_seq = htonl(sk->acked_seq);
                        th->window = htons(tcp_select_window(sk));

                        tcp_send_check(th, sk->saddr, sk->daddr, size, skb);

                        sk->sent_seq = skb->end_seq;

                        /*
                         *      IP manages our queue for some crazy reason
                         */
#ifndef NO_DAVEM_FIX
                        sk->prot->queue_xmit(sk, skb->dev, skb, 0);
#else
                        sk->prot->queue_xmit(sk, skb->dev, skb, skb->free);
#endif

                        clear_delayed_acks(sk);

                        tcp_reset_xmit_timer(sk, TIME_WRITE, sk->rto);
                }
        }
}


/*
 *      A socket has timed out on its send queue and wants to do a
 *      little retransmitting. Currently this means TCP.
 */

void tcp_do_retransmit(struct sock *sk, int all)
{
        struct sk_buff * skb;
        struct proto *prot;
        struct device *dev;
        struct rtable *rt;

        prot = sk->prot;
        if (!all) {
                /*
                 * If we are just retransmitting one packet reset
                 * to the start of the queue.
                 */
                sk->send_next = sk->send_head;
                sk->packets_out = 0;
        }
        skb = sk->send_next;

        while (skb != NULL)
        {
                struct tcphdr *th;
                struct iphdr *iph;
                int size;
                unsigned long flags;

                dev = skb->dev;
                IS_SKB(skb);
                skb->when = jiffies;

                /* dl1bke 960201 - @%$$! Hope this cures strange race conditions
                 *                 with AX.25 mode VC. (esp. DAMA)
                 *                 If the buffer is locked we should not retransmit
                 *                 anyway, so we don't need all the fuss to prepare
                 *                 the buffer in this case.
                 *                 (the skb_pull() changes skb->data while we may
                 *                 actually try to send the data. Ouch. A side
                 *                 effect is that we'll send some unnecessary data,
                 *                 but the alternative is disastrous...)
                 */

                save_flags(flags);
                cli();

                if (skb_device_locked(skb)) {
                        restore_flags(flags);
                        break;
                }

                /* Unlink from any chain */
                skb_unlink(skb);

                restore_flags(flags);

                /*
                 *      Discard the surplus MAC header
                 */
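                /* This advances skb->data to the IP header; a fresh
                 * link-layer header is pushed back on below, once the
                 * (possibly new) route and device are known.
                 */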
                skb_pull(skb,((unsigned char *)skb->ip_hdr)-skb->data);

                /*
                 * In general it's OK just to use the old packet.  However we
                 * need to use the current ack and window fields.  Urg and
                 * urg_ptr could possibly stand to be updated as well, but we
                 * don't keep the necessary data.  That shouldn't be a problem,
                 * if the other end is doing the right thing.  Since we're
                 * changing the packet, we have to issue a new IP identifier.
                 */

                iph = (struct iphdr *)skb->data;
                th = (struct tcphdr *)(((char *)iph) + (iph->ihl << 2));

                /* See if we need to shrink the leading packet on
                 * the retransmit queue. Strictly speaking, we
                 * should never need to do this, but some buggy TCP
                 * implementations get confused if you send them
                 * a packet that contains both old and new data. (Feh!)
                 * Soooo, we have this ugliness here.
                 *
                 * Is the && test needed here? If so, then it implies that
                 * we might be retransmitting an acked packet. This code is
                 * needed here to talk to the Solaris 2.6 stack.
                 */
                if (after(sk->rcv_ack_seq,skb->seq+th->syn) && before(sk->rcv_ack_seq, skb->end_seq))
                        tcp_shrink_skb(sk,skb,sk->rcv_ack_seq);

                size = ntohs(iph->tot_len) - (iph->ihl<<2);

                /*
                 *      Note: We ought to check for window limits here but
                 *      currently this is done (less efficiently) elsewhere.
                 */

                /*
                 *      Put a MAC header back on (may cause ARPing)
                 */

                {
                        /* ANK: UGLY, but the bug that was here should be fixed.
                         */
                        struct options *  opt = (struct options*)skb->proto_priv;
                        rt = ip_check_route(&sk->ip_route_cache, opt->srr?opt->faddr:iph->daddr,
                                            skb->localroute, sk->bound_device);
                }

                iph->id = htons(ip_id_count++);
#ifndef CONFIG_NO_PATH_MTU_DISCOVERY
                if (rt && ntohs(iph->tot_len) > rt->rt_mtu)
                        iph->frag_off &= ~htons(IP_DF);
#endif
                ip_send_check(iph);

                if (rt==NULL)   /* Deep poo */
                {
                        if(skb->sk)
                        {
                                skb->sk->err_soft=ENETUNREACH;
                                skb->sk->error_report(skb->sk);
                        }
                        /* Can't transmit this packet, no reason
                         * to transmit the later ones, even if
                         * the congestion window allows.
                         */
                        break;
                }
                else
                {
                        dev=rt->rt_dev;
                        if (skb->dev != dev && skb->link3 == 0
                        && !skb_queue_empty(&sk->write_queue)) {
                                /* THIS IS UGLY. DON'T SHOW THIS TO YOUR MOTHER. --erics
                                 * Route shifted devices.
                                 * If this is the last packet in the
                                 * retransmit queue, then we should walk
                                 * the chain of packets in the write_queue
                                 * that have the same device and
                                 * fix routing on these packets as well.
                                 * If we fail to do this, then every packet
                                 * in the transmit queue will incur a
                                 * retransmit with the backed off retransmit
                                 * timeout. This is very bad.
                                 */
                                struct sk_buff *skb2 = sk->write_queue.next;
                                while (skb2 && skb2->dev == skb->dev) {
                                        skb2->raddr=rt->rt_gateway;
                                        if (sysctl_ip_dynaddr & 4 || (sk->state == TCP_SYN_SENT && sysctl_ip_dynaddr & 3))
                                                ip_rewrite_addrs (sk, skb2, dev);
                                        skb_pull(skb2,((unsigned char *)skb2->ip_hdr)-skb2->data);
                                        skb2->dev = dev;
                                        skb2->arp=1;
                                        if (rt->rt_hh)
                                        {
                                                memcpy(skb_push(skb2,dev->hard_header_len),rt->rt_hh->hh_data,dev->hard_header_len);
                                                if (!rt->rt_hh->hh_uptodate)
                                                {
                                                        skb2->arp = 0;
#if RT_CACHE_DEBUG >= 2
                                                        printk("tcp_do_retransmit(1): hh miss %08x via %08x\n", iph->daddr, rt->rt_gateway);
#endif
                                                }
                                        }
                                        else if (dev->hard_header)
                                        {
                                                if(dev->hard_header(skb2, dev, ETH_P_IP, NULL, NULL, skb2->len)<0)
                                                        skb2->arp=0;
                                        }

                                        skb2 = skb2->next;
                                }
                        }
                        skb->raddr=rt->rt_gateway;
                        if ((skb->dev !=dev || skb->dev->pa_addr != skb->ip_hdr->saddr) && (sysctl_ip_dynaddr & 4 || (sk->state == TCP_SYN_SENT && sysctl_ip_dynaddr & 3)))
                                ip_rewrite_addrs(sk, skb, dev);
                        skb->dev=dev;
                        skb->arp=1;
#ifdef CONFIG_FIREWALL
                        if (call_out_firewall(PF_INET, skb->dev, iph, NULL) < FW_ACCEPT) {
                                /* The firewall wants us to dump the packet.
                                 * We have to check this here, because
                                 * the drop in ip_queue_xmit only catches the
                                 * first time we send it. We must drop on
                                 * every resend as well.
                                 */
                                break;
                        }
#endif
                        if (rt->rt_hh)
                        {
                                memcpy(skb_push(skb,dev->hard_header_len),rt->rt_hh->hh_data,dev->hard_header_len);
                                if (!rt->rt_hh->hh_uptodate)
                                {
                                        skb->arp = 0;
#if RT_CACHE_DEBUG >= 2
                                        printk("tcp_do_retransmit(2): hh miss %08x via %08x\n", iph->daddr, rt->rt_gateway);
#endif
                                }
                        }
                        else if (dev->hard_header)
                        {
                                if(dev->hard_header(skb, dev, ETH_P_IP, NULL, NULL, skb->len)<0)
                                        skb->arp=0;
                        }

                        /*
                         *      This is not the right way to handle this. We have to
                         *      issue an up to date window and ack report with this
                         *      retransmit to keep the odd buggy tcp that relies on
                         *      the fact BSD does this happy.
                         *      We don't however need to recalculate the entire
                         *      checksum, so someone wanting a small problem to play
                         *      with might like to implement RFC1141/RFC1624 and speed
                         *      this up by avoiding a full checksum.
                         */

                        th->ack_seq = htonl(sk->acked_seq);
                        clear_delayed_acks(sk);
                        th->window = ntohs(tcp_select_window(sk));
                        tcp_send_check(th, sk->saddr, sk->daddr, size, skb);

                        /*
                         *      If the interface is (still) up and running, kick it.
                         */

                        if (dev->flags & IFF_UP)
                        {
                                /*
                                 *      If the packet is still being sent by the device/protocol
                                 *      below then don't retransmit. This is both needed, and good -
                                 *      especially with connected mode AX.25 where it stops resends
                                 *      of an as yet unsent frame!
                                 *      We still add up the counts as the round trip time wants
                                 *      adjusting.
                                 */
                                if (!skb_device_locked(skb))
                                {
                                        /* Now queue it */
                                        ip_statistics.IpOutRequests++;
                                        dev_queue_xmit(skb, dev, sk->priority);
                                        sk->packets_out++;
                                } else {
                                        /* This shouldn't happen as we skip out above if the buffer is locked */
                                        printk(KERN_WARNING "tcp_do_retransmit: sk_buff (%p) became locked\n", skb);
                                }
                        }
                }

                /*
                 *      Count retransmissions
                 */

                sk->prot->retransmits++;
                tcp_statistics.TcpRetransSegs++;

                /*
                 * Record the high sequence number to help avoid doing
                 * too much fast retransmission.
                 */
                if (sk->retransmits)
                        sk->high_seq = sk->sent_seq;

                /*
                 * Advance the send_next pointer so we don't keep
                 * retransmitting the same stuff every time we get an ACK.
                 */
                sk->send_next = skb->link3;

                /*
                 *      Only one retransmit requested.
                 */

                if (!all)
                        break;

                /*
                 *      This should cut it off before we send too many packets.
                 */

                if (sk->packets_out >= sk->cong_window)
                        break;

                skb = skb->link3;
        }
}

/*
 *      This routine will send an RST to the other tcp.
 */

void tcp_send_reset(unsigned long saddr, unsigned long daddr, struct tcphdr *th,
          struct proto *prot, struct options *opt, struct device *dev, int tos, int ttl)
{
        struct sk_buff *buff;
        struct tcphdr *t1;
        int tmp;
        struct device *ndev=NULL;

        /*
         *      Cannot reset a reset (Think about it).
         */

        if(th->rst)
                return;

        /*
         * We need to grab some memory, and put together an RST,
         * and then put it into the queue to be sent.
         */

        buff = alloc_skb(MAX_RESET_SIZE, GFP_ATOMIC);
        if (buff == NULL)
                return;

        buff->sk = NULL;
        buff->dev = dev;
        buff->localroute = 0;
        buff->csum = 0;

        /*
         *      Put in the IP header and routing stuff.
         */

        tmp = prot->build_header(buff, saddr, daddr, &ndev, IPPROTO_TCP, opt,
                           sizeof(struct tcphdr),tos,ttl,NULL);
        if (tmp < 0)
        {
                buff->free = 1;
                sock_wfree(NULL, buff);
                return;
        }

        t1 =(struct tcphdr *)skb_put(buff,sizeof(struct tcphdr));
        memset(t1, 0, sizeof(*t1));

        /*
         *      Swap the send and the receive.
         */

        t1->dest = th->source;
        t1->source = th->dest;
        t1->doff = sizeof(*t1)/4;
        t1->rst = 1;
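
        /* Per RFC 793 reset generation: if the offending segment carried an
         * ACK, the reset takes its sequence number from that ack field;
         * otherwise seq stays 0 and we ACK the incoming segment instead
         * (a SYN counts as one unit of sequence space).
         */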
        if(th->ack)
        {
                t1->seq = th->ack_seq;
        }
        else
        {
                t1->ack = 1;
                if(!th->syn)
                        t1->ack_seq = th->seq;
                else
                        t1->ack_seq = htonl(ntohl(th->seq)+1);
        }

        tcp_send_check(t1, saddr, daddr, sizeof(*t1), buff);
        prot->queue_xmit(NULL, ndev, buff, 1);
        tcp_statistics.TcpOutSegs++;
}

#ifdef CONFIG_RST_COOKIES
/*
 *      This routine will send a bad SYNACK to the remote tcp
 *      containing a secure sequence number.
 *      This should evoke a reset with a cookie, so we can verify
 *      the existence of the remote machine.
 */

void tcp_send_synack_probe(unsigned long saddr, unsigned long daddr, struct tcphdr *th,
          struct proto *prot, struct options *opt, struct device *dev, int tos, int ttl)
{
        struct sk_buff *buff;
        struct tcphdr *t1;
        int tmp;
        struct device *ndev=NULL;

        /*
         * We need to grab some memory, and put together a SYNACK,
         * and then put it into the queue to be sent.
         */

        buff = alloc_skb(MAX_SYN_SIZE, GFP_ATOMIC);
        if (buff == NULL)
                return;

        buff->sk = NULL;
        buff->dev = dev;
        buff->localroute = 0;
        buff->csum = 0;

        /*
         *      Put in the IP header and routing stuff.
         */

        tmp = prot->build_header(buff, saddr, daddr, &ndev, IPPROTO_TCP, opt,
                           sizeof(struct tcphdr),tos,ttl,NULL);
        if (tmp < 0)
        {
                buff->free = 1;
                sock_wfree(NULL, buff);
                return;
        }

        t1 = (struct tcphdr *)skb_put(buff,sizeof(struct tcphdr));

        memcpy(t1, th, sizeof(*t1));
        /*
         *      Swap the send and the receive.
         */
        t1->dest = th->source;
        t1->source = th->dest;
        t1->ack_seq = t1->seq = htonl(secure_tcp_probe_number(daddr,saddr,
                ntohs(th->source),ntohs(th->dest),ntohl(th->seq),0));
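        /* The deliberately bogus sequence number doubles as the cookie:
         * the RST this SYNACK provokes must echo it back, which lets us
         * verify that the remote machine really exists.
         */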
        t1->window = htons(1024);       /* make up a window here. */
        t1->syn = 1;
        t1->ack = 1;
        t1->urg = 0;
        t1->rst = 0;
        t1->psh = 0;
        t1->fin = 0;             /* In case someone sent us a SYN|FIN frame! */
        t1->doff = sizeof(*t1)/4;
        t1->res1 = 0;    /* RFC requires this, we upset ECN without it */
        t1->res2 = 0;

        tcp_send_check(t1, saddr, daddr, sizeof(*t1), buff);
        prot->queue_xmit(NULL, ndev, buff, 1);
        tcp_statistics.TcpOutSegs++;
}
#endif

/*
 *      Send a fin.
 */

void tcp_send_fin(struct sock *sk)
{
        struct proto *prot =(struct proto *)sk->prot;
        struct tcphdr *th =(struct tcphdr *)&sk->dummy_th;
        struct tcphdr *t1;
        struct sk_buff *buff;
        struct device *dev=sk->bound_device;
        int tmp;

        buff = sock_wmalloc(sk, MAX_RESET_SIZE, 1, GFP_KERNEL);

        if (buff == NULL)
        {
                /* This is a disaster if it occurs */
                printk(KERN_CRIT "tcp_send_fin: Impossible malloc failure");
                return;
        }

        /*
         *      Administrivia
         */

        buff->sk = sk;
        buff->localroute = sk->localroute;
        buff->csum = 0;

        /*
         *      Put in the IP header and routing stuff.
         */

        tmp = prot->build_header(buff,sk->saddr, sk->daddr, &dev,
                           IPPROTO_TCP, sk->opt,
                           sizeof(struct tcphdr),sk->ip_tos,sk->ip_ttl,&sk->ip_route_cache);
        if (tmp < 0)
        {
                /* Oh oh. We couldn't route the packet, and we can't afford
                 * to drop it from the queue, since we would then fail to
                 * retransmit it, and would never try to initiate a close
                 * again.
                 * Drop it onto the loopback device. The worst thing that
                 * happens is that the send gets dropped when it comes out
                 * the other side. If we get lucky it might even get forwarded
                 * to its real destination.
                 * WARNING: there are a few subtle points here.
                 * 1) We assume that if we build the header using the
                 *    loopback we can not fail. The only way this can happen
                 *    right now is if someone marks the loopback as
                 *    a gateway. This should never happen. Be careful
                 *    not to change that without taking this case into account.
                 * 2) If we fail to queue up the FIN packet here we get
                 *    bitten later when we receive a simultaneous FIN.
                 *    See the comments in tcp_fin().
                 */
                dev = &loopback_dev;
                tmp = prot->build_header(buff,sk->saddr, sk->daddr, &dev,
                           IPPROTO_TCP, sk->opt,
                           sizeof(struct tcphdr),sk->ip_tos,sk->ip_ttl,&sk->ip_route_cache);
                if (tmp < 0) {
                        printk(KERN_CRIT "tcp_send_fin: Impossible loopback failure");
                        return;
                }
        }

        clear_delayed_acks(sk);

        /*
         *      We ought to check if the end of the queue is a buffer and
         *      if so simply add the fin to that buffer, not send it ahead.
         */

        t1 =(struct tcphdr *)skb_put(buff,sizeof(struct tcphdr));
        buff->dev = dev;
        memcpy(t1, th, sizeof(*t1));
        buff->seq = sk->write_seq;
        sk->write_seq++;
        buff->end_seq = sk->write_seq;
        t1->seq = htonl(buff->seq);
        t1->ack_seq = htonl(sk->acked_seq);
        t1->window = htons(tcp_select_window(sk));
        t1->fin = 1;
        tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), buff);

        /*
         * If there is data in the write queue, the fin must be appended to
         * the write queue.
         */

        if (skb_peek(&sk->write_queue) != NULL)
        {
                buff->free = 0;
                if (buff->next != NULL)
                {
                        printk(KERN_ERR "tcp_send_fin: next != NULL\n");
                        skb_unlink(buff);
                }
                skb_queue_tail(&sk->write_queue, buff);
        }
        else
        {
                sk->sent_seq = sk->write_seq;
                sk->prot->queue_xmit(sk, dev, buff, 0);
                tcp_reset_xmit_timer(sk, TIME_WRITE, sk->rto);
        }
}


void tcp_send_synack(struct sock * newsk, struct sock * sk, struct sk_buff * skb, int destroy)
{
        struct tcphdr *t1;
        unsigned char *ptr;
        struct sk_buff * buff;
        struct device *ndev=newsk->bound_device;
        int tmp;

        buff = sock_wmalloc(newsk, MAX_SYN_SIZE, 1, GFP_ATOMIC);
        if (buff == NULL)
        {
                sk->err = ENOMEM;
                destroy_sock(newsk);
                kfree_skb(skb, FREE_READ);
                tcp_statistics.TcpAttemptFails++;
                return;
        }

        buff->sk = newsk;
        buff->localroute = newsk->localroute;

        /*
         *      Put in the IP header and routing stuff.
         */

        tmp = sk->prot->build_header(buff, newsk->saddr, newsk->daddr, &ndev,
                               IPPROTO_TCP, newsk->opt, MAX_SYN_SIZE,sk->ip_tos,sk->ip_ttl,&newsk->ip_route_cache);

        /*
         *      Something went wrong.
         */

        if (tmp < 0)
        {
                sk->err = tmp;
                buff->free = 1;
                kfree_skb(buff,FREE_WRITE);
                destroy_sock(newsk);
                skb->sk = sk;
                kfree_skb(skb, FREE_READ);
                tcp_statistics.TcpAttemptFails++;
                return;
        }

        t1 =(struct tcphdr *)skb_put(buff,sizeof(struct tcphdr));

        memcpy(t1, skb->h.th, sizeof(*t1));
        buff->seq = newsk->write_seq++;
        buff->end_seq = newsk->write_seq;
        /*
         *      Swap the send and the receive.
         */
        t1->dest = skb->h.th->source;
        t1->source = newsk->dummy_th.source;
        t1->seq = ntohl(buff->seq);
        newsk->sent_seq = newsk->write_seq;
        t1->window = ntohs(tcp_select_window(newsk));
        t1->syn = 1;
        t1->ack = 1;
        t1->urg = 0;
        t1->rst = 0;
        t1->psh = 0;
        t1->ack_seq = htonl(newsk->acked_seq);
        t1->doff = sizeof(*t1)/4+1;
        t1->res1 = 0;
        t1->res2 = 0;
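
        /* Append the single TCP option we advertise on a SYNACK: kind 2
         * (MSS), length 4, then the 16-bit MSS value in network byte
         * order. The extra word is why doff above is sizeof(*t1)/4 + 1.
         */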
        ptr = skb_put(buff,4);
        ptr[0] = 2;
        ptr[1] = 4;
        ptr[2] = ((newsk->mtu) >> 8) & 0xff;
        ptr[3] =(newsk->mtu) & 0xff;
        buff->csum = csum_partial(ptr, 4, 0);
#ifdef CONFIG_SYN_COOKIES
        /* Don't save buff on the newsk chain if we are going to destroy
         * newsk anyway in a second, it just delays getting rid of newsk.
         */
        if (destroy) {
                /* BUFF was charged to NEWSK, _this_ is what we want
                 * to undo so the SYN cookie can be killed now.  SKB
                 * is charged to SK, below we will undo that when
                 * we kfree SKB.
                 */
                buff->sk = NULL;
                atomic_sub(buff->truesize, &newsk->wmem_alloc);
        }
#endif
        tcp_send_check(t1, newsk->saddr, newsk->daddr, sizeof(*t1)+4, buff);
        if (destroy)
                newsk->prot->queue_xmit(NULL, ndev, buff, 1);
        else
                newsk->prot->queue_xmit(newsk, ndev, buff, 0);

#ifdef CONFIG_SYN_COOKIES
        if (destroy) {
                /*
                 * Get rid of the newsk structure if this was a cookie.
                 */
                destroy_sock(newsk);
                skb->sk = sk;
                kfree_skb(skb, FREE_READ);
        } else {
#endif
                tcp_reset_xmit_timer(newsk, TIME_WRITE, TCP_TIMEOUT_INIT);
                skb->sk = newsk;

                /*
                 *      Charge the sock_buff to newsk.
                 */
                atomic_sub(skb->truesize, &sk->rmem_alloc);
                atomic_add(skb->truesize, &newsk->rmem_alloc);

                skb_queue_tail(&sk->receive_queue,skb);
                sk->ack_backlog++;
#ifdef CONFIG_SYN_COOKIES
        }
#endif
        tcp_statistics.TcpOutSegs++;
}

/*
 *      Set up the timers for sending a delayed ack..
 *
 *      rules for delaying an ack:
 *      - delay time <= 0.5 HZ
 *      - must send at least every 2 full sized packets
 *      - we don't have a window update to send
 *
 *      additional thoughts:
 *      - we should not delay sending an ACK if we have ato > 0.5 HZ.
 *        My thinking about this is that in this case we will just be
 *        systematically skewing the RTT calculation. (The rule about
 *        sending every two full sized packets will never need to be
 *        invoked, the delayed ack will be sent before the ATO timeout
 *        every time. Of course, this relies on our having a good estimate
 *        for packet interarrival times.)
 */
void tcp_send_delayed_ack(struct sock * sk, int max_timeout, unsigned long timeout)
{
        /* Calculate new timeout */
        if (timeout > max_timeout)
                timeout = max_timeout;
        if (sk->bytes_rcv >= sk->max_unacked)
                timeout = 0;
        timeout += jiffies;

        /* Use the new timeout only if it is earlier than an already-pending one */
        if (!del_timer(&sk->delack_timer) || timeout < sk->delack_timer.expires)
                sk->delack_timer.expires = timeout;

        sk->ack_backlog++;
        add_timer(&sk->delack_timer);
}


/*
 *      This routine sends an ack and also updates the window.
 */

void tcp_send_ack(struct sock *sk)
{
        struct sk_buff *buff;
        struct tcphdr *t1;
        struct device *dev = sk->bound_device;
        int tmp;

        if(sk->zapped)
                return;         /* We have been reset, we may not send again */

        /*
         *      If we have nothing queued for transmit and the transmit timer
         *      is on we are just doing an ACK timeout and need to switch
         *      to a keepalive.
         */

        clear_delayed_acks(sk);

        if (sk->send_head == NULL
            && skb_queue_empty(&sk->write_queue)
            && sk->ip_xmit_timeout == TIME_WRITE)
        {
                if (sk->keepopen)
                        tcp_reset_xmit_timer(sk,TIME_KEEPOPEN,TCP_TIMEOUT_LEN);
                else
                        del_timer(&sk->retransmit_timer);
        }

        /*
         * We need to grab some memory, and put together an ack,
         * and then put it into the queue to be sent.
         */

        buff = sock_wmalloc(sk, MAX_ACK_SIZE, 1, GFP_ATOMIC);
        if (buff == NULL)
        {
                /*
                 *      Force it to send an ack. We don't have to do this
                 *      (ACK is unreliable) but it's much better use of
                 *      bandwidth on slow links to send a spare ack than
                 *      resend packets.
                 */

                tcp_send_delayed_ack(sk, HZ/2, HZ/2);
                return;
        }

        /*
         *      Assemble a suitable TCP frame
         */

        buff->sk = sk;
        buff->localroute = sk->localroute;
        buff->csum = 0;

        /*
         *      Put in the IP header and routing stuff.
         */

        tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
                                IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl,&sk->ip_route_cache);
        if (tmp < 0)
        {
                buff->free = 1;
                sock_wfree(sk, buff);
                return;
        }

#ifndef CONFIG_NO_PATH_MTU_DISCOVERY
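        /* A bare ACK carries no payload, so it should never need
         * fragmenting; setting Don't Fragment here is therefore safe.
         */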
        buff->ip_hdr->frag_off |= htons(IP_DF);
#endif

        t1 =(struct tcphdr *)skb_put(buff,sizeof(struct tcphdr));

        /*
         *      Fill in the packet and send it
         */

        memcpy(t1, &sk->dummy_th, sizeof(*t1));
        t1->seq     = htonl(sk->sent_seq);
        t1->ack_seq = htonl(sk->acked_seq);
        t1->window  = htons(tcp_select_window(sk));

        tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), buff);
        if (sk->debug)
                printk(KERN_ERR "\rtcp_ack: seq %x ack %x\n", sk->sent_seq, sk->acked_seq);
        sk->prot->queue_xmit(sk, dev, buff, 1);
        tcp_statistics.TcpOutSegs++;
}

/*
 *      This routine sends a packet with an out of date sequence
 *      number. It assumes the other end will try to ack it.
 */

void tcp_write_wakeup(struct sock *sk)
{
        struct sk_buff *buff,*skb;
        struct tcphdr *t1;
        struct device *dev=sk->bound_device;
        int tmp;

        if (sk->zapped)
                return; /* After a valid reset we can send no more */

        /*
         *      Write data can still be transmitted/retransmitted in the
         *      following states.  If any other state is encountered, return.
         *      [listen/close will never occur here anyway]
         */

        if (sk->state != TCP_ESTABLISHED &&
            sk->state != TCP_CLOSE_WAIT &&
            sk->state != TCP_FIN_WAIT1 &&
            sk->state != TCP_LAST_ACK &&
            sk->state != TCP_CLOSING
        )
        {
                return;
        }
        if ( before(sk->sent_seq, sk->window_seq) &&
            (skb=skb_peek(&sk->write_queue)))
        {
                /*
                 *      We are probing the opening of a window
                 *      but the window size is != 0
                 *      must have been a result of SWS avoidance ( sender )
                 */

                struct iphdr *iph;
                struct tcphdr *th;
                struct tcphdr *nth;
                unsigned long win_size;
#if 0
                unsigned long ow_size;
#endif

                /*
                 *      Recover the buffer pointers
                 */

                iph = (struct iphdr *)skb->ip_hdr;
                th = (struct tcphdr *)(((char *)iph) +(iph->ihl << 2));

                /*
                 *      How many bytes can we send ?
                 */

                /* During window probes, don't try to send more than is
                 * actually in the skb we've taken off the send queue here.
                 */
                win_size = skb->len - (((unsigned char *) th) - skb->data);
                win_size -= th->doff * 4;

                /* Don't send more than the offered window! */
                win_size = min(win_size, sk->window_seq - sk->sent_seq);

                /*
                 *      Grab the data for a temporary frame
                 */

                buff = sock_wmalloc(sk, win_size + th->doff * 4 +
                                     (iph->ihl << 2) +
                                     sk->prot->max_header + 15,
                                     1, GFP_ATOMIC);
                if ( buff == NULL )
                        return;

                /*
                 *      If we strip the packet on the write queue we must
                 *      be ready to retransmit this one
                 */

                buff->free = /*0*/1;

                buff->sk = sk;
                buff->localroute = sk->localroute;

                /*
                 *      Put headers on the new packet
                 */

                tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
                                         IPPROTO_TCP, sk->opt, buff->truesize,
                                         sk->ip_tos,sk->ip_ttl,&sk->ip_route_cache);
                if (tmp < 0)
                {
                        sock_wfree(sk, buff);
                        return;
                }

                /*
                 *      Move the TCP header over
                 */

                buff->dev = dev;

                nth = (struct tcphdr *) skb_put(buff,sizeof(*th));

                memcpy(nth, th, sizeof(*th));

                /*
                 *      Correct the new header
                 */

                nth->ack = 1;
                nth->ack_seq = htonl(sk->acked_seq);
                nth->window = htons(tcp_select_window(sk));
                nth->check = 0;

                /*
                 *      Copy TCP options and data start to our new buffer
                 */

                buff->csum = csum_partial_copy((void *)(th + 1), skb_put(buff,win_size),
                                win_size + th->doff*4 - sizeof(*th), 0);

                /*
                 *      Remember our right edge sequence number.
                 */

                buff->end_seq = sk->sent_seq + win_size;
                sk->sent_seq = buff->end_seq;           /* Hack */
                if(th->urg && ntohs(th->urg_ptr) < win_size)
                        nth->urg = 0;

                /*
                 *      Checksum the split buffer
                 */

                tcp_send_check(nth, sk->saddr, sk->daddr,
                           nth->doff * 4 + win_size , buff);
        }
        else
        {
                buff = sock_wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
                if (buff == NULL)
                        return;

                buff->free = 1;
                buff->sk = sk;
                buff->localroute = sk->localroute;
                buff->csum = 0;

                /*
                 *      Put in the IP header and routing stuff.
                 */

                tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
                                IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl,&sk->ip_route_cache);
                if (tmp < 0)
                {
                        sock_wfree(sk, buff);
                        return;
                }

                t1 = (struct tcphdr *)skb_put(buff,sizeof(struct tcphdr));
                memcpy(t1,(void *) &sk->dummy_th, sizeof(*t1));

                /*
                 *      Use a previous sequence.
                 *      This should cause the other end to send an ack.
                 */

                t1->seq = htonl(sk->sent_seq-1);
/*              t1->fin = 0;    -- We are sending a 'previous' sequence, and 0 bytes of data - thus no FIN bit */
                t1->ack_seq = htonl(sk->acked_seq);
                t1->window = htons(tcp_select_window(sk));
                tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), buff);

        }

        /*
         *      Send it.
         */

        sk->prot->queue_xmit(sk, dev, buff, 1);
        tcp_statistics.TcpOutSegs++;
}

/*
 *      A window probe timeout has occurred.
 */

void tcp_send_probe0(struct sock *sk)
{
        if (sk->zapped)
                return;         /* After a valid reset we can send no more */

        tcp_write_wakeup(sk);
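
        /* Back off exponentially: double the retransmission timeout after
         * each unanswered probe, capped at 120 seconds.
         */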
        sk->backoff++;
        sk->rto = min(sk->rto << 1, 120*HZ);
        sk->retransmits++;
        sk->prot->retransmits++;
        tcp_reset_xmit_timer (sk, TIME_PROBE0, sk->rto);
}

/*
 * Remove the portion of a packet that has already been sent.
 * Needed to deal with buggy TCP implementations that can't deal
 * with seeing a packet that contains some data that has already
 * been received.
 *
 * Note that the SYN sequence number is at the start of the packet
 * while the FIN is at the end. This means that we always clear out
 * the SYN bit, and never clear out the FIN bit.
 */
void tcp_shrink_skb(struct sock *sk, struct sk_buff *skb, u32 ack)
{
        struct iphdr *iph;
        struct tcphdr *th;
        unsigned char *old, *new;
        unsigned long len;
        int diff;

        /*
         *      Recover the buffer pointers
         */

        iph = (struct iphdr *)skb->ip_hdr;
        th = (struct tcphdr *)(((char *)iph) +(iph->ihl << 2));

        /* How much data are we dropping from the tcp frame? */
        diff = ack - (skb->seq + th->syn);
        /* How much data are we keeping in the tcp frame? */
        len = (skb->end_seq - th->fin) - ack;
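
        /* A hypothetical example: a frame carrying bytes 1000-1999
         * (skb->seq 1000, end_seq 2000, no SYN or FIN) with ack = 1400
         * gives diff = 400 bytes to drop and len = 600 bytes to keep.
         */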
        /* pointers to new start of remaining data, and old start */
        new = (unsigned char *)th + th->doff*4;
        old = new+diff;

        /* Update our starting seq number */
        skb->seq = ack;
        th->seq = htonl(ack);
        th->syn = 0;             /* Turn SYN off as it is logically at the start of the packet */

        iph->tot_len = htons(ntohs(iph->tot_len)-diff);
        ip_send_check(iph);

        /* Get the partial checksum for the TCP options */
        if (th->doff*4 - sizeof(*th) > 0)
                skb->csum = csum_partial((void *)(th+1),
                                th->doff*4-sizeof(*th),0);
        else
                skb->csum = 0;

        /* Copy the good data down and get its checksum */
        skb->csum = csum_partial_copy((void *)old,(void *)new,len,skb->csum);

        /* shorten the skb */
        skb_trim(skb,skb->len-diff);

        /* Checksum the shrunk buffer */
        tcp_send_check(th, sk->saddr, sk->daddr,
                   th->doff * 4 + len , skb);
}
