/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Version:	$Id: tcp.c,v 1.1.1.1 2004-04-15 01:13:34 phoenix Exp $
 *
 * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() calls
 *		Alan Cox	:	Set the ACK bit on a reset
 *		Alan Cox	:	Stopped it crashing if it closed while
 *					sk->inuse=1 and was trying to connect
 *					(tcp_err()).
 *		Alan Cox	:	All icmp error handling was broken
 *					pointers passed were wrong and the
 *					socket was looked up backwards. Nobody
 *					tested any icmp error code obviously.
 *		Alan Cox	:	tcp_err() now handled properly. It
 *					wakes people on errors. poll
 *					behaves and the icmp error race
 *					has gone by moving it into sock.c
 *		Alan Cox	:	tcp_send_reset() fixed to work for
 *					everything not just packets for
 *					unknown sockets.
 *		Alan Cox	:	tcp option processing.
 *		Alan Cox	:	Reset tweaked (still not 100%) [Had
 *					syn rule wrong]
 *		Herp Rosmanith	:	More reset fixes
 *		Alan Cox	:	No longer acks invalid rst frames.
 *					Acking any kind of RST is right out.
 *		Alan Cox	:	Sets an ignore me flag on an rst
 *					receive otherwise odd bits of prattle
 *					escape still
 *		Alan Cox	:	Fixed another acking RST frame bug.
 *					Should stop LAN workplace lockups.
 *		Alan Cox	:	Some tidyups using the new skb list
 *					facilities
 *		Alan Cox	:	sk->keepopen now seems to work
 *		Alan Cox	:	Pulls options out correctly on accepts
 *		Alan Cox	:	Fixed assorted sk->rqueue->next errors
 *		Alan Cox	:	PSH doesn't end a TCP read. Switched a
 *					bit to skb ops.
 *		Alan Cox	:	Tidied tcp_data to avoid a potential
 *					nasty.
 *		Alan Cox	:	Added some better commenting, as the
 *					tcp is hard to follow
 *		Alan Cox	:	Removed incorrect check for 20 * psh
 *		Michael O'Reilly:	ack < copied bug fix.
 *		Johannes Stille	:	Misc tcp fixes (not all in yet).
 *		Alan Cox	:	FIN with no memory -> CRASH
 *		Alan Cox	:	Added socket option proto entries.
 *					Also added awareness of them to accept.
 *		Alan Cox	:	Added TCP options (SOL_TCP)
 *		Alan Cox	:	Switched wakeup calls to callbacks,
 *					so the kernel can layer network
 *					sockets.
 *		Alan Cox	:	Use ip_tos/ip_ttl settings.
 *		Alan Cox	:	Handle FIN (more) properly (we hope).
 *		Alan Cox	:	RST frames sent on unsynchronised
 *					state ack error.
 *		Alan Cox	:	Put in missing check for SYN bit.
 *		Alan Cox	:	Added tcp_select_window() aka NET2E
 *					window non shrink trick.
 *		Alan Cox	:	Added a couple of small NET2E timer
 *					fixes
 *		Charles Hedrick	:	TCP fixes
 *		Toomas Tamm	:	TCP window fixes
 *		Alan Cox	:	Small URG fix to rlogin ^C ack fight
 *		Charles Hedrick	:	Rewrote most of it to actually work
 *		Linus		:	Rewrote tcp_read() and URG handling
 *					completely
 *		Gerhard Koerting:	Fixed some missing timer handling
 *		Matthew Dillon	:	Reworked TCP machine states as per RFC
 *		Gerhard Koerting:	PC/TCP workarounds
 *		Adam Caldwell	:	Assorted timer/timing errors
 *		Matthew Dillon	:	Fixed another RST bug
 *		Alan Cox	:	Move to kernel side addressing changes.
 *		Alan Cox	:	Beginning work on TCP fastpathing
 *					(not yet usable)
 *		Arnt Gulbrandsen:	Turbocharged tcp_check() routine.
 *		Alan Cox	:	TCP fast path debugging
 *		Alan Cox	:	Window clamping
 *		Michael Riepe	:	Bug in tcp_check()
 *		Matt Dillon	:	More TCP improvements and RST bug fixes
 *		Matt Dillon	:	Yet more small nasties removed from the
 *					TCP code (Be very nice to this man if
 *					tcp finally works 100%) 8)
 *		Alan Cox	:	BSD accept semantics.
 *		Alan Cox	:	Reset on closedown bug.
 *	Peter De Schrijver	:	ENOTCONN check missing in tcp_sendto().
 *		Michael Pall	:	Handle poll() after URG properly in
 *					all cases.
 *		Michael Pall	:	Undo the last fix in tcp_read_urg()
 *					(multi URG PUSH broke rlogin).
 *		Michael Pall	:	Fix the multi URG PUSH problem in
 *					tcp_readable(), poll() after URG
 *					works now.
 *		Michael Pall	:	recv(...,MSG_OOB) never blocks in the
 *					BSD api.
 *		Alan Cox	:	Changed the semantics of sk->socket to
 *					fix a race and a signal problem with
 *					accept() and async I/O.
 *		Alan Cox	:	Relaxed the rules on tcp_sendto().
 *		Yury Shevchuk	:	Really fixed accept() blocking problem.
 *		Craig I. Hagan	:	Allow for BSD compatible TIME_WAIT for
 *					clients/servers which listen in on
 *					fixed ports.
 *		Alan Cox	:	Cleaned the above up and shrank it to
 *					a sensible code size.
 *		Alan Cox	:	Self connect lockup fix.
 *		Alan Cox	:	No connect to multicast.
 *		Ross Biro	:	Close unaccepted children on master
 *					socket close.
 *		Alan Cox	:	Reset tracing code.
 *		Alan Cox	:	Spurious resets on shutdown.
 *		Alan Cox	:	Giant 15 minute/60 second timer error
 *		Alan Cox	:	Small whoops in polling before an
 *					accept.
 *		Alan Cox	:	Kept the state trace facility since
 *					it's handy for debugging.
 *		Alan Cox	:	More reset handler fixes.
 *		Alan Cox	:	Started rewriting the code based on
 *					the RFC's for other useful protocol
 *					references see: Comer, KA9Q NOS, and
 *					for a reference on the difference
 *					between specifications and how BSD
 *					works see the 4.4lite source.
 *		A.N.Kuznetsov	:	Don't time wait on completion of tidy
 *					close.
 *		Linus Torvalds	:	Fin/Shutdown & copied_seq changes.
 *		Linus Torvalds	:	Fixed BSD port reuse to work first syn
 *		Alan Cox	:	Reimplemented timers as per the RFC
 *					and using multiple timers for sanity.
 *		Alan Cox	:	Small bug fixes, and a lot of new
 *					comments.
 *		Alan Cox	:	Fixed dual reader crash by locking
 *					the buffers (much like datagram.c)
 *		Alan Cox	:	Fixed stuck sockets in probe. A probe
 *					now gets fed up of retrying without
 *					(even a no space) answer.
 *		Alan Cox	:	Extracted closing code better
 *		Alan Cox	:	Fixed the closing state machine to
 *					resemble the RFC.
 *		Alan Cox	:	More 'per spec' fixes.
 *		Jorge Cwik	:	Even faster checksumming.
 *		Alan Cox	:	tcp_data() doesn't ack illegal PSH
 *					only frames. At least one pc tcp stack
 *					generates them.
 *		Alan Cox	:	Cache last socket.
 *		Alan Cox	:	Per route irtt.
 *		Matt Day	:	poll()->select() match BSD precisely on error
 *		Alan Cox	:	New buffers
 *		Marc Tamsky	:	Various sk->prot->retransmits and
 *					sk->retransmits misupdating fixed.
 *					Fixed tcp_write_timeout: stuck close,
 *					and TCP syn retries gets used now.
 *		Mark Yarvis	:	In tcp_read_wakeup(), don't send an
 *					ack if state is TCP_CLOSED.
 *		Alan Cox	:	Look up device on a retransmit - routes may
 *					change. Doesn't yet cope with MSS shrink right
 *					but it's a start!
 *		Marc Tamsky	:	Closing in closing fixes.
 *		Mike Shaver	:	RFC1122 verifications.
 *		Alan Cox	:	rcv_saddr errors.
 *		Alan Cox	:	Block double connect().
 *		Alan Cox	:	Small hooks for enSKIP.
 *		Alexey Kuznetsov:	Path MTU discovery.
 *		Alan Cox	:	Support soft errors.
 *		Alan Cox	:	Fix MTU discovery pathological case
 *					when the remote claims no mtu!
 *		Marc Tamsky	:	TCP_CLOSE fix.
 *		Colin (G3TNE)	:	Send a reset on syn ack replies in
 *					window but wrong (fixes NT lpd problems)
 *		Pedro Roque	:	Better TCP window handling, delayed ack.
 *		Joerg Reuter	:	No modification of locked buffers in
 *					tcp_do_retransmit()
 *		Eric Schenk	:	Changed receiver side silly window
 *					avoidance algorithm to BSD style
 *					algorithm. This doubles throughput
 *					against machines running Solaris,
 *					and seems to result in general
 *					improvement.
 *	Stefan Magdalinski	:	adjusted tcp_readable() to fix FIONREAD
 *	Willy Konynenberg	:	Transparent proxying support.
 *	Mike McLagan		:	Routing by source
 *		Keith Owens	:	Do proper merging with partial SKB's in
 *					tcp_do_sendmsg to avoid burstiness.
 *		Eric Schenk	:	Fix fast close down bug with
 *					shutdown() followed by close().
 *		Andi Kleen	:	Make poll agree with SIGIO
 *	Salvatore Sanfilippo	:	Support SO_LINGER with linger == 1 and
 *					lingertime == 0 (RFC 793 ABORT Call)
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Description of States:
 *
 *	TCP_SYN_SENT		sent a connection request, waiting for ack
 *
 *	TCP_SYN_RECV		received a connection request, sent ack,
 *				waiting for final ack in three-way handshake.
 *
 *	TCP_ESTABLISHED		connection established
 *
 *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
 *				transmission of remaining buffered data
 *
 *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
 *				to shutdown
 *
 *	TCP_CLOSING		both sides have shutdown but we still have
 *				data we have to finish sending
 *
 *	TCP_TIME_WAIT		timeout to catch resent junk before entering
 *				closed, can only be entered from FIN_WAIT2
 *				or CLOSING.  Required because the other end
 *				may not have gotten our last ACK causing it
 *				to retransmit the data packet (which we ignore)
 *
 *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
 *				us to finish writing our data and to shutdown
 *				(we have to close() to move on to LAST_ACK)
 *
 *	TCP_LAST_ACK		our side has shutdown after remote has
 *				shutdown.  There may still be data in our
 *				buffer that we have to finish sending
 *
 *	TCP_CLOSE		socket is finished
 */

#include <linux/config.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <linux/fs.h>
#include <linux/random.h>

#include <net/icmp.h>
#include <net/tcp.h>

#include <asm/uaccess.h>
#include <asm/ioctls.h>

int sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;

struct tcp_mib tcp_statistics[NR_CPUS*2];

kmem_cache_t *tcp_openreq_cachep;
kmem_cache_t *tcp_bucket_cachep;
kmem_cache_t *tcp_timewait_cachep;

atomic_t tcp_orphan_count = ATOMIC_INIT(0);

int sysctl_tcp_mem[3];
int sysctl_tcp_wmem[3] = { 4*1024, 16*1024, 128*1024 };
int sysctl_tcp_rmem[3] = { 4*1024, 87380, 87380*2 };

atomic_t tcp_memory_allocated;	/* Current allocated memory. */
atomic_t tcp_sockets_allocated;	/* Current number of TCP sockets. */

/* Pressure flag: try to collapse.
 * Technical note: it is used by multiple contexts non atomically.
 * All of tcp_mem_schedule() is of this nature: accounting
 * is strict, actions are advisory and have some latency. */
int tcp_memory_pressure;

#define TCP_PAGES(amt) (((amt)+TCP_MEM_QUANTUM-1)/TCP_MEM_QUANTUM)

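/* Charge "size" bytes (rounded up to whole TCP_MEM_QUANTUM units) to the
 * socket's forward allocation and the global TCP memory pool, then decide
 * whether the charge may stand.  "kind" selects the limit to test against:
 * non-zero for receive space, zero for send space.  Returns 1 if the
 * allocation is accepted, 0 if it had to be undone.
 */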
int tcp_mem_schedule(struct sock *sk, int size, int kind)
{
	int amt = TCP_PAGES(size);

	sk->forward_alloc += amt*TCP_MEM_QUANTUM;
	atomic_add(amt, &tcp_memory_allocated);

	/* Under limit. */
	if (atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) {
		if (tcp_memory_pressure)
			tcp_memory_pressure = 0;
		return 1;
	}

	/* Over hard limit. */
	if (atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2]) {
		tcp_enter_memory_pressure();
		goto suppress_allocation;
	}

	/* Under pressure. */
	if (atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[1])
		tcp_enter_memory_pressure();

	if (kind) {
		if (atomic_read(&sk->rmem_alloc) < sysctl_tcp_rmem[0])
			return 1;
	} else {
		if (sk->wmem_queued < sysctl_tcp_wmem[0])
			return 1;
	}

	if (!tcp_memory_pressure ||
	    sysctl_tcp_mem[2] > atomic_read(&tcp_sockets_allocated)
	    * TCP_PAGES(sk->wmem_queued+atomic_read(&sk->rmem_alloc)+
			sk->forward_alloc))
		return 1;

suppress_allocation:

	if (kind == 0) {
		tcp_moderate_sndbuf(sk);

		/* Fail only if socket is _under_ its sndbuf.
		 * In this case we cannot block, so that we have to fail.
		 */
		if (sk->wmem_queued+size >= sk->sndbuf)
			return 1;
	}

	/* Alas. Undo changes. */
	sk->forward_alloc -= amt*TCP_MEM_QUANTUM;
	atomic_sub(amt, &tcp_memory_allocated);
	return 0;
}

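/* Return whole quanta held in sk->forward_alloc to the global pool,
 * keeping only the sub-quantum remainder, and clear the pressure flag
 * once the pool is back under the low threshold.
 */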
void __tcp_mem_reclaim(struct sock *sk)
{
	if (sk->forward_alloc >= TCP_MEM_QUANTUM) {
		atomic_sub(sk->forward_alloc/TCP_MEM_QUANTUM, &tcp_memory_allocated);
		sk->forward_alloc &= (TCP_MEM_QUANTUM-1);
		if (tcp_memory_pressure &&
		    atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0])
			tcp_memory_pressure = 0;
	}
}

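/* Destructor for receive-queue skbs: uncharge the freed skb from
 * sk->rmem_alloc and hand its space back to the socket's forward
 * allocation.
 */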
void tcp_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	atomic_sub(skb->truesize, &sk->rmem_alloc);
	sk->forward_alloc += skb->truesize;
}

/*
 * LISTEN is a special case for poll..
 */
static __inline__ unsigned int tcp_listen_poll(struct sock *sk, poll_table *wait)
{
	return sk->tp_pinfo.af_tcp.accept_queue ? (POLLIN | POLLRDNORM) : 0;
}

/*
 *	Wait for a TCP event.
 *
 *	Note that we don't need to lock the socket, as the upper poll layers
 *	take care of normal races (between the test and the event) and we don't
 *	go look at any of the socket buffers directly.
 */
unsigned int tcp_poll(struct file * file, struct socket *sock, poll_table *wait)
{
	unsigned int mask;
	struct sock *sk = sock->sk;
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);

	poll_wait(file, sk->sleep, wait);
	if (sk->state == TCP_LISTEN)
		return tcp_listen_poll(sk, wait);

	/* Socket is not locked. We are protected from async events
	   by poll logic and correct handling of state changes
	   made by other threads is impossible in any case.
	 */

	mask = 0;
	if (sk->err)
		mask = POLLERR;

	/*
	 * POLLHUP is certainly not done right. But poll() doesn't
	 * have a notion of HUP in just one direction, and for a
	 * socket the read side is more interesting.
	 *
	 * Some poll() documentation says that POLLHUP is incompatible
	 * with the POLLOUT/POLLWR flags, so somebody should check this
	 * all. But careful, it tends to be safer to return too many
	 * bits than too few, and you can easily break real applications
	 * if you don't tell them that something has hung up!
	 *
	 * Check-me.
	 *
	 * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
	 * our fs/select.c). It means that after we received EOF,
	 * poll always returns immediately, making it impossible to poll()
	 * on write() in state CLOSE_WAIT. One solution is evident --- to
	 * set POLLHUP if and only if shutdown has been made in both
	 * directions.  Actually, it is interesting to look how Solaris
	 * and DUX solve this dilemma. I would prefer, if POLLHUP were
	 * maskable, then we could set it on SND_SHUTDOWN. BTW examples
	 * given in Stevens' books assume exactly this behaviour, it
	 * explains why POLLHUP is incompatible with POLLOUT.	--ANK
	 *
	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
	 * blocking on fresh not-connected or disconnected socket. --ANK
	 */
	if (sk->shutdown == SHUTDOWN_MASK || sk->state == TCP_CLOSE)
		mask |= POLLHUP;
	if (sk->shutdown & RCV_SHUTDOWN)
		mask |= POLLIN | POLLRDNORM;

	/* Connected? */
	if ((1 << sk->state) & ~(TCPF_SYN_SENT|TCPF_SYN_RECV)) {
		/* Potential race condition. If read of tp below will
		 * escape above sk->state, we can be illegally awakened
		 * in SYN_* states. */
		if ((tp->rcv_nxt != tp->copied_seq) &&
		    (tp->urg_seq != tp->copied_seq ||
		     tp->rcv_nxt != tp->copied_seq+1 ||
		     sk->urginline || !tp->urg_data))
			mask |= POLLIN | POLLRDNORM;

		if (!(sk->shutdown & SEND_SHUTDOWN)) {
			if (tcp_wspace(sk) >= tcp_min_write_space(sk)) {
				mask |= POLLOUT | POLLWRNORM;
			} else {  /* send SIGIO later */
				set_bit(SOCK_ASYNC_NOSPACE, &sk->socket->flags);
				set_bit(SOCK_NOSPACE, &sk->socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost.
				 */
				if (tcp_wspace(sk) >= tcp_min_write_space(sk))
					mask |= POLLOUT | POLLWRNORM;
			}
		}

		if (tp->urg_data & TCP_URG_VALID)
			mask |= POLLPRI;
	}
	return mask;
}

/*
 *	TCP socket write_space callback.
 */
void tcp_write_space(struct sock *sk)
{
	struct socket *sock = sk->socket;

	if (tcp_wspace(sk) >= tcp_min_write_space(sk) && sock) {
		clear_bit(SOCK_NOSPACE, &sock->flags);

		if (sk->sleep && waitqueue_active(sk->sleep))
			wake_up_interruptible(sk->sleep);

		if (sock->fasync_list && !(sk->shutdown&SEND_SHUTDOWN))
			sock_wake_async(sock, 2, POLL_OUT);
	}
}

int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
	int answ;

	switch(cmd) {
	case SIOCINQ:
		if (sk->state == TCP_LISTEN)
			return(-EINVAL);

		lock_sock(sk);
		if ((1<<sk->state) & (TCPF_SYN_SENT|TCPF_SYN_RECV))
			answ = 0;
		else if (sk->urginline || !tp->urg_data ||
			 before(tp->urg_seq,tp->copied_seq) ||
			 !before(tp->urg_seq,tp->rcv_nxt)) {
			answ = tp->rcv_nxt - tp->copied_seq;

			/* Subtract 1, if FIN is in queue. */
			if (answ && !skb_queue_empty(&sk->receive_queue))
				answ -= ((struct sk_buff*)sk->receive_queue.prev)->h.th->fin;
		} else
			answ = tp->urg_seq - tp->copied_seq;
		release_sock(sk);
		break;
	case SIOCATMARK:
		{
			answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
			break;
		}
	case SIOCOUTQ:
		if (sk->state == TCP_LISTEN)
			return(-EINVAL);

		if ((1<<sk->state) & (TCPF_SYN_SENT|TCPF_SYN_RECV))
			answ = 0;
		else
			answ = tp->write_seq - tp->snd_una;
		break;
	default:
		return(-ENOIOCTLCMD);
	};

	return put_user(answ, (int *)arg);
}


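/* Move the socket into the LISTEN state: set up the accept queue and the
 * SYN-queue hash table, then claim the port and hash the socket.  Returns
 * 0 on success, -ENOMEM or -EADDRINUSE on failure.
 */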
int tcp_listen_start(struct sock *sk)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
	struct tcp_listen_opt *lopt;

	sk->max_ack_backlog = 0;
	sk->ack_backlog = 0;
	tp->accept_queue = tp->accept_queue_tail = NULL;
	tp->syn_wait_lock = RW_LOCK_UNLOCKED;
	tcp_delack_init(tp);

	lopt = kmalloc(sizeof(struct tcp_listen_opt), GFP_KERNEL);
	if (!lopt)
		return -ENOMEM;

	memset(lopt, 0, sizeof(struct tcp_listen_opt));
	for (lopt->max_qlen_log = 6; ; lopt->max_qlen_log++)
		if ((1<<lopt->max_qlen_log) >= sysctl_max_syn_backlog)
			break;
	get_random_bytes(&lopt->hash_rnd, 4);

	write_lock_bh(&tp->syn_wait_lock);
	tp->listen_opt = lopt;
	write_unlock_bh(&tp->syn_wait_lock);

	/* There is a race window here: we announce ourselves listening,
	 * but this transition is still not validated by get_port().
	 * It is OK, because this socket enters the hash table only
	 * after validation is complete.
	 */
	sk->state = TCP_LISTEN;
	if (sk->prot->get_port(sk, sk->num) == 0) {
		sk->sport = htons(sk->num);

		sk_dst_reset(sk);
		sk->prot->hash(sk);

		return 0;
	}

	sk->state = TCP_CLOSE;
	write_lock_bh(&tp->syn_wait_lock);
	tp->listen_opt = NULL;
	write_unlock_bh(&tp->syn_wait_lock);
	kfree(lopt);
	return -EADDRINUSE;
}

/*
 *	This routine closes sockets which have been at least partially
 *	opened, but not yet accepted.
 */
static void tcp_listen_stop (struct sock *sk)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
	struct tcp_listen_opt *lopt = tp->listen_opt;
	struct open_request *acc_req = tp->accept_queue;
	struct open_request *req;
	int i;

	tcp_delete_keepalive_timer(sk);

	/* make all the listen_opt local to us */
	write_lock_bh(&tp->syn_wait_lock);
	tp->listen_opt = NULL;
	write_unlock_bh(&tp->syn_wait_lock);
	tp->accept_queue = tp->accept_queue_tail = NULL;

	if (lopt->qlen) {
		for (i=0; i<TCP_SYNQ_HSIZE; i++) {
			while ((req = lopt->syn_table[i]) != NULL) {
				lopt->syn_table[i] = req->dl_next;
				lopt->qlen--;
				tcp_openreq_free(req);

				/* Following specs, it would be better either to send FIN
				 * (and enter FIN-WAIT-1, it is normal close)
				 * or to send active reset (abort).
				 * Certainly, it is pretty dangerous while synflood, but it is
				 * bad justification for our negligence 8)
				 * To be honest, we are not able to make either
				 * of the variants now.			--ANK
				 */
			}
		}
	}
	BUG_TRAP(lopt->qlen == 0);

	kfree(lopt);

	while ((req=acc_req) != NULL) {
		struct sock *child = req->sk;

		acc_req = req->dl_next;

		local_bh_disable();
		bh_lock_sock(child);
		BUG_TRAP(child->lock.users==0);
		sock_hold(child);

		tcp_disconnect(child, O_NONBLOCK);

		sock_orphan(child);

		atomic_inc(&tcp_orphan_count);

		tcp_destroy_sock(child);

		bh_unlock_sock(child);
		local_bh_enable();
		sock_put(child);

		tcp_acceptq_removed(sk);
		tcp_openreq_fastfree(req);
	}
	BUG_TRAP(sk->ack_backlog == 0);
}

/*
 *	Wait for a socket to get into the connected state
 *
 *	Note: Must be called with the socket locked.
 */
static int wait_for_tcp_connect(struct sock * sk, int flags, long *timeo_p)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	while((1 << sk->state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
		if(sk->err)
			return sock_error(sk);
		if((1 << sk->state) &
		   ~(TCPF_SYN_SENT | TCPF_SYN_RECV))
			return -EPIPE;
		if(!*timeo_p)
			return -EAGAIN;
		if(signal_pending(tsk))
			return sock_intr_errno(*timeo_p);

		__set_task_state(tsk, TASK_INTERRUPTIBLE);
		add_wait_queue(sk->sleep, &wait);
		sk->tp_pinfo.af_tcp.write_pending++;

		release_sock(sk);
		*timeo_p = schedule_timeout(*timeo_p);
		lock_sock(sk);

		__set_task_state(tsk, TASK_RUNNING);
		remove_wait_queue(sk->sleep, &wait);
		sk->tp_pinfo.af_tcp.write_pending--;
	}
	return 0;
}

static inline int tcp_memory_free(struct sock *sk)
{
	return sk->wmem_queued < sk->sndbuf;
}

/*
 *	Wait for more memory for a socket
 */
static int wait_for_tcp_memory(struct sock * sk, long *timeo)
{
	int err = 0;
	long vm_wait = 0;
	long current_timeo = *timeo;
	DECLARE_WAITQUEUE(wait, current);

	if (tcp_memory_free(sk))
		current_timeo = vm_wait = (net_random()%(HZ/5))+2;

	add_wait_queue(sk->sleep, &wait);
	for (;;) {
		set_bit(SOCK_ASYNC_NOSPACE, &sk->socket->flags);

		set_current_state(TASK_INTERRUPTIBLE);

		if (sk->err || (sk->shutdown & SEND_SHUTDOWN))
			goto do_error;
		if (!*timeo)
			goto do_nonblock;
		if (signal_pending(current))
			goto do_interrupted;
		clear_bit(SOCK_ASYNC_NOSPACE, &sk->socket->flags);
		if (tcp_memory_free(sk) && !vm_wait)
			break;

		set_bit(SOCK_NOSPACE, &sk->socket->flags);
		sk->tp_pinfo.af_tcp.write_pending++;
		release_sock(sk);
		if (!tcp_memory_free(sk) || vm_wait)
			current_timeo = schedule_timeout(current_timeo);
		lock_sock(sk);
		sk->tp_pinfo.af_tcp.write_pending--;

		if (vm_wait) {
			vm_wait -= current_timeo;
			current_timeo = *timeo;
			if (current_timeo != MAX_SCHEDULE_TIMEOUT &&
			    (current_timeo -= vm_wait) < 0)
				current_timeo = 0;
			vm_wait = 0;
		}
		*timeo = current_timeo;
	}
out:
	current->state = TASK_RUNNING;
	remove_wait_queue(sk->sleep, &wait);
	return err;

do_error:
	err = -EPIPE;
	goto out;
do_nonblock:
	err = -EAGAIN;
	goto out;
do_interrupted:
	err = sock_intr_errno(*timeo);
	goto out;
}

ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset, size_t psize, int flags);

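/* True if (page, off) directly continues fragment i-1 of the skb, so new
 * data can be merged into that fragment rather than consuming a new slot.
 */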
static inline int
can_coalesce(struct sk_buff *skb, int i, struct page *page, int off)
{
	if (i) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
		return page == frag->page &&
		       off == frag->page_offset+frag->size;
	}
	return 0;
}

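/* Install "page" as fragment i of the skb and bump the fragment count
 * (callers take the page reference first).
 */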
static inline void
fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
	frag->page = page;
	frag->page_offset = off;
	frag->size = size;
	skb_shinfo(skb)->nr_frags = i+1;
}

static inline void tcp_mark_push(struct tcp_opt *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
	tp->pushed_seq = tp->write_seq;
}

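/* Push when more than half of the largest window ever seen has
 * accumulated since the last PSH mark.
 */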
static inline int forced_push(struct tcp_opt *tp)
{
	return after(tp->write_seq, tp->pushed_seq + (tp->max_window>>1));
}

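/* Queue a fresh, empty skb at the tail of the write queue, charge it to
 * the socket, and make it the send head if there is none yet.
 */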
static inline void
skb_entail(struct sock *sk, struct tcp_opt *tp, struct sk_buff *skb)
{
	skb->csum = 0;
	TCP_SKB_CB(skb)->seq = tp->write_seq;
	TCP_SKB_CB(skb)->end_seq = tp->write_seq;
	TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
	TCP_SKB_CB(skb)->sacked = 0;
	__skb_queue_tail(&sk->write_queue, skb);
	tcp_charge_skb(sk, skb);
	if (tp->send_head == NULL)
		tp->send_head = skb;
}

static inline void
tcp_mark_urg(struct tcp_opt *tp, int flags, struct sk_buff *skb)
{
	if (flags & MSG_OOB) {
		tp->urg_mode = 1;
		tp->snd_up = tp->write_seq;
		TCP_SKB_CB(skb)->sacked |= TCPCB_URG;
	}
}

static inline void
tcp_push(struct sock *sk, struct tcp_opt *tp, int flags, int mss_now, int nonagle)
{
	if (tp->send_head) {
		struct sk_buff *skb = sk->write_queue.prev;
		if (!(flags&MSG_MORE) || forced_push(tp))
			tcp_mark_push(tp, skb);
		tcp_mark_urg(tp, flags, skb);
		__tcp_push_pending_frames(sk, tp, mss_now, (flags&MSG_MORE) ? 2 : nonagle);
	}
}

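/* Translate a send-path error for return to the caller: -EPIPE picks up
 * any pending socket error, and raises SIGPIPE unless MSG_NOSIGNAL.
 */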
static int tcp_error(struct sock *sk, int flags, int err)
{
	if (err == -EPIPE)
		err = sock_error(sk) ? : -EPIPE;
	if (err == -EPIPE && !(flags&MSG_NOSIGNAL))
		send_sig(SIGPIPE, current, 0);
	return err;
}

ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset, size_t psize, int flags)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
	int mss_now;
	int err;
	ssize_t copied;
	long timeo = sock_sndtimeo(sk, flags&MSG_DONTWAIT);

	/* Wait for a connection to finish. */
	if ((1 << sk->state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
		if((err = wait_for_tcp_connect(sk, 0, &timeo)) != 0)
			goto out_err;

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->socket->flags);

	mss_now = tcp_current_mss(sk);
	copied = 0;

	err = -EPIPE;
	if (sk->err || (sk->shutdown & SEND_SHUTDOWN))
		goto do_error;

	while (psize > 0) {
		struct sk_buff *skb = sk->write_queue.prev;
		int offset, size, copy, i;
		struct page *page;

		page = pages[poffset/PAGE_SIZE];
		offset = poffset % PAGE_SIZE;
		size = min_t(size_t, psize, PAGE_SIZE-offset);

		if (tp->send_head==NULL || (copy = mss_now - skb->len) <= 0) {
new_segment:
			if (!tcp_memory_free(sk))
				goto wait_for_sndbuf;

			skb = tcp_alloc_pskb(sk, 0, tp->mss_cache, sk->allocation);
			if (skb == NULL)
				goto wait_for_memory;

			skb_entail(sk, tp, skb);
			copy = mss_now;
		}

		if (copy > size)
			copy = size;

		i = skb_shinfo(skb)->nr_frags;
		if (can_coalesce(skb, i, page, offset)) {
			skb_shinfo(skb)->frags[i-1].size += copy;
		} else if (i < MAX_SKB_FRAGS) {
			get_page(page);
			fill_page_desc(skb, i, page, offset, copy);
		} else {
			tcp_mark_push(tp, skb);
			goto new_segment;
		}

		skb->len += copy;
		skb->data_len += copy;
		skb->ip_summed = CHECKSUM_HW;
		tp->write_seq += copy;
		TCP_SKB_CB(skb)->end_seq += copy;

		if (!copied)
			TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;

		copied += copy;
		poffset += copy;
		if (!(psize -= copy))
			goto out;

		if (skb->len != mss_now || (flags&MSG_OOB))
			continue;

		if (forced_push(tp)) {
			tcp_mark_push(tp, skb);
			__tcp_push_pending_frames(sk, tp, mss_now, 1);
		} else if (skb == tp->send_head)
			tcp_push_one(sk, mss_now);
		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->socket->flags);
wait_for_memory:
		if (copied)
			tcp_push(sk, tp, flags&~MSG_MORE, mss_now, 1);

		if ((err = wait_for_tcp_memory(sk, &timeo)) != 0)
			goto do_error;

		mss_now = tcp_current_mss(sk);
	}

out:
	if (copied)
		tcp_push(sk, tp, flags, mss_now, tp->nonagle);
	return copied;

do_error:
	if (copied)
		goto out;
out_err:
	return tcp_error(sk, flags, err);
}

ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
{
	ssize_t res;
	struct sock *sk = sock->sk;

#define TCP_ZC_CSUM_FLAGS (NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM)

	if (!(sk->route_caps & NETIF_F_SG) ||
	    !(sk->route_caps & TCP_ZC_CSUM_FLAGS))
		return sock_no_sendpage(sock, page, offset, size, flags);

#undef TCP_ZC_CSUM_FLAGS

	lock_sock(sk);
	TCP_CHECK_TIMER(sk);
	res = do_tcp_sendpages(sk, &page, offset, size, flags);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return res;
}

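/* Per-socket cache of the last partially filled page used when copying
 * user data on the send path, plus the offset of its first free byte.
 */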
#define TCP_PAGE(sk)	(sk->tp_pinfo.af_tcp.sndmsg_page)
#define TCP_OFF(sk)	(sk->tp_pinfo.af_tcp.sndmsg_off)

static inline int
tcp_copy_to_page(struct sock *sk, char *from, struct sk_buff *skb,
		 struct page *page, int off, int copy)
{
	int err = 0;
	unsigned int csum;

	csum = csum_and_copy_from_user(from, page_address(page)+off,
				       copy, 0, &err);
	if (!err) {
		if (skb->ip_summed == CHECKSUM_NONE)
			skb->csum = csum_block_add(skb->csum, csum, skb->len);
		skb->len += copy;
		skb->data_len += copy;
		skb->truesize += copy;
		sk->wmem_queued += copy;
		sk->forward_alloc -= copy;
	}
	return err;
}

static inline int
|
static inline int
|
skb_add_data(struct sk_buff *skb, char *from, int copy)
|
skb_add_data(struct sk_buff *skb, char *from, int copy)
|
{
|
{
|
int err = 0;
|
int err = 0;
|
unsigned int csum;
|
unsigned int csum;
|
int off = skb->len;
|
int off = skb->len;
|
|
|
csum = csum_and_copy_from_user(from, skb_put(skb, copy),
|
csum = csum_and_copy_from_user(from, skb_put(skb, copy),
|
copy, 0, &err);
|
copy, 0, &err);
|
if (!err) {
|
if (!err) {
|
skb->csum = csum_block_add(skb->csum, csum, off);
|
skb->csum = csum_block_add(skb->csum, csum, off);
|
return 0;
|
return 0;
|
}
|
}
|
|
|
__skb_trim(skb, off);
|
__skb_trim(skb, off);
|
return -EFAULT;
|
return -EFAULT;
|
}
|
}
|
|
|
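/*
 * Illustrative note (a sketch, not part of the build): both helpers fold
 * the checksum of the newly copied block into skb->csum as they copy from
 * user space, so no second pass over the data is needed. The offset given
 * to csum_block_add() is the byte position of the new block within the
 * packet; the Internet checksum is 16-bit ones'-complement, so a block
 * starting at an odd offset has to be byte-swapped before folding, which
 * is what csum_block_add() accounts for:
 *
 *	// Assumed values, for illustration only.
 *	unsigned int csum = csum_partial(buf, len, 0);
 *	skb->csum = csum_block_add(skb->csum, csum, skb->len);
 */
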
static inline int select_size(struct sock *sk, struct tcp_opt *tp)
{
	int tmp = tp->mss_cache;

	if (sk->route_caps&NETIF_F_SG) {
		int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);

		if (tmp >= pgbreak && tmp <= pgbreak + (MAX_SKB_FRAGS-1)*PAGE_SIZE)
			tmp = pgbreak;
	}
	return tmp;
}

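/*
 * Worked example (assumed numbers, illustrative only): on an SG-capable
 * route, select_size() keeps the linear part of a new skb within a single
 * page. With PAGE_SIZE = 4096, SKB_MAX_HEAD(MAX_TCP_HEADER) is a little
 * under 4096 once headers and the shared info are subtracted. A 9000-byte
 * jumbo-frame MSS lands inside the window
 *
 *	[pgbreak, pgbreak + (MAX_SKB_FRAGS-1)*PAGE_SIZE]
 *
 * so the allocation is trimmed back to pgbreak and the bytes beyond it are
 * attached later as page fragments instead of one huge linear buffer.
 */
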
int tcp_sendmsg(struct sock *sk, struct msghdr *msg, int size)
{
	struct iovec *iov;
	struct tcp_opt *tp;
	struct sk_buff *skb;
	int iovlen, flags;
	int mss_now;
	int err, copied;
	long timeo;

	tp = &(sk->tp_pinfo.af_tcp);

	lock_sock(sk);
	TCP_CHECK_TIMER(sk);

	flags = msg->msg_flags;
	timeo = sock_sndtimeo(sk, flags&MSG_DONTWAIT);

	/* Wait for a connection to finish. */
	if ((1 << sk->state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
		if((err = wait_for_tcp_connect(sk, flags, &timeo)) != 0)
			goto out_err;

	/* This should be in poll */
	clear_bit(SOCK_ASYNC_NOSPACE, &sk->socket->flags);

	mss_now = tcp_current_mss(sk);

	/* Ok commence sending. */
	iovlen = msg->msg_iovlen;
	iov = msg->msg_iov;
	copied = 0;

	err = -EPIPE;
	if (sk->err || (sk->shutdown&SEND_SHUTDOWN))
		goto do_error;

	while (--iovlen >= 0) {
		int seglen=iov->iov_len;
		unsigned char * from=iov->iov_base;

		iov++;

		while (seglen > 0) {
			int copy;

			skb = sk->write_queue.prev;

			if (tp->send_head == NULL ||
			    (copy = mss_now - skb->len) <= 0) {

new_segment:
				/* Allocate a new segment. If the interface is SG,
				 * allocate an skb that fits into a single page.
				 */
				if (!tcp_memory_free(sk))
					goto wait_for_sndbuf;

				skb = tcp_alloc_pskb(sk, select_size(sk, tp), 0, sk->allocation);
				if (skb == NULL)
					goto wait_for_memory;

				skb_entail(sk, tp, skb);
				copy = mss_now;
			}

			/* Try to append data to the end of skb. */
			if (copy > seglen)
				copy = seglen;

			/* Where to copy to? */
			if (skb_tailroom(skb) > 0) {
				/* We have some space in skb head. Superb! */
				if (copy > skb_tailroom(skb))
					copy = skb_tailroom(skb);
				if ((err = skb_add_data(skb, from, copy)) != 0)
					goto do_fault;
			} else {
				int merge = 0;
				int i = skb_shinfo(skb)->nr_frags;
				struct page *page = TCP_PAGE(sk);
				int off = TCP_OFF(sk);

				if (can_coalesce(skb, i, page, off) && off != PAGE_SIZE) {
					/* We can extend the last page fragment. */
					merge = 1;
				} else if (i == MAX_SKB_FRAGS ||
					   (i == 0 && !(sk->route_caps&NETIF_F_SG))) {
					/* Need to add new fragment and cannot
					 * do this because interface is non-SG,
					 * or because all the page slots are busy.
					 */
					tcp_mark_push(tp, skb);
					goto new_segment;
				} else if (page) {
					/* If page is cached, align
					 * offset to L1 cache boundary
					 */
					off = (off+L1_CACHE_BYTES-1)&~(L1_CACHE_BYTES-1);
					if (off == PAGE_SIZE) {
						put_page(page);
						TCP_PAGE(sk) = page = NULL;
					}
				}

				if (!page) {
					/* Allocate new cache page. */
					if (!(page=tcp_alloc_page(sk)))
						goto wait_for_memory;
					off = 0;
				}

				if (copy > PAGE_SIZE-off)
					copy = PAGE_SIZE-off;

				/* Time to copy data. We are close to the end! */
				err = tcp_copy_to_page(sk, from, skb, page, off, copy);
				if (err) {
					/* If this page was new, give it to the
					 * socket so it does not get leaked.
					 */
					if (TCP_PAGE(sk) == NULL) {
						TCP_PAGE(sk) = page;
						TCP_OFF(sk) = 0;
					}
					goto do_error;
				}

				/* Update the skb. */
				if (merge) {
					skb_shinfo(skb)->frags[i-1].size += copy;
				} else {
					fill_page_desc(skb, i, page, off, copy);
					if (TCP_PAGE(sk)) {
						get_page(page);
					} else if (off + copy < PAGE_SIZE) {
						get_page(page);
						TCP_PAGE(sk) = page;
					}
				}

				TCP_OFF(sk) = off+copy;
			}

			if (!copied)
				TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;

			tp->write_seq += copy;
			TCP_SKB_CB(skb)->end_seq += copy;

			from += copy;
			copied += copy;
			if ((seglen -= copy) == 0 && iovlen == 0)
				goto out;

			if (skb->len != mss_now || (flags&MSG_OOB))
				continue;

			if (forced_push(tp)) {
				tcp_mark_push(tp, skb);
				__tcp_push_pending_frames(sk, tp, mss_now, 1);
			} else if (skb == tp->send_head)
				tcp_push_one(sk, mss_now);
			continue;

wait_for_sndbuf:
			set_bit(SOCK_NOSPACE, &sk->socket->flags);
wait_for_memory:
			if (copied)
				tcp_push(sk, tp, flags&~MSG_MORE, mss_now, 1);

			if ((err = wait_for_tcp_memory(sk, &timeo)) != 0)
				goto do_error;

			mss_now = tcp_current_mss(sk);
		}
	}

out:
	if (copied)
		tcp_push(sk, tp, flags, mss_now, tp->nonagle);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return copied;

do_fault:
	if (skb->len == 0) {
		if (tp->send_head == skb)
			tp->send_head = NULL;
		__skb_unlink(skb, skb->list);
		tcp_free_skb(sk, skb);
	}

do_error:
	if (copied)
		goto out;
out_err:
	err = tcp_error(sk, flags, err);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return err;
}

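/*
 * Usage sketch (illustrative only, assumed descriptors): the PSH handling
 * above means a sender can batch small writes without forcing a push on
 * each one. From user space this corresponds to MSG_MORE:
 *
 *	// Assumed: fd is a connected TCP socket.
 *	send(fd, hdr, hdr_len, MSG_MORE);   // more data coming, delay push
 *	send(fd, body, body_len, 0);        // final piece, segment is pushed
 */
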
/*
 *	Handle reading urgent data. BSD has very simple semantics for
 *	this, no blocking and very strange errors 8)
 */

static int tcp_recv_urg(struct sock * sk, long timeo,
			struct msghdr *msg, int len, int flags,
			int *addr_len)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);

	/* No URG data to read. */
	if (sk->urginline || !tp->urg_data || tp->urg_data == TCP_URG_READ)
		return -EINVAL;	/* Yes this is right ! */

	if (sk->state==TCP_CLOSE && !sk->done)
		return -ENOTCONN;

	if (tp->urg_data & TCP_URG_VALID) {
		int err = 0;
		char c = tp->urg_data;

		if (!(flags & MSG_PEEK))
			tp->urg_data = TCP_URG_READ;

		/* Read urgent data. */
		msg->msg_flags|=MSG_OOB;

		if(len>0) {
			if (!(flags & MSG_TRUNC))
				err = memcpy_toiovec(msg->msg_iov, &c, 1);
			len = 1;
		} else
			msg->msg_flags|=MSG_TRUNC;

		return err ? -EFAULT : len;
	}

	if (sk->state == TCP_CLOSE || (sk->shutdown & RCV_SHUTDOWN))
		return 0;

	/* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
	 * the available implementations agree in this case:
	 * this call should never block, independent of the
	 * blocking state of the socket.
	 * Mike <pall@rz.uni-karlsruhe.de>
	 */
	return -EAGAIN;
}

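/*
 * Usage sketch (illustrative only): the error mapping above is visible
 * directly from user space. Assuming fd is a connected TCP socket:
 *
 *	char c;
 *	ssize_t n = recv(fd, &c, 1, MSG_OOB);
 *	// n == 1         -> the urgent byte was pending and is now consumed
 *	// n < 0, EAGAIN  -> no urgent byte right now; the call never blocks
 *	// n < 0, EINVAL  -> SO_OOBINLINE is set, or the byte was already read
 */
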
/*
 *	Release a skb if it is no longer needed. This routine
 *	must be called with interrupts disabled or with the
 *	socket locked so that the sk_buff queue operation is ok.
 */

static inline void tcp_eat_skb(struct sock *sk, struct sk_buff * skb)
{
	__skb_unlink(skb, &sk->receive_queue);
	__kfree_skb(skb);
}

/* Clean up the receive buffer for full frames taken by the user,
 * then send an ACK if necessary. COPIED is the number of bytes
 * tcp_recvmsg has given to the user so far; it speeds up the
 * calculation of whether or not we must ACK for the sake of
 * a window update.
 */
static void cleanup_rbuf(struct sock *sk, int copied)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
	int time_to_ack = 0;

#if TCP_DEBUG
	struct sk_buff *skb = skb_peek(&sk->receive_queue);

	BUG_TRAP(skb==NULL || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
#endif

	if (tcp_ack_scheduled(tp)) {
		/* Delayed ACKs frequently hit locked sockets during bulk receive. */
		if (tp->ack.blocked
		    /* Once-per-two-segments ACK was not sent by tcp_input.c */
		    || tp->rcv_nxt - tp->rcv_wup > tp->ack.rcv_mss
		    /*
		     * If this read emptied the read buffer, we send an ACK when
		     * the connection is not bidirectional, the user drained the
		     * receive buffer, and there was a small segment in the queue.
		     */
		    || (copied > 0 &&
			(tp->ack.pending&TCP_ACK_PUSHED) &&
			!tp->ack.pingpong &&
			atomic_read(&sk->rmem_alloc) == 0)) {
			time_to_ack = 1;
		}
	}

	/* We send an ACK if we can now advertise a non-zero window
	 * which has been raised "significantly".
	 *
	 * Even if the window was raised up to infinity, do not send a
	 * window-open ACK in states where we will not receive more data.
	 * It is useless.
	 */
	if(copied > 0 && !time_to_ack && !(sk->shutdown&RCV_SHUTDOWN)) {
		__u32 rcv_window_now = tcp_receive_window(tp);

		/* Optimize, __tcp_select_window() is not cheap. */
		if (2*rcv_window_now <= tp->window_clamp) {
			__u32 new_window = __tcp_select_window(sk);

			/* Send an ACK now if this read freed lots of space
			 * in our buffer. new_window is the window we would
			 * advertise now; we may do so if it is not less than
			 * the current one. "Lots" means "at least twice" here.
			 */
			if(new_window && new_window >= 2*rcv_window_now)
				time_to_ack = 1;
		}
	}
	if (time_to_ack)
		tcp_send_ack(sk);
}

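/*
 * Worked example (assumed numbers, illustrative only): suppose
 * window_clamp is 64KB and the advertised window had shrunk to
 * rcv_window_now = 8KB while the receiver was slow. Since 2*8KB <= 64KB,
 * __tcp_select_window() is consulted; if the read just freed enough
 * buffer that new_window comes back as, say, 32KB >= 2*8KB, an ACK is
 * sent immediately to reopen the window instead of waiting for the
 * delayed-ACK timer.
 */
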
/* Now socket state including sk->err is changed only under lock,
 * hence we may omit checks after joining wait queue.
 * We check receive queue before schedule() only as optimization;
 * it is very likely that release_sock() added new data.
 */

static long tcp_data_wait(struct sock *sk, long timeo)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(sk->sleep, &wait);

	__set_current_state(TASK_INTERRUPTIBLE);

	set_bit(SOCK_ASYNC_WAITDATA, &sk->socket->flags);
	release_sock(sk);

	if (skb_queue_empty(&sk->receive_queue))
		timeo = schedule_timeout(timeo);

	lock_sock(sk);
	clear_bit(SOCK_ASYNC_WAITDATA, &sk->socket->flags);

	remove_wait_queue(sk->sleep, &wait);
	__set_current_state(TASK_RUNNING);
	return timeo;
}

static void tcp_prequeue_process(struct sock *sk)
{
	struct sk_buff *skb;
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);

	net_statistics[smp_processor_id()*2+1].TCPPrequeued += skb_queue_len(&tp->ucopy.prequeue);

	/* RX process wants to run with disabled BHs, though it is not necessary */
	local_bh_disable();
	while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
		sk->backlog_rcv(sk, skb);
	local_bh_enable();

	/* Clear memory counter. */
	tp->ucopy.memory = 0;
}

static inline
struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
{
	struct sk_buff *skb;
	u32 offset;

	skb_queue_walk(&sk->receive_queue, skb) {
		offset = seq - TCP_SKB_CB(skb)->seq;
		if (skb->h.th->syn)
			offset--;
		if (offset < skb->len || skb->h.th->fin) {
			*off = offset;
			return skb;
		}
	}
	return NULL;
}

/*
 * This routine provides an alternative to tcp_recvmsg() for routines
 * that would like to handle copying from skbuffs directly in 'sendfile'
 * fashion.
 * Note:
 *	- It is assumed that the socket was locked by the caller.
 *	- The routine does not block.
 *	- At present, there is no support for reading OOB data
 *	  or for 'peeking' the socket using this routine
 *	  (although both would be easy to implement).
 */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor)
{
	struct sk_buff *skb;
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
	u32 seq = tp->copied_seq;
	u32 offset;
	int copied = 0;

	if (sk->state == TCP_LISTEN)
		return -ENOTCONN;
	while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
		if (offset < skb->len) {
			size_t used, len;

			len = skb->len - offset;
			/* Stop reading if we hit a patch of urgent data */
			if (tp->urg_data) {
				u32 urg_offset = tp->urg_seq - seq;
				if (urg_offset < len)
					len = urg_offset;
				if (!len)
					break;
			}
			used = recv_actor(desc, skb, offset, len);
			if (used <= len) {
				seq += used;
				copied += used;
				offset += used;
			}
			if (offset != skb->len)
				break;
		}
		if (skb->h.th->fin) {
			tcp_eat_skb(sk, skb);
			++seq;
			break;
		}
		tcp_eat_skb(sk, skb);
		if (!desc->count)
			break;
	}
	tp->copied_seq = seq;
	/* Clean up data we have read: This will do ACK frames. */
	if (copied)
		cleanup_rbuf(sk, copied);
	return copied;
}

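/*
 * Usage sketch (illustrative only; the actor name and its bookkeeping are
 * made up for the example). A recv_actor is handed each in-order skb and
 * returns how many bytes it consumed, while desc->count ticks down the
 * amount the caller still wants:
 *
 *	static int count_actor(read_descriptor_t *desc, struct sk_buff *skb,
 *			       unsigned int offset, size_t len)
 *	{
 *		size_t want = len;
 *
 *		if (want > desc->count)
 *			want = desc->count;
 *		desc->count -= want;	// stop the walk once the quota is gone
 *		return want;		// bytes consumed from this skb
 *	}
 *
 * With the socket already locked, tcp_read_sock(sk, &desc, count_actor)
 * then advances copied_seq by exactly the bytes the actor consumed.
 */
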
/*
 *	This routine copies from a sock struct into the user buffer.
 *
 *	Technical note: in 2.3 we work on _locked_ socket, so that
 *	tricks with *seq access order and skb->users are not required.
 *	Probably, code can be easily improved even more.
 */

int tcp_recvmsg(struct sock *sk, struct msghdr *msg,
		int len, int nonblock, int flags, int *addr_len)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
	int copied = 0;
	u32 peek_seq;
	u32 *seq;
	unsigned long used;
	int err;
	int target;		/* Read at least this many bytes */
	long timeo;
	struct task_struct *user_recv = NULL;

	lock_sock(sk);

	TCP_CHECK_TIMER(sk);

	err = -ENOTCONN;
	if (sk->state == TCP_LISTEN)
		goto out;

	timeo = sock_rcvtimeo(sk, nonblock);

	/* Urgent data needs to be handled specially. */
	if (flags & MSG_OOB)
		goto recv_urg;

	seq = &tp->copied_seq;
	if (flags & MSG_PEEK) {
		peek_seq = tp->copied_seq;
		seq = &peek_seq;
	}

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

	do {
		struct sk_buff * skb;
		u32 offset;

		/* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
		if (tp->urg_data && tp->urg_seq == *seq) {
			if (copied)
				break;
			if (signal_pending(current)) {
				copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
				break;
			}
		}

		/* Next get a buffer. */

		skb = skb_peek(&sk->receive_queue);
		do {
			if (!skb)
				break;

			/* Now that we have two receive queues this
			 * shouldn't happen.
			 */
			if (before(*seq, TCP_SKB_CB(skb)->seq)) {
				printk(KERN_INFO "recvmsg bug: copied %X seq %X\n",
				       *seq, TCP_SKB_CB(skb)->seq);
				break;
			}
			offset = *seq - TCP_SKB_CB(skb)->seq;
			if (skb->h.th->syn)
				offset--;
			if (offset < skb->len)
				goto found_ok_skb;
			if (skb->h.th->fin)
				goto found_fin_ok;
			BUG_TRAP(flags&MSG_PEEK);
			skb = skb->next;
		} while (skb != (struct sk_buff *)&sk->receive_queue);

		/* Well, if we have backlog, try to process it now. */

		if (copied >= target && sk->backlog.tail == NULL)
			break;

		if (copied) {
			if (sk->err ||
			    sk->state == TCP_CLOSE ||
			    (sk->shutdown & RCV_SHUTDOWN) ||
			    !timeo ||
			    signal_pending(current) ||
			    (flags & MSG_PEEK))
				break;
		} else {
			if (sk->done)
				break;

			if (sk->err) {
				copied = sock_error(sk);
				break;
			}

			if (sk->shutdown & RCV_SHUTDOWN)
				break;

			if (sk->state == TCP_CLOSE) {
				if (!sk->done) {
					/* This occurs when user tries to read
					 * from never connected socket.
					 */
					copied = -ENOTCONN;
					break;
				}
				break;
			}

			if (!timeo) {
				copied = -EAGAIN;
				break;
			}

			if (signal_pending(current)) {
				copied = sock_intr_errno(timeo);
				break;
			}
		}

		cleanup_rbuf(sk, copied);

		if (tp->ucopy.task == user_recv) {
			/* Install new reader */
			if (user_recv == NULL && !(flags&(MSG_TRUNC|MSG_PEEK))) {
				user_recv = current;
				tp->ucopy.task = user_recv;
				tp->ucopy.iov = msg->msg_iov;
			}

			tp->ucopy.len = len;

			BUG_TRAP(tp->copied_seq == tp->rcv_nxt || (flags&(MSG_PEEK|MSG_TRUNC)));

			/* Ugly... If prequeue is not empty, we have to
			 * process it before releasing socket, otherwise
			 * order will be broken at second iteration.
			 * More elegant solution is required!!!
			 *
			 * Look: we have the following (pseudo)queues:
			 *
			 * 1. packets in flight
			 * 2. backlog
			 * 3. prequeue
			 * 4. receive_queue
			 *
			 * Each queue can be processed only if the next ones
			 * are empty. At this point we have an empty receive_queue.
			 * But prequeue _can_ be not empty after the second iteration,
			 * when we jumped to the start of the loop because backlog
			 * processing added something to receive_queue.
			 * We cannot release_sock(), because the backlog contains
			 * packets that arrived _after_ the prequeued ones.
			 *
			 * In short, the algorithm is clear: process all
			 * the queues in order. We could do it more directly,
			 * requeueing packets from backlog to prequeue when it
			 * is not empty. That is more elegant, but eats cycles,
			 * unfortunately.
			 */
			if (skb_queue_len(&tp->ucopy.prequeue))
				goto do_prequeue;

			/* __ Set realtime policy in scheduler __ */
		}

		if (copied >= target) {
			/* Do not sleep, just process backlog. */
			release_sock(sk);
			lock_sock(sk);
		} else {
			timeo = tcp_data_wait(sk, timeo);
		}

		if (user_recv) {
			int chunk;

			/* __ Restore normal policy in scheduler __ */

			if ((chunk = len - tp->ucopy.len) != 0) {
				net_statistics[smp_processor_id()*2+1].TCPDirectCopyFromBacklog += chunk;
				len -= chunk;
				copied += chunk;
			}

			if (tp->rcv_nxt == tp->copied_seq &&
			    skb_queue_len(&tp->ucopy.prequeue)) {
do_prequeue:
				tcp_prequeue_process(sk);

				if ((chunk = len - tp->ucopy.len) != 0) {
					net_statistics[smp_processor_id()*2+1].TCPDirectCopyFromPrequeue += chunk;
					len -= chunk;
					copied += chunk;
				}
			}
		}
		if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) {
			if (net_ratelimit())
				printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
				       current->comm, current->pid);
			peek_seq = tp->copied_seq;
		}
		continue;

	found_ok_skb:
		/* Ok so how much can we use? */
		used = skb->len - offset;
		if (len < used)
			used = len;

		/* Do we have urgent data here? */
		if (tp->urg_data) {
			u32 urg_offset = tp->urg_seq - *seq;
			if (urg_offset < used) {
				if (!urg_offset) {
					if (!sk->urginline) {
						++*seq;
						offset++;
						used--;
						if (!used)
							goto skip_copy;
					}
				} else
					used = urg_offset;
			}
		}

		if (!(flags&MSG_TRUNC)) {
			err = skb_copy_datagram_iovec(skb, offset, msg->msg_iov, used);
			if (err) {
				/* Exception. Bailout! */
				if (!copied)
					copied = -EFAULT;
				break;
			}
		}

		*seq += used;
		copied += used;
		len -= used;

	skip_copy:
		if (tp->urg_data && after(tp->copied_seq,tp->urg_seq)) {
			tp->urg_data = 0;
			tcp_fast_path_check(sk, tp);
		}
		if (used + offset < skb->len)
			continue;

		if (skb->h.th->fin)
			goto found_fin_ok;
		if (!(flags & MSG_PEEK))
			tcp_eat_skb(sk, skb);
		continue;

	found_fin_ok:
		/* Process the FIN. */
		++*seq;
		if (!(flags & MSG_PEEK))
			tcp_eat_skb(sk, skb);
		break;
	} while (len > 0);

	if (user_recv) {
		if (skb_queue_len(&tp->ucopy.prequeue)) {
			int chunk;

			tp->ucopy.len = copied > 0 ? len : 0;

			tcp_prequeue_process(sk);

			if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
				net_statistics[smp_processor_id()*2+1].TCPDirectCopyFromPrequeue += chunk;
				len -= chunk;
				copied += chunk;
			}
		}

		tp->ucopy.task = NULL;
		tp->ucopy.len = 0;
	}

	/* According to UNIX98, msg_name/msg_namelen are ignored
	 * on connected socket. I was just happy when found this 8) --ANK
	 */

	/* Clean up data we have read: This will do ACK frames. */
	cleanup_rbuf(sk, copied);

	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return copied;

out:
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return err;

recv_urg:
	err = tcp_recv_urg(sk, timeo, msg, len, flags, addr_len);
	goto out;
}

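/*
 * Usage sketch (illustrative only): the `target` computed above from
 * SO_RCVLOWAT and MSG_WAITALL decides how much must arrive before the
 * call returns. Assuming fd is a connected TCP socket:
 *
 *	char buf[4096];
 *	// Returns once at least one byte is available (default low-water mark).
 *	ssize_t a = recv(fd, buf, sizeof(buf), 0);
 *	// Returns only when all 4096 bytes have arrived (or on error, EOF,
 *	// or a signal), since MSG_WAITALL raises target to len.
 *	ssize_t b = recv(fd, buf, sizeof(buf), MSG_WAITALL);
 */
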
/*
 *	State processing on a close. This implements the state shift for
 *	sending our FIN frame. Note that we only send a FIN for some
 *	states. A shutdown() may have already sent the FIN, or we may be
 *	closed.
 */

static unsigned char new_state[16] = {
  /* current state:	   new state:		action:		*/
  /* (Invalid)		*/ TCP_CLOSE,
  /* TCP_ESTABLISHED	*/ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
  /* TCP_SYN_SENT	*/ TCP_CLOSE,
  /* TCP_SYN_RECV	*/ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
  /* TCP_FIN_WAIT1	*/ TCP_FIN_WAIT1,
  /* TCP_FIN_WAIT2	*/ TCP_FIN_WAIT2,
  /* TCP_TIME_WAIT	*/ TCP_CLOSE,
  /* TCP_CLOSE		*/ TCP_CLOSE,
  /* TCP_CLOSE_WAIT	*/ TCP_LAST_ACK | TCP_ACTION_FIN,
  /* TCP_LAST_ACK	*/ TCP_LAST_ACK,
  /* TCP_LISTEN		*/ TCP_CLOSE,
  /* TCP_CLOSING	*/ TCP_CLOSING,
};

static int tcp_close_state(struct sock *sk)
{
	int next = (int) new_state[sk->state];
	int ns = (next & TCP_STATE_MASK);

	tcp_set_state(sk, ns);

	return (next & TCP_ACTION_FIN);
}

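/*
 * Worked example (illustrative only): each new_state[] entry packs the
 * successor state in the low bits and an optional "send FIN" flag above
 * them. For a socket in TCP_ESTABLISHED:
 *
 *	next = TCP_FIN_WAIT1 | TCP_ACTION_FIN;
 *	ns   = next & TCP_STATE_MASK;	// TCP_FIN_WAIT1
 *	tcp_set_state(sk, ns);		// move to FIN-WAIT-1 ...
 *	return next & TCP_ACTION_FIN;	// ... and tell the caller to send FIN
 *
 * whereas TCP_SYN_SENT maps straight to TCP_CLOSE with no FIN to send.
 */
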
/*
 *	Shutdown the sending side of a connection. Much like close except
 *	that we don't shut down the receive side or set sk->dead.
 */

void tcp_shutdown(struct sock *sk, int how)
{
	/* We need to grab some memory, and put together a FIN,
	 * and then put it into the queue to be sent.
	 *	Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
	 */
	if (!(how & SEND_SHUTDOWN))
		return;

	/* If we've already sent a FIN, or it's a closed state, skip this. */
	if ((1 << sk->state) &
	    (TCPF_ESTABLISHED|TCPF_SYN_SENT|TCPF_SYN_RECV|TCPF_CLOSE_WAIT)) {
		/* Clear out any half completed packets. FIN if needed. */
		if (tcp_close_state(sk))
			tcp_send_fin(sk);
	}
}

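/*
 * Usage sketch (illustrative only): this is the half-close that user
 * space reaches via shutdown(2). Assuming fd is a connected TCP socket:
 *
 *	shutdown(fd, SHUT_WR);		// send FIN, no more writes from us
 *	// ... but we can still read until the peer closes its side:
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		consume(buf, n);	// consume() is a made-up helper
 */
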
/*
 *	Return non-zero if we still have things to send in our buffers.
 */

static inline int closing(struct sock * sk)
{
	return ((1 << sk->state) & (TCPF_FIN_WAIT1|TCPF_CLOSING|TCPF_LAST_ACK));
}

static __inline__ void tcp_kill_sk_queues(struct sock *sk)
{
	/* First the read buffer. */
	__skb_queue_purge(&sk->receive_queue);

	/* Next, the error queue. */
	__skb_queue_purge(&sk->error_queue);

	/* Next, the write queue. */
	BUG_TRAP(skb_queue_empty(&sk->write_queue));

	/* Account for returned memory. */
	tcp_mem_reclaim(sk);

	BUG_TRAP(sk->wmem_queued == 0);
	BUG_TRAP(sk->forward_alloc == 0);

	/* It is _impossible_ for the backlog to contain anything
	 * when we get here. All user references to this socket
	 * have gone away; only the net layer can touch it.
	 */
}

/*
 * At this point, there should be no process reference to this
 * socket, and thus no user references at all. Therefore we
 * can assume the socket waitqueue is inactive and nobody will
 * try to jump onto it.
 */
void tcp_destroy_sock(struct sock *sk)
{
	BUG_TRAP(sk->state==TCP_CLOSE);
	BUG_TRAP(sk->dead);

	/* It cannot be in hash table! */
	BUG_TRAP(sk->pprev==NULL);

	/* If it has a non-zero sk->num, it must be bound. */
	BUG_TRAP(!sk->num || sk->prev!=NULL);

#ifdef TCP_DEBUG
	if (sk->zapped) {
		printk(KERN_DEBUG "TCP: double destroy sk=%p\n", sk);
		sock_hold(sk);
	}
	sk->zapped = 1;
#endif

	sk->prot->destroy(sk);

	tcp_kill_sk_queues(sk);

#ifdef INET_REFCNT_DEBUG
	if (atomic_read(&sk->refcnt) != 1) {
		printk(KERN_DEBUG "Destruction TCP %p delayed, c=%d\n", sk, atomic_read(&sk->refcnt));
	}
#endif

	atomic_dec(&tcp_orphan_count);
	sock_put(sk);
}

void tcp_close(struct sock *sk, long timeout)
{
	struct sk_buff *skb;
	int data_was_unread = 0;

	lock_sock(sk);
	sk->shutdown = SHUTDOWN_MASK;

	if(sk->state == TCP_LISTEN) {
		tcp_set_state(sk, TCP_CLOSE);

		/* Special case. */
		tcp_listen_stop(sk);

		goto adjudge_to_death;
	}

	/* We need to flush the recv. buffs. We do this only on the
	 * descriptor close, not protocol-sourced closes, because the
	 * reader process may not have drained the data yet!
	 */
	while((skb=__skb_dequeue(&sk->receive_queue))!=NULL) {
		u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq - skb->h.th->fin;
		data_was_unread += len;
		__kfree_skb(skb);
	}

	tcp_mem_reclaim(sk);

	/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
	 * 3.10, we send a RST here because data was lost. To
	 * witness the awful effects of the old behavior of always
	 * doing a FIN, run an older 2.1.x kernel or 2.0.x, start
	 * a bulk GET in an FTP client, suspend the process, wait
	 * for the client to advertise a zero window, then kill -9
	 * the FTP client, wheee... Note: timeout is always zero
	 * in such a case.
	 */
	if(data_was_unread != 0) {
		/* Unread data was tossed, zap the connection. */
		NET_INC_STATS_USER(TCPAbortOnClose);
		tcp_set_state(sk, TCP_CLOSE);
		tcp_send_active_reset(sk, GFP_KERNEL);
	} else if (sk->linger && sk->lingertime==0) {
		/* Check zero linger _after_ checking for unread data. */
		sk->prot->disconnect(sk, 0);
		NET_INC_STATS_USER(TCPAbortOnData);
	} else if (tcp_close_state(sk)) {
		/* We FIN if the application ate all the data before
		 * zapping the connection.
		 */

		/* RED-PEN. Formally speaking, we have broken the TCP state
		 * machine. State transitions:
		 *
		 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
		 * TCP_SYN_RECV	-> TCP_FIN_WAIT1 (forget it, it's impossible)
		 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
		 *
		 * are legal only when the FIN has been sent (i.e. in window),
		 * rather than queued out of window. Purists blame.
		 *
		 * F.e. the "RFC state" is ESTABLISHED
		 * if the Linux state is FIN-WAIT-1 but the FIN is still not sent.
		 *
		 * The visible deviations are that sometimes
		 * we enter the time-wait state when it is not really required
		 * (harmless), and do not send active resets when they are
		 * required by the specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when
		 * they look like CLOSING or LAST_ACK to Linux).
		 * Probably, I missed some more holelets.
		 *						--ANK
		 */
		tcp_send_fin(sk);
	}

	if (timeout) {
		struct task_struct *tsk = current;
		DECLARE_WAITQUEUE(wait, current);

		add_wait_queue(sk->sleep, &wait);

		do {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!closing(sk))
				break;
			release_sock(sk);
			timeout = schedule_timeout(timeout);
			lock_sock(sk);
		} while (!signal_pending(tsk) && timeout);

		tsk->state = TASK_RUNNING;
		remove_wait_queue(sk->sleep, &wait);
	}

adjudge_to_death:
	/* It is the last release_sock in its life. It will remove backlog. */
	release_sock(sk);


	/* Now socket is owned by kernel and we acquire BH lock
	   to finish close. No need to check for user refs.
	 */
	local_bh_disable();
	bh_lock_sock(sk);
	BUG_TRAP(sk->lock.users==0);

	sock_hold(sk);
	sock_orphan(sk);

	/* This is a (useful) BSD violation of the RFC. There is a
	 * problem with TCP as specified in that the other end could
	 * keep a socket open forever with no application left at this end.
	 * We use a 3 minute timeout (about the same as BSD) then kill
	 * our end. If they send after that then tough - BUT: long enough
	 * that we won't make the old 4*rto = almost no time - whoops
	 * reset mistake.
	 *
	 * Nope, it was not a mistake. It is really desired behaviour,
	 * f.e. on http servers, where such sockets are useless but
	 * consume significant resources. Let's do it with a special
	 * linger2 option.					--ANK
	 */

	if (sk->state == TCP_FIN_WAIT2) {
		struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
		if (tp->linger2 < 0) {
			tcp_set_state(sk, TCP_CLOSE);
			tcp_send_active_reset(sk, GFP_ATOMIC);
			NET_INC_STATS_BH(TCPAbortOnLinger);
		} else {
			int tmo = tcp_fin_time(tp);

			if (tmo > TCP_TIMEWAIT_LEN) {
				tcp_reset_keepalive_timer(sk, tcp_fin_time(tp));
			} else {
				atomic_inc(&tcp_orphan_count);
				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
				goto out;
			}
		}
	}
	if (sk->state != TCP_CLOSE) {
		tcp_mem_reclaim(sk);
		if (atomic_read(&tcp_orphan_count) > sysctl_tcp_max_orphans ||
		    (sk->wmem_queued > SOCK_MIN_SNDBUF &&
		     atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])) {
			if (net_ratelimit())
				printk(KERN_INFO "TCP: too many orphaned sockets\n");
			tcp_set_state(sk, TCP_CLOSE);
			tcp_send_active_reset(sk, GFP_ATOMIC);
			NET_INC_STATS_BH(TCPAbortOnMemory);
		}
	}
	atomic_inc(&tcp_orphan_count);

	if (sk->state == TCP_CLOSE)
		tcp_destroy_sock(sk);
	/* Otherwise, socket is reprieved until protocol close. */

out:
	bh_unlock_sock(sk);
	local_bh_enable();
	sock_put(sk);
}

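/*
 * Usage sketch (illustrative only): the abort paths in tcp_close() are
 * all reachable from user space. Assuming fd is a connected TCP socket:
 *
 *	// 1. Unread data at close(2) time -> RST (TCPAbortOnClose).
 *	// 2. Zero linger -> disconnect with RST instead of FIN:
 *	struct linger lg = { 1, 0 };	// l_onoff=1, l_linger=0
 *	setsockopt(fd, SOL_SOCKET, SO_LINGER, &lg, sizeof(lg));
 *	close(fd);
 *	// 3. Otherwise a FIN is sent, and the FIN_WAIT2 lifetime is bounded
 *	//    by tcp_fin_time(), tunable per-socket via TCP_LINGER2.
 */
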
/* These states need RST on ABORT according to RFC793 */

static inline int tcp_need_reset(int state)
{
	return ((1 << state) &
		(TCPF_ESTABLISHED|TCPF_CLOSE_WAIT|TCPF_FIN_WAIT1|
		 TCPF_FIN_WAIT2|TCPF_SYN_RECV));
}

int tcp_disconnect(struct sock *sk, int flags)
|
int tcp_disconnect(struct sock *sk, int flags)
|
{
|
{
|
struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
|
struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
|
int old_state;
|
int old_state;
|
int err = 0;
|
int err = 0;
|
|
|
old_state = sk->state;
|
old_state = sk->state;
|
if (old_state != TCP_CLOSE)
|
if (old_state != TCP_CLOSE)
|
tcp_set_state(sk, TCP_CLOSE);
|
tcp_set_state(sk, TCP_CLOSE);
|
|
|
/* ABORT function of RFC793 */
|
/* ABORT function of RFC793 */
|
if (old_state == TCP_LISTEN) {
|
if (old_state == TCP_LISTEN) {
|
tcp_listen_stop(sk);
|
tcp_listen_stop(sk);
|
} else if (tcp_need_reset(old_state) ||
|
} else if (tcp_need_reset(old_state) ||
|
(tp->snd_nxt != tp->write_seq &&
|
(tp->snd_nxt != tp->write_seq &&
|
(1<<old_state)&(TCPF_CLOSING|TCPF_LAST_ACK))) {
|
(1<<old_state)&(TCPF_CLOSING|TCPF_LAST_ACK))) {
|
/* The last check adjusts for discrepance of Linux wrt. RFC
|
/* The last check adjusts for discrepance of Linux wrt. RFC
|
* states
|
* states
|
*/
|
*/
|
tcp_send_active_reset(sk, gfp_any());
|
tcp_send_active_reset(sk, gfp_any());
|
sk->err = ECONNRESET;
|
sk->err = ECONNRESET;
|
} else if (old_state == TCP_SYN_SENT)
|
} else if (old_state == TCP_SYN_SENT)
|
sk->err = ECONNRESET;
|
sk->err = ECONNRESET;
|
|
|
tcp_clear_xmit_timers(sk);
|
tcp_clear_xmit_timers(sk);
|
__skb_queue_purge(&sk->receive_queue);
|
__skb_queue_purge(&sk->receive_queue);
|
tcp_writequeue_purge(sk);
|
tcp_writequeue_purge(sk);
|
__skb_queue_purge(&tp->out_of_order_queue);
|
__skb_queue_purge(&tp->out_of_order_queue);
|
|
|
sk->dport = 0;
|
sk->dport = 0;
|
|
|
if (!(sk->userlocks&SOCK_BINDADDR_LOCK)) {
|
if (!(sk->userlocks&SOCK_BINDADDR_LOCK)) {
|
sk->rcv_saddr = 0;
|
sk->rcv_saddr = 0;
|
sk->saddr = 0;
|
sk->saddr = 0;
|
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
|
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
|
memset(&sk->net_pinfo.af_inet6.saddr, 0, 16);
|
memset(&sk->net_pinfo.af_inet6.saddr, 0, 16);
|
memset(&sk->net_pinfo.af_inet6.rcv_saddr, 0, 16);
|
memset(&sk->net_pinfo.af_inet6.rcv_saddr, 0, 16);
|
#endif
|
#endif
|
}
|
}
|
|
|
sk->shutdown = 0;
|
sk->shutdown = 0;
|
sk->done = 0;
|
sk->done = 0;
|
tp->srtt = 0;
|
tp->srtt = 0;
|
if ((tp->write_seq += tp->max_window+2) == 0)
|
if ((tp->write_seq += tp->max_window+2) == 0)
|
tp->write_seq = 1;
|
tp->write_seq = 1;
|
tp->backoff = 0;
|
tp->backoff = 0;
|
tp->snd_cwnd = 2;
|
tp->snd_cwnd = 2;
|
tp->probes_out = 0;
|
tp->probes_out = 0;
|
tp->packets_out = 0;
|
tp->packets_out = 0;
|
tp->snd_ssthresh = 0x7fffffff;
|
tp->snd_ssthresh = 0x7fffffff;
|
tp->snd_cwnd_cnt = 0;
|
tp->snd_cwnd_cnt = 0;
|
tp->ca_state = TCP_CA_Open;
|
tp->ca_state = TCP_CA_Open;
|
tcp_clear_retrans(tp);
|
tcp_clear_retrans(tp);
|
tcp_delack_init(tp);
|
tcp_delack_init(tp);
|
tp->send_head = NULL;
|
tp->send_head = NULL;
|
tp->saw_tstamp = 0;
|
tp->saw_tstamp = 0;
|
tcp_sack_reset(tp);
|
tcp_sack_reset(tp);
|
__sk_dst_reset(sk);
|
__sk_dst_reset(sk);
|
|
|
BUG_TRAP(!sk->num || sk->prev);
|
BUG_TRAP(!sk->num || sk->prev);
|
|
|
sk->error_report(sk);
|
sk->error_report(sk);
|
return err;
|
return err;
|
}
|
}
|
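
/*
 * Editor's note (hypothetical userspace sketch, not part of the original
 * file): tcp_disconnect() is normally reached when an application
 * dissolves an association by calling connect() on a connected TCP socket
 * with an AF_UNSPEC address, roughly:
 *
 *	struct sockaddr sa;
 *
 *	memset(&sa, 0, sizeof(sa));
 *	sa.sa_family = AF_UNSPEC;
 *	if (connect(fd, &sa, sizeof(sa)) < 0)
 *		perror("disconnect");
 */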

/*
 *	Wait for an incoming connection, avoid race
 *	conditions. This must be called with the socket locked.
 */
static int wait_for_connect(struct sock *sk, long timeo)
{
	DECLARE_WAITQUEUE(wait, current);
	int err;

	/*
	 * True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
	 *
	 * Subtle issue: "add_wait_queue_exclusive()" will be added
	 * after any current non-exclusive waiters, and we know that
	 * it will always _stay_ after any new non-exclusive waiters
	 * because all non-exclusive waiters are added at the
	 * beginning of the wait-queue. As such, it's ok to "drop"
	 * our exclusiveness temporarily when we get woken up without
	 * having to remove and re-insert us on the wait queue.
	 */
	add_wait_queue_exclusive(sk->sleep, &wait);
	for (;;) {
		current->state = TASK_INTERRUPTIBLE;
		release_sock(sk);
		if (sk->tp_pinfo.af_tcp.accept_queue == NULL)
			timeo = schedule_timeout(timeo);
		lock_sock(sk);
		err = 0;
		if (sk->tp_pinfo.af_tcp.accept_queue)
			break;
		err = -EINVAL;
		if (sk->state != TCP_LISTEN)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(sk->sleep, &wait);
	return err;
}

/*
 *	This will accept the next outstanding connection.
 */

struct sock *tcp_accept(struct sock *sk, int flags, int *err)
{
	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
	struct open_request *req;
	struct sock *newsk;
	int error;

	lock_sock(sk);

	/* We need to make sure that this socket is listening,
	 * and that it has something pending.
	 */
	error = -EINVAL;
	if (sk->state != TCP_LISTEN)
		goto out;

	/* Find already established connection */
	if (!tp->accept_queue) {
		long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

		/* If this is a non-blocking socket, don't sleep */
		error = -EAGAIN;
		if (!timeo)
			goto out;

		error = wait_for_connect(sk, timeo);
		if (error)
			goto out;
	}

	req = tp->accept_queue;
	if ((tp->accept_queue = req->dl_next) == NULL)
		tp->accept_queue_tail = NULL;

	newsk = req->sk;
	tcp_acceptq_removed(sk);
	tcp_openreq_fastfree(req);
	BUG_TRAP(newsk->state != TCP_SYN_RECV);
	release_sock(sk);
	return newsk;

out:
	release_sock(sk);
	*err = error;
	return NULL;
}
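
/*
 * Editor's note (hypothetical userspace sketch, not part of the original
 * file): the error codes chosen above surface directly through accept(2),
 * so a non-blocking listener sees EAGAIN when the accept queue is empty
 * and EINTR when wait_for_connect() was interrupted by a signal:
 *
 *	for (;;) {
 *		int cfd = accept(lfd, NULL, NULL);
 *
 *		if (cfd < 0 && errno == EINTR)
 *			continue;
 *		if (cfd < 0 && errno == EAGAIN)
 *			break;
 *		if (cfd < 0) {
 *			perror("accept");
 *			break;
 *		}
 *		handle_client(cfd);
 *	}
 */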

/*
 *	Socket option code for TCP.
 */

int tcp_setsockopt(struct sock *sk, int level, int optname, char *optval,
		   int optlen)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
	int val;
	int err = 0;

	if (level != SOL_TCP)
		return tp->af_specific->setsockopt(sk, level, optname,
						   optval, optlen);

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int *)optval))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case TCP_MAXSEG:
		/* Values greater than the interface MTU won't take effect.
		 * However, at the point when this call is made we typically
		 * don't yet know which interface is going to be used.
		 */
		if (val < 8 || val > MAX_TCP_WINDOW) {
			err = -EINVAL;
			break;
		}
		tp->user_mss = val;
		break;

	case TCP_NODELAY:
		/* You cannot try to use this and TCP_CORK in
		 * tandem, so let the user know.
		 */
		if (tp->nonagle == 2) {
			err = -EINVAL;
			break;
		}
		tp->nonagle = (val == 0) ? 0 : 1;
		if (val)
			tcp_push_pending_frames(sk, tp);
		break;

	case TCP_CORK:
		/* When set indicates to always queue non-full frames.
		 * Later the user clears this option and we transmit
		 * any pending partial frames in the queue. This is
		 * meant to be used alongside sendfile() to get properly
		 * filled frames when the user (for example) must write
		 * out headers with a write() call first and then use
		 * sendfile to send out the data parts.
		 *
		 * You cannot try to use TCP_NODELAY and this mechanism
		 * at the same time, so let the user know.
		 */
		if (tp->nonagle == 1) {
			err = -EINVAL;
			break;
		}
		if (val != 0) {
			tp->nonagle = 2;
		} else {
			tp->nonagle = 0;

			tcp_push_pending_frames(sk, tp);
		}
		break;
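
	/*
	 * Editor's note (hypothetical userspace sketch, not part of the
	 * original file): the header-plus-sendfile() pattern described in
	 * the comment above looks roughly like this on the application
	 * side; clearing TCP_CORK is what reaches the
	 * tcp_push_pending_frames() call above and flushes the last
	 * partial frame:
	 *
	 *	int on = 1, off = 0;
	 *
	 *	setsockopt(fd, SOL_TCP, TCP_CORK, &on, sizeof(on));
	 *	write(fd, hdr, hdrlen);
	 *	sendfile(fd, filefd, NULL, filelen);
	 *	setsockopt(fd, SOL_TCP, TCP_CORK, &off, sizeof(off));
	 */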

	case TCP_KEEPIDLE:
		if (val < 1 || val > MAX_TCP_KEEPIDLE)
			err = -EINVAL;
		else {
			tp->keepalive_time = val * HZ;
			if (sk->keepopen && !((1<<sk->state)&(TCPF_CLOSE|TCPF_LISTEN))) {
				__u32 elapsed = tcp_time_stamp - tp->rcv_tstamp;
				if (tp->keepalive_time > elapsed)
					elapsed = tp->keepalive_time - elapsed;
				else
					elapsed = 0;
				tcp_reset_keepalive_timer(sk, elapsed);
			}
		}
		break;
	case TCP_KEEPINTVL:
		if (val < 1 || val > MAX_TCP_KEEPINTVL)
			err = -EINVAL;
		else
			tp->keepalive_intvl = val * HZ;
		break;
	case TCP_KEEPCNT:
		if (val < 1 || val > MAX_TCP_KEEPCNT)
			err = -EINVAL;
		else
			tp->keepalive_probes = val;
		break;
	case TCP_SYNCNT:
		if (val < 1 || val > MAX_TCP_SYNCNT)
			err = -EINVAL;
		else
			tp->syn_retries = val;
		break;
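
	/*
	 * Editor's note (hypothetical userspace sketch, not part of the
	 * original file): the three options above together define the
	 * keepalive schedule, e.g. "first probe after 60s idle, then every
	 * 10s, give up after 5 unanswered probes":
	 *
	 *	int idle = 60, intvl = 10, cnt = 5, on = 1;
	 *
	 *	setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
	 *	setsockopt(fd, SOL_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));
	 *	setsockopt(fd, SOL_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));
	 *	setsockopt(fd, SOL_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
	 */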

	case TCP_LINGER2:
		if (val < 0)
			tp->linger2 = -1;
		else if (val > sysctl_tcp_fin_timeout/HZ)
			tp->linger2 = 0;
		else
			tp->linger2 = val*HZ;
		break;

	case TCP_DEFER_ACCEPT:
		tp->defer_accept = 0;
		if (val > 0) {
			/* Translate value in seconds to number of retransmits */
			while (tp->defer_accept < 32 &&
			       val > ((TCP_TIMEOUT_INIT/HZ)<<tp->defer_accept))
				tp->defer_accept++;
			tp->defer_accept++;
		}
		break;
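
	/*
	 * Editor's note (worked example, assuming TCP_TIMEOUT_INIT/HZ == 3
	 * as in this kernel): for val = 10 seconds the loop compares 10
	 * against 3, 6 and 12, stopping with defer_accept == 2, and the
	 * final increment yields 3. Reading the option back (see
	 * tcp_getsockopt() below) reports (3 << (3 - 1)) = 12 seconds,
	 * i.e. the request is rounded up to a whole number of exponential
	 * retransmit periods.
	 */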

	case TCP_WINDOW_CLAMP:
		if (val == 0) {
			if (sk->state != TCP_CLOSE) {
				err = -EINVAL;
				break;
			}
			tp->window_clamp = 0;
		} else {
			tp->window_clamp = val < SOCK_MIN_RCVBUF/2 ?
					   SOCK_MIN_RCVBUF/2 : val;
		}
		break;

	case TCP_QUICKACK:
		if (!val) {
			tp->ack.pingpong = 1;
		} else {
			tp->ack.pingpong = 0;
			if ((1<<sk->state)&(TCPF_ESTABLISHED|TCPF_CLOSE_WAIT) &&
			    tcp_ack_scheduled(tp)) {
				tp->ack.pending |= TCP_ACK_PUSHED;
				cleanup_rbuf(sk, 1);
				if (!(val & 1))
					tp->ack.pingpong = 1;
			}
		}
		break;
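
	/*
	 * Editor's note (clarification, not from the original source): the
	 * low bit of val acts as a persistence flag. An odd value (e.g. 1)
	 * disables delayed-ACK "pingpong" mode and leaves it disabled; an
	 * even non-zero value (e.g. 2) also pushes out any ACK that was
	 * already scheduled, but in that case re-enables pingpong mode
	 * afterwards, giving a one-shot quick ACK.
	 */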

	default:
		err = -ENOPROTOOPT;
		break;
	};
	release_sock(sk);
	return err;
}

int tcp_getsockopt(struct sock *sk, int level, int optname, char *optval,
		   int *optlen)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
	int val, len;

	if (level != SOL_TCP)
		return tp->af_specific->getsockopt(sk, level, optname,
						   optval, optlen);

	if (get_user(len, optlen))
		return -EFAULT;

	len = min_t(unsigned int, len, sizeof(int));

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case TCP_MAXSEG:
		val = tp->mss_cache;
		if (val == 0 && ((1<<sk->state)&(TCPF_CLOSE|TCPF_LISTEN)))
			val = tp->user_mss;
		break;
	case TCP_NODELAY:
		val = (tp->nonagle == 1);
		break;
	case TCP_CORK:
		val = (tp->nonagle == 2);
		break;
	case TCP_KEEPIDLE:
		val = (tp->keepalive_time ? : sysctl_tcp_keepalive_time)/HZ;
		break;
	case TCP_KEEPINTVL:
		val = (tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl)/HZ;
		break;
	case TCP_KEEPCNT:
		val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
		break;
	case TCP_SYNCNT:
		val = tp->syn_retries ? : sysctl_tcp_syn_retries;
		break;
	case TCP_LINGER2:
		val = tp->linger2;
		if (val >= 0)
			val = (val ? : sysctl_tcp_fin_timeout)/HZ;
		break;
	case TCP_DEFER_ACCEPT:
		val = tp->defer_accept == 0 ? 0 :
		      ((TCP_TIMEOUT_INIT/HZ)<<(tp->defer_accept - 1));
		break;
	case TCP_WINDOW_CLAMP:
		val = tp->window_clamp;
		break;
	case TCP_INFO:
	{
		struct tcp_info info;
		u32 now = tcp_time_stamp;

		if (get_user(len, optlen))
			return -EFAULT;
		info.tcpi_state = sk->state;
		info.tcpi_ca_state = tp->ca_state;
		info.tcpi_retransmits = tp->retransmits;
		info.tcpi_probes = tp->probes_out;
		info.tcpi_backoff = tp->backoff;
		info.tcpi_options = 0;
		if (tp->tstamp_ok)
			info.tcpi_options |= TCPI_OPT_TIMESTAMPS;
		if (tp->sack_ok)
			info.tcpi_options |= TCPI_OPT_SACK;
		if (tp->wscale_ok) {
			info.tcpi_options |= TCPI_OPT_WSCALE;
			info.tcpi_snd_wscale = tp->snd_wscale;
			info.tcpi_rcv_wscale = tp->rcv_wscale;
		} else {
			info.tcpi_snd_wscale = 0;
			info.tcpi_rcv_wscale = 0;
		}
		if (tp->ecn_flags&TCP_ECN_OK)
			info.tcpi_options |= TCPI_OPT_ECN;

		info.tcpi_rto = (1000000*tp->rto)/HZ;
		info.tcpi_ato = (1000000*tp->ack.ato)/HZ;
		info.tcpi_snd_mss = tp->mss_cache;
		info.tcpi_rcv_mss = tp->ack.rcv_mss;

		info.tcpi_unacked = tp->packets_out;
		info.tcpi_sacked = tp->sacked_out;
		info.tcpi_lost = tp->lost_out;
		info.tcpi_retrans = tp->retrans_out;
		info.tcpi_fackets = tp->fackets_out;

		info.tcpi_last_data_sent = ((now - tp->lsndtime)*1000)/HZ;
		info.tcpi_last_ack_sent = 0;
		info.tcpi_last_data_recv = ((now - tp->ack.lrcvtime)*1000)/HZ;
		info.tcpi_last_ack_recv = ((now - tp->rcv_tstamp)*1000)/HZ;

		info.tcpi_pmtu = tp->pmtu_cookie;
		info.tcpi_rcv_ssthresh = tp->rcv_ssthresh;
		info.tcpi_rtt = ((1000000*tp->srtt)/HZ)>>3;
		info.tcpi_rttvar = ((1000000*tp->mdev)/HZ)>>2;
		info.tcpi_snd_ssthresh = tp->snd_ssthresh;
		info.tcpi_snd_cwnd = tp->snd_cwnd;
		info.tcpi_advmss = tp->advmss;
		info.tcpi_reordering = tp->reordering;

		len = min_t(unsigned int, len, sizeof(info));
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, &info, len))
			return -EFAULT;
		return 0;
	}
	case TCP_QUICKACK:
		val = !tp->ack.pingpong;
		break;
	default:
		return -ENOPROTOOPT;
	};

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}
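
/*
 * Editor's note (hypothetical userspace sketch, not part of the original
 * file): TCP_INFO is read with a plain getsockopt() call; the code above
 * truncates the result to the length the caller supplies, so differently
 * sized struct tcp_info layouts interoperate:
 *
 *	struct tcp_info ti;
 *	socklen_t len = sizeof(ti);
 *
 *	if (getsockopt(fd, SOL_TCP, TCP_INFO, &ti, &len) == 0)
 *		printf("rtt=%u us cwnd=%u\n", ti.tcpi_rtt, ti.tcpi_snd_cwnd);
 */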

extern void __skb_cb_too_small_for_tcp(int, int);
extern void tcpdiag_init(void);

void __init tcp_init(void)
{
	struct sk_buff *skb = NULL;
	unsigned long goal;
	int order, i;

	if (sizeof(struct tcp_skb_cb) > sizeof(skb->cb))
		__skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
					   sizeof(skb->cb));

	tcp_openreq_cachep = kmem_cache_create("tcp_open_request",
					       sizeof(struct open_request),
					       0, SLAB_HWCACHE_ALIGN,
					       NULL, NULL);
	if (!tcp_openreq_cachep)
		panic("tcp_init: Cannot alloc open_request cache.");

	tcp_bucket_cachep = kmem_cache_create("tcp_bind_bucket",
					      sizeof(struct tcp_bind_bucket),
					      0, SLAB_HWCACHE_ALIGN,
					      NULL, NULL);
	if (!tcp_bucket_cachep)
		panic("tcp_init: Cannot alloc tcp_bind_bucket cache.");

	tcp_timewait_cachep = kmem_cache_create("tcp_tw_bucket",
						sizeof(struct tcp_tw_bucket),
						0, SLAB_HWCACHE_ALIGN,
						NULL, NULL);
	if (!tcp_timewait_cachep)
		panic("tcp_init: Cannot alloc tcp_tw_bucket cache.");

	/* Size and allocate the main established and bind bucket
	 * hash tables.
	 *
	 * The methodology is similar to that of the buffer cache.
	 */
	if (num_physpages >= (128 * 1024))
		goal = num_physpages >> (21 - PAGE_SHIFT);
	else
		goal = num_physpages >> (23 - PAGE_SHIFT);

	for (order = 0; (1UL << order) < goal; order++)
		;
	do {
		tcp_ehash_size = (1UL << order) * PAGE_SIZE /
				 sizeof(struct tcp_ehash_bucket);
		tcp_ehash_size >>= 1;
		while (tcp_ehash_size & (tcp_ehash_size - 1))
			tcp_ehash_size--;
		tcp_ehash = (struct tcp_ehash_bucket *)
			__get_free_pages(GFP_ATOMIC, order);
	} while (tcp_ehash == NULL && --order > 0);
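
	/*
	 * Editor's note (illustrative, not from the original source): the
	 * "while (x & (x - 1)) x--;" loop above rounds tcp_ehash_size down
	 * to a power of two, since only powers of two share no set bits
	 * with their predecessor. E.g. a value of 3000 (binary
	 * 101110111000) keeps failing the test until it reaches 2048
	 * (100000000000). This runs once at boot, so the linear walk is
	 * harmless.
	 */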

	if (!tcp_ehash)
		panic("Failed to allocate TCP established hash table\n");
	for (i = 0; i < (tcp_ehash_size << 1); i++) {
		tcp_ehash[i].lock = RW_LOCK_UNLOCKED;
		tcp_ehash[i].chain = NULL;
	}

	do {
		tcp_bhash_size = (1UL << order) * PAGE_SIZE /
				 sizeof(struct tcp_bind_hashbucket);
		if ((tcp_bhash_size > (64 * 1024)) && order > 0)
			continue;
		tcp_bhash = (struct tcp_bind_hashbucket *)
			__get_free_pages(GFP_ATOMIC, order);
	} while (tcp_bhash == NULL && --order >= 0);

	if (!tcp_bhash)
		panic("Failed to allocate TCP bind hash table\n");
	for (i = 0; i < tcp_bhash_size; i++) {
		tcp_bhash[i].lock = SPIN_LOCK_UNLOCKED;
		tcp_bhash[i].chain = NULL;
	}

	/* Try to be a bit smarter and adjust defaults depending
	 * on available memory.
	 */
	if (order > 4) {
		sysctl_local_port_range[0] = 32768;
		sysctl_local_port_range[1] = 61000;
		sysctl_tcp_max_tw_buckets = 180000;
		sysctl_tcp_max_orphans = 4096 << (order - 4);
		sysctl_max_syn_backlog = 1024;
	} else if (order < 3) {
		sysctl_local_port_range[0] = 1024 * (3 - order);
		sysctl_tcp_max_tw_buckets >>= (3 - order);
		sysctl_tcp_max_orphans >>= (3 - order);
		sysctl_max_syn_backlog = 128;
	}
	tcp_port_rover = sysctl_local_port_range[0] - 1;

	sysctl_tcp_mem[0] = 768 << order;
	sysctl_tcp_mem[1] = 1024 << order;
	sysctl_tcp_mem[2] = 1536 << order;
	if (sysctl_tcp_mem[2] - sysctl_tcp_mem[1] > 512)
		sysctl_tcp_mem[1] = sysctl_tcp_mem[2] - 512;
	if (sysctl_tcp_mem[1] - sysctl_tcp_mem[0] > 512)
		sysctl_tcp_mem[0] = sysctl_tcp_mem[1] - 512;
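
	/*
	 * Editor's note (worked example, not from the original source):
	 * with order == 3 the initial watermarks are 768<<3 = 6144,
	 * 1024<<3 = 8192 and 1536<<3 = 12288 pages; the two clamps then
	 * pull the lower marks up to within 512 pages of the next one, so
	 * tcp_mem[1] becomes 12288 - 512 = 11776 and tcp_mem[0] becomes
	 * 11776 - 512 = 11264. Memory pressure and hard-limit thresholds
	 * thus end up close together.
	 */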

	if (order < 3) {
		sysctl_tcp_wmem[2] = 64 * 1024;
		sysctl_tcp_rmem[0] = PAGE_SIZE;
		sysctl_tcp_rmem[1] = 43689;
		sysctl_tcp_rmem[2] = 2 * 43689;
	}

	printk(KERN_INFO "TCP: Hash tables configured (established %d bind %d)\n",
	       tcp_ehash_size << 1, tcp_bhash_size);

	(void) tcp_mib_init();
	tcpdiag_init();
}