/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the AF_INET socket handler.
 *
 * Version:	@(#)sock.h	1.0.4	05/13/93
 *
 * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche <flla@stud.uni-sb.de>
 *
 * Fixes:
 *		Alan Cox	:	Volatiles in skbuff pointers. See
 *					skbuff comments. May be overdone,
 *					better to prove they can be removed
 *					than the reverse.
 *		Alan Cox	:	Added a zapped field for tcp to note
 *					a socket is reset and must stay shut up
 *		Alan Cox	:	New fields for options
 *		Pauline Middelink :	identd support
 *		Alan Cox	:	Eliminate low level recv/recvfrom
 *		David S. Miller	:	New socket lookup architecture.
 *		Steve Whitehouse:	Default routines for sock_ops
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _SOCK_H
#define _SOCK_H

#include <linux/config.h>
#include <linux/timer.h>
#include <linux/cache.h>
#include <linux/in.h>		/* struct sockaddr_in */

#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
#include <linux/in6.h>		/* struct sockaddr_in6 */
#include <linux/ipv6.h>		/* dest_cache, inet6_options */
#include <linux/icmpv6.h>
#include <net/if_inet6.h>	/* struct ipv6_mc_socklist */
#endif

#if defined(CONFIG_INET) || defined (CONFIG_INET_MODULE)
#include <linux/icmp.h>
#endif
#include <linux/tcp.h>		/* struct tcphdr */
#if defined(CONFIG_IP_SCTP) || defined (CONFIG_IP_SCTP_MODULE)
#include <net/sctp/structs.h>	/* struct sctp_opt */
#endif

#include <linux/netdevice.h>
#include <linux/skbuff.h>	/* struct sk_buff */
#include <net/protocol.h>	/* struct inet_protocol */
#if defined(CONFIG_X25) || defined(CONFIG_X25_MODULE)
#include <net/x25.h>
#endif
#if defined(CONFIG_WAN_ROUTER) || defined(CONFIG_WAN_ROUTER_MODULE)
#include <linux/if_wanpipe.h>
#endif

#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
#include <net/ax25.h>
#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE)
#include <net/netrom.h>
#endif
#if defined(CONFIG_ROSE) || defined(CONFIG_ROSE_MODULE)
#include <net/rose.h>
#endif
#endif

#if defined(CONFIG_PPPOE) || defined(CONFIG_PPPOE_MODULE)
#include <linux/if_pppox.h>
#include <linux/ppp_channel.h>	/* struct ppp_channel */
#endif

#if defined(CONFIG_IPX) || defined(CONFIG_IPX_MODULE)
#if defined(CONFIG_SPX) || defined(CONFIG_SPX_MODULE)
#include <net/spx.h>
#else
#include <net/ipx.h>
#endif /* CONFIG_SPX */
#endif /* CONFIG_IPX */

#if defined(CONFIG_ATALK) || defined(CONFIG_ATALK_MODULE)
#include <linux/atalk.h>
#endif

#if defined(CONFIG_DECNET) || defined(CONFIG_DECNET_MODULE)
#include <net/dn.h>
#endif

#if defined(CONFIG_IRDA) || defined(CONFIG_IRDA_MODULE)
#include <net/irda/irda.h>
#endif

#if defined(CONFIG_ATM) || defined(CONFIG_ATM_MODULE)
struct atm_vcc;
#endif

#ifdef CONFIG_FILTER
#include <linux/filter.h>
#endif

#include <asm/atomic.h>
#include <net/dst.h>


/* The AF_UNIX specific socket options */
struct unix_opt {
	struct unix_address	*addr;
	struct dentry		*dentry;
	struct vfsmount		*mnt;
	struct semaphore	readsem;
	struct sock		*other;
	struct sock		**list;
	struct sock		*gc_tree;
	atomic_t		inflight;
	rwlock_t		lock;
	wait_queue_head_t	peer_wait;
};


/* Once the IPX ncpd patches are in these are going into protinfo. */
#if defined(CONFIG_IPX) || defined(CONFIG_IPX_MODULE)
struct ipx_opt {
	ipx_address		dest_addr;
	ipx_interface		*intrfc;
	unsigned short		port;
#ifdef CONFIG_IPX_INTERN
	unsigned char		node[IPX_NODE_LEN];
#endif
	unsigned short		type;
	/*
	 * To handle special ncp connection-handling sockets for mars_nwe,
	 * the connection number must be stored in the socket.
	 */
	unsigned short		ipx_ncp_conn;
};
#endif

#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
struct ipv6_pinfo {
	struct in6_addr		saddr;
	struct in6_addr		rcv_saddr;
	struct in6_addr		daddr;
	struct in6_addr		*daddr_cache;

	__u32			flow_label;
	__u32			frag_size;
	int			hop_limit;
	int			mcast_hops;
	int			mcast_oif;

	/* pktoption flags */
	union {
		struct {
			__u8	srcrt:2,
				rxinfo:1,
				rxhlim:1,
				hopopts:1,
				dstopts:1,
				authhdr:1,
				rxflow:1;
		} bits;
		__u8		all;
	} rxopt;

	/* sockopt flags */
	__u8			mc_loop:1,
				recverr:1,
				sndflow:1,
				pmtudisc:2,
				ipv6only:1;

	struct ipv6_mc_socklist	*ipv6_mc_list;
	struct ipv6_ac_socklist	*ipv6_ac_list;
	struct ipv6_fl_socklist	*ipv6_fl_list;
	__u32			dst_cookie;

	struct ipv6_txoptions	*opt;
	struct sk_buff		*pktoptions;
};

struct raw6_opt {
	__u32			checksum;	/* perform checksum */
	__u32			offset;		/* checksum offset  */

	struct icmp6_filter	filter;
};

#define __ipv6_only_sock(sk)	((sk)->net_pinfo.af_inet6.ipv6only)
#define ipv6_only_sock(sk)	((sk)->family == PF_INET6 && \
				 (sk)->net_pinfo.af_inet6.ipv6only)
#else
#define __ipv6_only_sock(sk)	0
#define ipv6_only_sock(sk)	0
#endif /* IPV6 */

#if defined(CONFIG_INET) || defined(CONFIG_INET_MODULE)
struct raw_opt {
	struct icmp_filter	filter;
};
#endif

#if defined(CONFIG_INET) || defined (CONFIG_INET_MODULE)
struct inet_opt
{
	int			ttl;		/* TTL setting */
	int			tos;		/* TOS */
	unsigned		cmsg_flags;
	struct ip_options	*opt;
	unsigned char		hdrincl;	/* Include headers ? */
	__u8			mc_ttl;		/* Multicasting TTL */
	__u8			mc_loop;	/* Loopback */
	unsigned		recverr : 1,
				freebind : 1;
	__u16			id;		/* ID counter for DF pkts */
	__u8			pmtudisc;
	int			mc_index;	/* Multicast device index */
	__u32			mc_addr;
	struct ip_mc_socklist	*mc_list;	/* Group array */
};
#endif

#if defined(CONFIG_PPPOE) || defined (CONFIG_PPPOE_MODULE)
struct pppoe_opt
{
	struct net_device	*dev;	/* device associated with socket */
	struct pppoe_addr	pa;	/* what this socket is bound to */
	struct sockaddr_pppox	relay;	/* what socket data will be
					   relayed to (PPPoE relaying) */
};

struct pppox_opt
{
	struct ppp_channel	chan;
	struct sock		*sk;
	struct pppox_opt	*next;	/* for hash table */
	union {
		struct pppoe_opt pppoe;
	} proto;
};
#define pppoe_dev	proto.pppoe.dev
#define pppoe_pa	proto.pppoe.pa
#define pppoe_relay	proto.pppoe.relay
#endif

/* This defines a selective acknowledgement block. */
struct tcp_sack_block {
	__u32	start_seq;
	__u32	end_seq;
};

struct tcp_opt {
	int	tcp_header_len;	/* Bytes of tcp header to send		*/

	/*
	 *	Header prediction flags
	 *	0x5?10 << 16 + snd_wnd in net byte order
	 */
	__u32	pred_flags;

	/*
	 *	RFC793 variables by their proper names. This means you can
	 *	read the code and the spec side by side (and laugh ...)
	 *	See RFC793 and RFC1122. The RFC writes these in capitals.
	 */
	__u32	rcv_nxt;	/* What we want to receive next		*/
	__u32	snd_nxt;	/* Next sequence we send		*/

	__u32	snd_una;	/* First byte we want an ack for	*/
	__u32	snd_sml;	/* Last byte of the most recently transmitted small packet */
	__u32	rcv_tstamp;	/* timestamp of last received ACK (for keepalives) */
	__u32	lsndtime;	/* timestamp of last sent data packet (for restart window) */

	/* Delayed ACK control data */
	struct {
		__u8	pending;	/* ACK is pending		*/
		__u8	quick;		/* Scheduled number of quick acks */
		__u8	pingpong;	/* The session is interactive	*/
		__u8	blocked;	/* Delayed ACK was blocked by socket lock */
		__u32	ato;		/* Predicted tick of soft clock	*/
		unsigned long timeout;	/* Currently scheduled timeout	*/
		__u32	lrcvtime;	/* timestamp of last received data packet */
		__u16	last_seg_size;	/* Size of last incoming segment */
		__u16	rcv_mss;	/* MSS used for delayed ACK decisions */
	} ack;

	/* Data for direct copy to user */
	struct {
		struct sk_buff_head	prequeue;
		struct task_struct	*task;
		struct iovec		*iov;
		int			memory;
		int			len;
	} ucopy;

	__u32	snd_wl1;	/* Sequence for window update		*/
	__u32	snd_wnd;	/* The window we expect to receive	*/
	__u32	max_window;	/* Maximal window ever seen from peer	*/
	__u32	pmtu_cookie;	/* Last pmtu seen by socket		*/
	__u16	mss_cache;	/* Cached effective mss, not including SACKS */
	__u16	mss_clamp;	/* Maximal mss, negotiated at connection setup */
	__u16	ext_header_len;	/* Network protocol overhead (IP/IPv6 options) */
	__u8	ca_state;	/* State of fast-retransmit machine	*/
	__u8	retransmits;	/* Number of unrecovered RTO timeouts.	*/

	__u8	reordering;	/* Packet reordering metric.		*/
	__u8	queue_shrunk;	/* Write queue has been shrunk recently. */
	__u8	defer_accept;	/* User waits for some data after accept() */

	/* RTT measurement */
	__u8	backoff;	/* backoff				*/
	__u32	srtt;		/* smoothed round trip time << 3	*/
	__u32	mdev;		/* medium deviation			*/
	__u32	mdev_max;	/* maximal mdev for the last rtt period	*/
	__u32	rttvar;		/* smoothed mdev_max			*/
	__u32	rtt_seq;	/* sequence number to update rttvar	*/
	__u32	rto;		/* retransmit timeout			*/

	__u32	packets_out;	/* Packets which are "in flight"	*/
	__u32	left_out;	/* Packets which have left the network	*/
	__u32	retrans_out;	/* Retransmitted packets out		*/


	/*
	 * Slow start and congestion control (see also Nagle, and Karn & Partridge)
	 */
	__u32	snd_ssthresh;	/* Slow start size threshold		*/
	__u32	snd_cwnd;	/* Sending congestion window		*/
	__u16	snd_cwnd_cnt;	/* Linear increase counter		*/
	__u16	snd_cwnd_clamp;	/* Do not allow snd_cwnd to grow above this */
	__u32	snd_cwnd_used;
	__u32	snd_cwnd_stamp;

	/* Two commonly used timers in both sender and receiver paths. */
	unsigned long		timeout;
	struct timer_list	retransmit_timer;	/* Resend (no ack)	*/
	struct timer_list	delack_timer;		/* Ack delay		*/

	struct sk_buff_head	out_of_order_queue;	/* Out of order segments go here */

	struct tcp_func		*af_specific;	/* Operations which are AF_INET{4,6} specific */
	struct sk_buff		*send_head;	/* Front of stuff to transmit	*/
	struct page		*sndmsg_page;	/* Cached page for sendmsg	*/
	u32			sndmsg_off;	/* Cached offset for sendmsg	*/

	__u32	rcv_wnd;	/* Current receiver window		*/
	__u32	rcv_wup;	/* rcv_nxt on last window update sent	*/
	__u32	write_seq;	/* Tail(+1) of data held in tcp send buffer */
	__u32	pushed_seq;	/* Last pushed seq, required to talk to windows */
	__u32	copied_seq;	/* Head of yet unread data		*/
	/*
	 *	Options received (usually on last packet, some only on SYN packets).
	 */
	char	tstamp_ok,	/* TIMESTAMP seen on SYN packet		*/
		wscale_ok,	/* Wscale seen on SYN packet		*/
		sack_ok;	/* SACK seen on SYN packet		*/
	char	saw_tstamp;	/* Saw TIMESTAMP on last packet		*/
	__u8	snd_wscale;	/* Window scaling received from sender	*/
	__u8	rcv_wscale;	/* Window scaling to send to receiver	*/
	__u8	nonagle;	/* Disable Nagle algorithm?		*/
	__u8	keepalive_probes; /* num of allowed keep alive probes	*/

	/* PAWS/RTTM data */
	__u32	rcv_tsval;	/* Time stamp value			*/
	__u32	rcv_tsecr;	/* Time stamp echo reply		*/
	__u32	ts_recent;	/* Time stamp to echo next		*/
	long	ts_recent_stamp;/* Time we stored ts_recent (for aging)	*/

	/* SACKs data */
	__u16	user_mss;	/* mss requested by user in ioctl	*/
	__u8	dsack;		/* D-SACK is scheduled			*/
	__u8	eff_sacks;	/* Size of SACK array to send with next packet */
	struct tcp_sack_block duplicate_sack[1];	/* D-SACK block */
	struct tcp_sack_block selective_acks[4];	/* The SACKS themselves */

	__u32	window_clamp;	/* Maximal window to advertise		*/
	__u32	rcv_ssthresh;	/* Current window clamp			*/
	__u8	probes_out;	/* unanswered 0 window probes		*/
	__u8	num_sacks;	/* Number of SACK blocks		*/
	__u16	advmss;		/* Advertised MSS			*/

	__u8	syn_retries;	/* num of allowed syn retries		*/
	__u8	ecn_flags;	/* ECN status bits.			*/
	__u16	prior_ssthresh;	/* ssthresh saved at recovery start	*/
	__u32	lost_out;	/* Lost packets				*/
	__u32	sacked_out;	/* SACK'd packets			*/
	__u32	fackets_out;	/* FACK'd packets			*/
	__u32	high_seq;	/* snd_nxt at onset of congestion	*/

	__u32	retrans_stamp;	/* Timestamp of the last retransmit,
				 * also used in SYN-SENT to remember stamp of
				 * the first SYN. */
	__u32	undo_marker;	/* tracking retrans started here. */
	int	undo_retrans;	/* number of undoable retransmissions. */
	__u32	urg_seq;	/* Seq of received urgent pointer */
	__u16	urg_data;	/* Saved octet of OOB data and control flags */
	__u8	pending;	/* Scheduled timer event	*/
	__u8	urg_mode;	/* In urgent mode		*/
	__u32	snd_up;		/* Urgent pointer		*/

	/* The syn_wait_lock is necessary only to avoid tcp_get_info having
	 * to grab the main lock sock while browsing the listening hash
	 * (otherwise it's deadlock prone).
	 * This lock is acquired in read mode only from tcp_get_info() and
	 * it's acquired in write mode _only_ from code that is actively
	 * changing the syn_wait_queue. All readers that are holding
	 * the master sock lock don't need to grab this lock in read mode
	 * too as the syn_wait_queue writes are always protected from
	 * the main sock lock.
	 */
	rwlock_t		syn_wait_lock;
	struct tcp_listen_opt	*listen_opt;

	/* FIFO of established children */
	struct open_request	*accept_queue;
	struct open_request	*accept_queue_tail;

	int			write_pending;	/* A write to socket waits to start. */

	unsigned int		keepalive_time;	 /* time before keep alive takes place */
	unsigned int		keepalive_intvl; /* time interval between keep alive probes */
	int			linger2;

	int			frto_counter;	/* Number of new acks after RTO */
	__u32			frto_highmark;	/* snd_nxt when RTO occurred */

	unsigned long		last_synq_overflow;

	/* TCP Westwood structure */
	struct {
		__u32	bw_ns_est;	/* first bandwidth estimation..not too smoothed 8) */
		__u32	bw_est;		/* bandwidth estimate */
		__u32	rtt_win_sx;	/* here starts a new evaluation... */
		__u32	bk;
		__u32	snd_una;	/* used for evaluating the number of acked bytes */
		__u32	cumul_ack;
		__u32	accounted;
		__u32	rtt;
		__u32	rtt_min;	/* minimum observed RTT */
	} westwood;
};


/*
 * This structure really needs to be cleaned up.
 * Most of it is for TCP, and not used by any of
 * the other protocols.
 */

/*
 * The idea is to start moving to a newer struct gradually
 *
 * IMHO the newer struct should have the following format:
 *
 *	struct sock {
 *		sockmem [mem, proto, callbacks]
 *
 *		union or struct {
 *			ax25;
 *		} ll_pinfo;
 *
 *		union {
 *			ipv4;
 *			ipv6;
 *			ipx;
 *			netrom;
 *			rose;
 *			x25;
 *		} net_pinfo;
 *
 *		union {
 *			tcp;
 *			udp;
 *			spx;
 *			netrom;
 *		} tp_pinfo;
 *
 *	}
 *
 * The idea failed because the IPv6 transition assumes dual IP/IPv6 sockets.
 * So net_pinfo really is IPv6-only, and protinfo unifies all the other
 * private areas.
 */

/* Define this to get the sk->debug debugging facility. */
#define SOCK_DEBUGGING
#ifdef SOCK_DEBUGGING
#define SOCK_DEBUG(sk, msg...) do { if((sk) && ((sk)->debug)) printk(KERN_DEBUG msg); } while (0)
#else
#define SOCK_DEBUG(sk, msg...) do { } while (0)
#endif
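
/*
 * Illustrative sketch (not part of the original header): SOCK_DEBUG takes a
 * printk-style format string and only emits output when sk->debug has been
 * set (via the SO_DEBUG socket option). The function and message below are
 * hypothetical examples, compiled out with #if 0.
 */
#if 0
static void example_state_report(struct sock *sk)
{
	/* Prints only if sk->debug was enabled with setsockopt(SO_DEBUG). */
	SOCK_DEBUG(sk, "sock %p: state %d, rmem %d\n",
		   sk, sk->state, atomic_read(&sk->rmem_alloc));
}
#endif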

/* This is the per-socket lock.  The spinlock provides a synchronization
 * between user contexts and software interrupt processing, whereas the
 * mini-semaphore synchronizes multiple users amongst themselves.
 */
typedef struct {
	spinlock_t		slock;
	unsigned int		users;
	wait_queue_head_t	wq;
} socket_lock_t;

#define sock_lock_init(__sk) \
do {	spin_lock_init(&((__sk)->lock.slock)); \
	(__sk)->lock.users = 0; \
	init_waitqueue_head(&((__sk)->lock.wq)); \
} while(0)

struct sock {
	/* Socket demultiplex comparisons on incoming packets. */
	__u32			daddr;		/* Foreign IPv4 addr		*/
	__u32			rcv_saddr;	/* Bound local IPv4 addr	*/
	__u16			dport;		/* Destination port		*/
	unsigned short		num;		/* Local port			*/
	int			bound_dev_if;	/* Bound device index if != 0	*/

	/* Main hash linkage for various protocol lookup tables. */
	struct sock		*next;
	struct sock		**pprev;
	struct sock		*bind_next;
	struct sock		**bind_pprev;

	volatile unsigned char	state,		/* Connection state		*/
				zapped;		/* In ax25 & ipx means not linked */
	__u16			sport;		/* Source port			*/

	unsigned short		family;		/* Address family		*/
	unsigned char		reuse;		/* SO_REUSEADDR setting		*/
	unsigned char		shutdown;
	atomic_t		refcnt;		/* Reference count		*/

	socket_lock_t		lock;		/* Synchronizer...		*/
	int			rcvbuf;		/* Size of receive buffer in bytes */

	wait_queue_head_t	*sleep;		/* Sock wait queue		*/
	struct dst_entry	*dst_cache;	/* Destination cache		*/
	rwlock_t		dst_lock;
	atomic_t		rmem_alloc;	/* Receive queue bytes committed */
	struct sk_buff_head	receive_queue;	/* Incoming packets		*/
	atomic_t		wmem_alloc;	/* Transmit queue bytes committed */
	struct sk_buff_head	write_queue;	/* Packet sending queue		*/
	atomic_t		omem_alloc;	/* "o" is "option" or "other"	*/
	int			wmem_queued;	/* Persistent queue size	*/
	int			forward_alloc;	/* Space allocated forward.	*/
	__u32			saddr;		/* Sending source		*/
	unsigned int		allocation;	/* Allocation mode		*/
	int			sndbuf;		/* Size of send buffer in bytes	*/
	struct sock		*prev;

	/* Not all are volatile, but some are, so we might as well say they all are.
	 * XXX Make this a flag word -DaveM
	 */
	volatile char		dead,
				done,
				urginline,
				keepopen,
				linger,
				destroy,
				no_check,
				broadcast,
				bsdism;
	unsigned char		debug;
	unsigned char		rcvtstamp;
	unsigned char		use_write_queue;
	unsigned char		userlocks;
	/* Hole of 3 bytes. Try to pack. */
	int			route_caps;
	int			proc;
	unsigned long		lingertime;

	int			hashent;
	struct sock		*pair;

	/* The backlog queue is special, it is always used with
	 * the per-socket spinlock held and requires low latency
	 * access. Therefore we special case its implementation.
	 */
	struct {
		struct sk_buff *head;
		struct sk_buff *tail;
	} backlog;

	rwlock_t		callback_lock;

	/* Error queue, rarely used. */
	struct sk_buff_head	error_queue;

	struct proto		*prot;

#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
	union {
		struct ipv6_pinfo	af_inet6;
	} net_pinfo;
#endif

	union {
		struct tcp_opt		af_tcp;
#if defined(CONFIG_IP_SCTP) || defined (CONFIG_IP_SCTP_MODULE)
		struct sctp_opt		af_sctp;
#endif
#if defined(CONFIG_INET) || defined (CONFIG_INET_MODULE)
		struct raw_opt		tp_raw4;
#endif
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
		struct raw6_opt		tp_raw;
#endif /* CONFIG_IPV6 */
#if defined(CONFIG_SPX) || defined (CONFIG_SPX_MODULE)
		struct spx_opt		af_spx;
#endif /* CONFIG_SPX */

	} tp_pinfo;

	int			err, err_soft;	/* Soft holds errors that don't
						   cause failure but are the cause
						   of a persistent failure not just
						   'timed out' */
	unsigned short		ack_backlog;
	unsigned short		max_ack_backlog;
	__u32			priority;
	unsigned short		type;
	unsigned char		localroute;	/* Route locally only */
	unsigned char		protocol;
	struct ucred		peercred;
	int			rcvlowat;
	long			rcvtimeo;
	long			sndtimeo;

#ifdef CONFIG_FILTER
	/* Socket Filtering Instructions */
	struct sk_filter	*filter;
#endif /* CONFIG_FILTER */

	/* This is where all the private (optional) areas that don't
	 * overlap will eventually live.
	 */
	union {
		void *destruct_hook;
		struct unix_opt		af_unix;
#if defined(CONFIG_INET) || defined (CONFIG_INET_MODULE)
		struct inet_opt		af_inet;
#endif
#if defined(CONFIG_ATALK) || defined(CONFIG_ATALK_MODULE)
		struct atalk_sock	af_at;
#endif
#if defined(CONFIG_IPX) || defined(CONFIG_IPX_MODULE)
		struct ipx_opt		af_ipx;
#endif
#if defined (CONFIG_DECNET) || defined(CONFIG_DECNET_MODULE)
		struct dn_scp		dn;
#endif
#if defined (CONFIG_PACKET) || defined(CONFIG_PACKET_MODULE)
		struct packet_opt	*af_packet;
#endif
#if defined(CONFIG_X25) || defined(CONFIG_X25_MODULE)
		x25_cb			*x25;
#endif
#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
		ax25_cb			*ax25;
#endif
#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE)
		nr_cb			*nr;
#endif
#if defined(CONFIG_ROSE) || defined(CONFIG_ROSE_MODULE)
		rose_cb			*rose;
#endif
#if defined(CONFIG_PPPOE) || defined(CONFIG_PPPOE_MODULE)
		struct pppox_opt	*pppox;
#endif
		struct netlink_opt	*af_netlink;
#if defined(CONFIG_ECONET) || defined(CONFIG_ECONET_MODULE)
		struct econet_opt	*af_econet;
#endif
#if defined(CONFIG_ATM) || defined(CONFIG_ATM_MODULE)
		struct atm_vcc		*af_atm;
#endif
#if defined(CONFIG_IRDA) || defined(CONFIG_IRDA_MODULE)
		struct irda_sock	*irda;
#endif
#if defined(CONFIG_WAN_ROUTER) || defined(CONFIG_WAN_ROUTER_MODULE)
		struct wanpipe_opt	*af_wanpipe;
#endif
	} protinfo;


	/* This part is used for the timeout functions. */
	struct timer_list	timer;		/* This is the sock cleanup timer. */
	struct timeval		stamp;

	/* Identd and reporting IO signals */
	struct socket		*socket;

	/* RPC layer private data */
	void			*user_data;

	/* Callbacks */
	void			(*state_change)(struct sock *sk);
	void			(*data_ready)(struct sock *sk, int bytes);
	void			(*write_space)(struct sock *sk);
	void			(*error_report)(struct sock *sk);

	int			(*backlog_rcv) (struct sock *sk,
						struct sk_buff *skb);
	void			(*destruct)(struct sock *sk);
};

/* The per-socket spinlock must be held here. */
#define sk_add_backlog(__sk, __skb)			\
do {	if((__sk)->backlog.tail == NULL) {		\
		(__sk)->backlog.head =			\
		     (__sk)->backlog.tail = (__skb);	\
	} else {					\
		((__sk)->backlog.tail)->next = (__skb);	\
		(__sk)->backlog.tail = (__skb);		\
	}						\
	(__skb)->next = NULL;				\
} while(0)
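
/*
 * Illustrative sketch (not part of the original header): a typical bottom-half
 * receive path defers packets to the backlog when a user context currently
 * owns the socket lock; compare tcp_v4_rcv(). The function name below is
 * hypothetical and the block is compiled out with #if 0.
 */
#if 0
static int example_proto_rcv(struct sock *sk, struct sk_buff *skb)
{
	int ret = 0;

	bh_lock_sock(sk);
	if (sk->lock.users == 0)
		ret = sk->backlog_rcv(sk, skb);	/* process immediately */
	else
		sk_add_backlog(sk, skb);	/* lock owner drains it in release_sock() */
	bh_unlock_sock(sk);
	return ret;
}
#endif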

/* IP protocol blocks we attach to sockets.
 * socket layer -> transport layer interface
 * transport -> network interface is defined by struct inet_proto
 */
struct proto {
	void			(*close)(struct sock *sk,
					 long timeout);
	int			(*connect)(struct sock *sk,
					   struct sockaddr *uaddr,
					   int addr_len);
	int			(*disconnect)(struct sock *sk, int flags);

	struct sock *		(*accept) (struct sock *sk, int flags, int *err);

	int			(*ioctl)(struct sock *sk, int cmd,
					 unsigned long arg);
	int			(*init)(struct sock *sk);
	int			(*destroy)(struct sock *sk);
	void			(*shutdown)(struct sock *sk, int how);
	int			(*setsockopt)(struct sock *sk, int level,
					int optname, char *optval, int optlen);
	int			(*getsockopt)(struct sock *sk, int level,
					int optname, char *optval,
					int *option);
	int			(*sendmsg)(struct sock *sk, struct msghdr *msg,
					   int len);
	int			(*recvmsg)(struct sock *sk, struct msghdr *msg,
					int len, int noblock, int flags,
					int *addr_len);
	int			(*bind)(struct sock *sk,
					struct sockaddr *uaddr, int addr_len);

	int			(*backlog_rcv) (struct sock *sk,
						struct sk_buff *skb);

	/* Keeping track of sk's, looking them up, and port selection methods. */
	void			(*hash)(struct sock *sk);
	void			(*unhash)(struct sock *sk);
	int			(*get_port)(struct sock *sk, unsigned short snum);

	char			name[32];

	struct {
		int inuse;
		u8  __pad[SMP_CACHE_BYTES - sizeof(int)];
	} stats[NR_CPUS];
};

/* Called with local bh disabled */
static __inline__ void sock_prot_inc_use(struct proto *prot)
{
	prot->stats[smp_processor_id()].inuse++;
}

static __inline__ void sock_prot_dec_use(struct proto *prot)
{
	prot->stats[smp_processor_id()].inuse--;
}

/* About 10 seconds */
#define SOCK_DESTROY_TIME (10*HZ)

/* Sockets 0-1023 can't be bound to unless you are superuser */
#define PROT_SOCK	1024

#define SHUTDOWN_MASK	3
#define RCV_SHUTDOWN	1
#define SEND_SHUTDOWN	2

#define SOCK_SNDBUF_LOCK	1
#define SOCK_RCVBUF_LOCK	2
#define SOCK_BINDADDR_LOCK	4
#define SOCK_BINDPORT_LOCK	8


/* Used by processes to "lock" a socket state, so that
 * interrupts and bottom half handlers won't change it
 * from under us. It essentially blocks any incoming
 * packets, so that we won't get any new data or any
 * packets that change the state of the socket.
 *
 * While locked, BH processing will add new packets to
 * the backlog queue.  This queue is processed by the
 * owner of the socket lock right before it is released.
 *
 * Since ~2.3.5 it is also an exclusive sleep lock serializing
 * accesses from user process context.
 */
extern void __lock_sock(struct sock *sk);
extern void __release_sock(struct sock *sk);
#define lock_sock(__sk) \
do {	spin_lock_bh(&((__sk)->lock.slock)); \
	if ((__sk)->lock.users != 0) \
		__lock_sock(__sk); \
	(__sk)->lock.users = 1; \
	spin_unlock_bh(&((__sk)->lock.slock)); \
} while(0)

#define release_sock(__sk) \
do {	spin_lock_bh(&((__sk)->lock.slock)); \
	if ((__sk)->backlog.tail != NULL) \
		__release_sock(__sk); \
	(__sk)->lock.users = 0; \
	if (waitqueue_active(&((__sk)->lock.wq))) wake_up(&((__sk)->lock.wq)); \
	spin_unlock_bh(&((__sk)->lock.slock)); \
} while(0)
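
/*
 * Illustrative sketch (not part of the original header): process-context code
 * brackets socket state changes with lock_sock()/release_sock(); release_sock()
 * also runs any packets that BH handlers queued on the backlog in the meantime.
 * The function name is hypothetical and the block is compiled out with #if 0.
 */
#if 0
static void example_setsockopt_body(struct sock *sk, int val)
{
	lock_sock(sk);		/* may sleep; excludes other users and BHs */
	sk->rcvlowat = val ? : 1;
	release_sock(sk);	/* processes the backlog, wakes lock waiters */
}
#endif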

/* BH context may only use the following locking interface. */
#define bh_lock_sock(__sk)	spin_lock(&((__sk)->lock.slock))
#define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->lock.slock))

extern struct sock *		sk_alloc(int family, int priority, int zero_it);
extern void			sk_free(struct sock *sk);

extern struct sk_buff		*sock_wmalloc(struct sock *sk,
					      unsigned long size, int force,
					      int priority);
extern struct sk_buff		*sock_rmalloc(struct sock *sk,
					      unsigned long size, int force,
					      int priority);
extern void			sock_wfree(struct sk_buff *skb);
extern void			sock_rfree(struct sk_buff *skb);

extern int			sock_setsockopt(struct socket *sock, int level,
						int op, char *optval,
						int optlen);

extern int			sock_getsockopt(struct socket *sock, int level,
						int op, char *optval,
						int *optlen);
extern struct sk_buff		*sock_alloc_send_skb(struct sock *sk,
						     unsigned long size,
						     int noblock,
						     int *errcode);
extern struct sk_buff		*sock_alloc_send_pskb(struct sock *sk,
						      unsigned long header_len,
						      unsigned long data_len,
						      int noblock,
						      int *errcode);
extern void *sock_kmalloc(struct sock *sk, int size, int priority);
extern void sock_kfree_s(struct sock *sk, void *mem, int size);

/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * does not implement a particular function.
 */
extern int			sock_no_release(struct socket *);
extern int			sock_no_bind(struct socket *,
					     struct sockaddr *, int);
extern int			sock_no_connect(struct socket *,
						struct sockaddr *, int, int);
extern int			sock_no_socketpair(struct socket *,
						   struct socket *);
extern int			sock_no_accept(struct socket *,
					       struct socket *, int);
extern int			sock_no_getname(struct socket *,
						struct sockaddr *, int *, int);
extern unsigned int		sock_no_poll(struct file *, struct socket *,
					     struct poll_table_struct *);
extern int			sock_no_ioctl(struct socket *, unsigned int,
					      unsigned long);
extern int			sock_no_listen(struct socket *, int);
extern int			sock_no_shutdown(struct socket *, int);
extern int			sock_no_getsockopt(struct socket *, int , int,
						   char *, int *);
extern int			sock_no_setsockopt(struct socket *, int, int,
						   char *, int);
extern int			sock_no_fcntl(struct socket *,
					      unsigned int, unsigned long);
extern int			sock_no_sendmsg(struct socket *,
						struct msghdr *, int,
						struct scm_cookie *);
extern int			sock_no_recvmsg(struct socket *,
						struct msghdr *, int, int,
						struct scm_cookie *);
extern int			sock_no_mmap(struct file *file,
					     struct socket *sock,
					     struct vm_area_struct *vma);
extern ssize_t			sock_no_sendpage(struct socket *sock,
						 struct page *page,
						 int offset, size_t size,
						 int flags);

/*
 *	Default socket callbacks and setup code
 */

extern void sock_def_destruct(struct sock *);

/* Initialise core socket variables */
extern void sock_init_data(struct socket *sock, struct sock *sk);

extern void sklist_remove_socket(struct sock **list, struct sock *sk);
extern void sklist_insert_socket(struct sock **list, struct sock *sk);
extern void sklist_destroy_socket(struct sock **list, struct sock *sk);

#ifdef CONFIG_FILTER

/**
 *	sk_filter - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *	@needlock: set to 1 if the sock is not locked by caller.
 *
 * Run the filter code and then cut skb->data to the correct size returned
 * by sk_run_filter. If pkt_len is 0 we toss the packet. If skb->len is
 * smaller than pkt_len we keep the whole skb->data. This is the socket
 * level wrapper to sk_run_filter. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 */

static inline int sk_filter(struct sock *sk, struct sk_buff *skb, int needlock)
{
	int err = 0;

	if (sk->filter) {
		struct sk_filter *filter;

		if (needlock)
			bh_lock_sock(sk);

		filter = sk->filter;
		if (filter) {
			int pkt_len = sk_run_filter(skb, filter->insns,
						    filter->len);
			if (!pkt_len)
				err = -EPERM;
			else
				skb_trim(skb, pkt_len);
		}

		if (needlock)
			bh_unlock_sock(sk);
	}
	return err;
}

/**
 *	sk_filter_release: Release a socket filter
 *	@sk: socket
 *	@fp: filter to remove
 *
 *	Remove a filter from a socket and release its resources.
 */

static inline void sk_filter_release(struct sock *sk, struct sk_filter *fp)
{
	unsigned int size = sk_filter_len(fp);

	atomic_sub(size, &sk->omem_alloc);

	if (atomic_dec_and_test(&fp->refcnt))
		kfree(fp);
}

static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	atomic_inc(&fp->refcnt);
	atomic_add(sk_filter_len(fp), &sk->omem_alloc);
}

#else

static inline int sk_filter(struct sock *sk, struct sk_buff *skb, int needlock)
{
	return 0;
}

#endif /* CONFIG_FILTER */

/*
 * Socket reference counting postulates.
 *
 * * Each user of socket SHOULD hold a reference count.
 * * Each access point to socket (a hash table bucket, reference from a list,
 *   running timer, skb in flight) MUST hold a reference count.
 * * When reference count hits 0, it means it will never increase back.
 * * When reference count hits 0, it means that no references from
 *   outside exist to this socket and current process on current CPU
 *   is last user and may/should destroy this socket.
 * * sk_free is called from any context: process, BH, IRQ. When
 *   it is called, socket has no references from outside -> sk_free
 *   may release descendant resources allocated by the socket, but
 *   to the time when it is called, socket is NOT referenced by any
 *   hash tables, lists etc.
 * * Packets, delivered from outside (from network or from another process)
 *   and enqueued on receive/error queues SHOULD NOT grab reference count,
 *   while they sit in queue. Otherwise, packets will leak through the hole
 *   when a socket is looked up by one CPU and unhashing is done by another.
 *   This holds for udp/raw and netlink (leak to receive and error queues)
 *   and for tcp (leak to backlog). Packet socket does all the processing
 *   inside BR_NETPROTO_LOCK, so it does not have this race condition.
 *   UNIX sockets use a separate SMP lock, so they are protected as well.
 */

/* Grab socket reference count. This operation is valid only
   when sk is ALREADY grabbed, e.g. it is found in a hash table
   or a list and the lookup is made under a lock preventing hash table
   modifications.
 */

static inline void sock_hold(struct sock *sk)
{
	atomic_inc(&sk->refcnt);
}

/* Ungrab socket in the context, which assumes that socket refcnt
   cannot hit zero, e.g. it is true in context of any socketcall.
 */
static inline void __sock_put(struct sock *sk)
{
	atomic_dec(&sk->refcnt);
}

/* Ungrab socket and destroy it, if it was the last reference. */
static inline void sock_put(struct sock *sk)
{
	if (atomic_dec_and_test(&sk->refcnt))
		sk_free(sk);
}
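
/*
 * Illustrative sketch (not part of the original header): a lookup made under
 * the table lock may take a reference with sock_hold() and must drop it with
 * sock_put() when done; the final sock_put() frees the socket. The lock and
 * lookup names below are hypothetical; the block is compiled out with #if 0.
 */
#if 0
static rwlock_t example_hash_lock = RW_LOCK_UNLOCKED;	/* hypothetical */

static struct sock *example_lookup(struct sock *head, __u16 dport)
{
	struct sock *sk;

	read_lock(&example_hash_lock);
	for (sk = head; sk; sk = sk->next)
		if (sk->dport == dport) {
			sock_hold(sk);		/* valid: table lock pins sk */
			break;
		}
	read_unlock(&example_hash_lock);
	return sk;				/* caller ends with sock_put(sk) */
}
#endif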

/* Detach socket from process context.
 * Announce socket dead, detach it from wait queue and inode.
 * Note that the parent inode holds a reference count on this struct sock;
 * we do not release it in this function, because the protocol
 * probably wants some additional cleanups or even to continue
 * working with this socket (TCP).
 */
static inline void sock_orphan(struct sock *sk)
{
	write_lock_bh(&sk->callback_lock);
	sk->dead = 1;
	sk->socket = NULL;
	sk->sleep = NULL;
	write_unlock_bh(&sk->callback_lock);
}

static inline void sock_graft(struct sock *sk, struct socket *parent)
{
	write_lock_bh(&sk->callback_lock);
	sk->sleep = &parent->wait;
	parent->sk = sk;
	sk->socket = parent;
	write_unlock_bh(&sk->callback_lock);
}

static inline int sock_i_uid(struct sock *sk)
{
	int uid;

	read_lock(&sk->callback_lock);
	uid = sk->socket ? sk->socket->inode->i_uid : 0;
	read_unlock(&sk->callback_lock);
	return uid;
}

static inline unsigned long sock_i_ino(struct sock *sk)
{
	unsigned long ino;

	read_lock(&sk->callback_lock);
	ino = sk->socket ? sk->socket->inode->i_ino : 0;
	read_unlock(&sk->callback_lock);
	return ino;
}

static inline struct dst_entry *
__sk_dst_get(struct sock *sk)
{
	return sk->dst_cache;
}

static inline struct dst_entry *
sk_dst_get(struct sock *sk)
{
	struct dst_entry *dst;

	read_lock(&sk->dst_lock);
	dst = sk->dst_cache;
	if (dst)
		dst_hold(dst);
	read_unlock(&sk->dst_lock);
	return dst;
}

static inline void
__sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old_dst;

	old_dst = sk->dst_cache;
	sk->dst_cache = dst;
	dst_release(old_dst);
}

static inline void
sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	write_lock(&sk->dst_lock);
	__sk_dst_set(sk, dst);
	write_unlock(&sk->dst_lock);
}

static inline void
__sk_dst_reset(struct sock *sk)
{
	struct dst_entry *old_dst;

	old_dst = sk->dst_cache;
	sk->dst_cache = NULL;
	dst_release(old_dst);
}

static inline void
sk_dst_reset(struct sock *sk)
{
	write_lock(&sk->dst_lock);
	__sk_dst_reset(sk);
	write_unlock(&sk->dst_lock);
}

static inline struct dst_entry *
__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk->dst_cache;

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk->dst_cache = NULL;
		return NULL;
	}

	return dst;
}

static inline struct dst_entry *
sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		return NULL;
	}

	return dst;
}
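
/*
 * Illustrative sketch (not part of the original header): transmit paths
 * revalidate the cached route with sk_dst_check() and, on a miss, perform a
 * fresh lookup and cache it with sk_dst_set(); compare ip_queue_xmit(). The
 * route-lookup helper is hypothetical; the block is compiled out with #if 0.
 */
#if 0
extern struct dst_entry *example_route_lookup(struct sock *sk); /* hypothetical,
								   returns a held ref */

static struct dst_entry *example_get_route(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_check(sk, cookie);	/* held ref or NULL */

	if (dst == NULL) {
		dst = example_route_lookup(sk);
		if (dst == NULL)
			return NULL;
		sk_dst_set(sk, dst);	/* the cache takes over this reference */
		dst = sk_dst_get(sk);	/* take our own hold for the caller */
	}
	return dst;			/* caller drops it with dst_release() */
}
#endif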


/*
 *	Queue a received datagram if it will fit. Stream and sequenced
 *	protocols can't normally use this as they need to fit buffers in
 *	and play with them.
 *
 *	Inlined as it's very short and called for pretty much every
 *	packet ever received.
 */

static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
{
	sock_hold(sk);
	skb->sk = sk;
	skb->destructor = sock_wfree;
	atomic_add(skb->truesize, &sk->wmem_alloc);
}

static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb->sk = sk;
	skb->destructor = sock_rfree;
	atomic_add(skb->truesize, &sk->rmem_alloc);
}

static inline int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err = 0;
	int skb_len;

	/* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
	   the number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->rmem_alloc) + skb->truesize >= (unsigned)sk->rcvbuf) {
		err = -ENOMEM;
		goto out;
	}

	/* It would deadlock if sock_queue_rcv_skb were used with
	   the socket lock held! We assume that users of this
	   function are lock free.
	 */
	err = sk_filter(sk, skb, 1);
	if (err)
		goto out;

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* Cache the SKB length before we tack it onto the receive
	 * queue.  Once it is added it no longer belongs to us and
	 * may be freed by other threads of control pulling packets
	 * from the queue.
	 */
	skb_len = skb->len;

	skb_queue_tail(&sk->receive_queue, skb);
	if (!sk->dead)
		sk->data_ready(sk, skb_len);
out:
	return err;
}

static inline int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
{
	/* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
	   the number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->rmem_alloc) + skb->truesize >= (unsigned)sk->rcvbuf)
		return -ENOMEM;
	skb_set_owner_r(skb, sk);
	skb_queue_tail(&sk->error_queue, skb);
	if (!sk->dead)
		sk->data_ready(sk, skb->len);
	return 0;
}

/*
 *	Recover an error report and clear atomically
 */

static inline int sock_error(struct sock *sk)
{
	int err = xchg(&sk->err, 0);
	return -err;
}

static inline unsigned long sock_wspace(struct sock *sk)
{
	int amt = 0;

	if (!(sk->shutdown & SEND_SHUTDOWN)) {
		amt = sk->sndbuf - atomic_read(&sk->wmem_alloc);
		if (amt < 0)
			amt = 0;
	}
	return amt;
}

static inline void sk_wake_async(struct sock *sk, int how, int band)
{
	if (sk->socket && sk->socket->fasync_list)
		sock_wake_async(sk->socket, how, band);
}

#define SOCK_MIN_SNDBUF 2048
#define SOCK_MIN_RCVBUF 256

/*
 *	Default write policy as shown to user space via poll/select/SIGIO
 */
static inline int sock_writeable(struct sock *sk)
{
	return atomic_read(&sk->wmem_alloc) < (sk->sndbuf / 2);
}

static inline int gfp_any(void)
{
	return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
}

static inline long sock_rcvtimeo(struct sock *sk, int noblock)
{
	return noblock ? 0 : sk->rcvtimeo;
}

static inline long sock_sndtimeo(struct sock *sk, int noblock)
{
	return noblock ? 0 : sk->sndtimeo;
}

static inline int sock_rcvlowat(struct sock *sk, int waitall, int len)
{
	return (waitall ? len : min_t(int, sk->rcvlowat, len)) ? : 1;
}

/* Alas, with timeout socket operations are not restartable.
 * Compare this to poll().
 */
static inline int sock_intr_errno(long timeo)
{
	return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
}

static __inline__ void
sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
{
	if (sk->rcvtstamp)
		put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP, sizeof(skb->stamp), &skb->stamp);
	else
		sk->stamp = skb->stamp;
}

/*
 *	Enable debug/info messages
 */

#if 0
#define NETDEBUG(x)	do { } while (0)
#else
#define NETDEBUG(x)	do { x; } while (0)
#endif

/*
 *	Macros for sleeping on a socket. Use them like this:
 *
 *	SOCK_SLEEP_PRE(sk)
 *	if (condition)
 *		schedule();
 *	SOCK_SLEEP_POST(sk)
 *
 */

#define SOCK_SLEEP_PRE(sk)	{ struct task_struct *tsk = current; \
				DECLARE_WAITQUEUE(wait, tsk); \
				tsk->state = TASK_INTERRUPTIBLE; \
				add_wait_queue((sk)->sleep, &wait); \
				release_sock(sk);

#define SOCK_SLEEP_POST(sk)	tsk->state = TASK_RUNNING; \
				remove_wait_queue((sk)->sleep, &wait); \
				lock_sock(sk); \
				}

extern __u32 sysctl_wmem_max;
extern __u32 sysctl_rmem_max;

#endif /* _SOCK_H */