1 |
1633 |
jcastillo |
/*
|
2 |
|
|
* INET An implementation of the TCP/IP protocol suite for the LINUX
|
3 |
|
|
* operating system. INET is implemented using the BSD Socket
|
4 |
|
|
* interface as the means of communication with the user level.
|
5 |
|
|
*
|
6 |
|
|
* Definitions for the AF_INET socket handler.
|
7 |
|
|
*
|
8 |
|
|
* Version: @(#)sock.h 1.0.4 05/13/93
|
9 |
|
|
*
|
10 |
|
|
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
|
11 |
|
|
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
|
12 |
|
|
* Corey Minyard <wf-rch!minyard@relay.EU.net>
|
13 |
|
|
* Florian La Roche <flla@stud.uni-sb.de>
|
14 |
|
|
*
|
15 |
|
|
* Fixes:
|
16 |
|
|
* Alan Cox : Volatiles in skbuff pointers. See
|
17 |
|
|
* skbuff comments. May be overdone,
|
18 |
|
|
* better to prove they can be removed
|
19 |
|
|
* than the reverse.
|
20 |
|
|
* Alan Cox : Added a zapped field for tcp to note
|
21 |
|
|
* a socket is reset and must stay shut up
|
22 |
|
|
* Alan Cox : New fields for options
|
23 |
|
|
* Pauline Middelink : identd support
|
24 |
|
|
* Alan Cox : Eliminate low level recv/recvfrom
|
25 |
|
|
* David S. Miller : New socket lookup architecture for ISS.
|
26 |
|
|
* Elliot Poger : New field for SO_BINDTODEVICE option.
|
27 |
|
|
*
|
28 |
|
|
* This program is free software; you can redistribute it and/or
|
29 |
|
|
* modify it under the terms of the GNU General Public License
|
30 |
|
|
* as published by the Free Software Foundation; either version
|
31 |
|
|
* 2 of the License, or (at your option) any later version.
|
32 |
|
|
*/
|
33 |
|
|
#ifndef _SOCK_H
|
34 |
|
|
#define _SOCK_H
|
35 |
|
|
|
36 |
|
|
#include <linux/timer.h>
|
37 |
|
|
#include <linux/ip.h> /* struct options */
|
38 |
|
|
#include <linux/in.h> /* struct sockaddr_in */
|
39 |
|
|
#include <linux/tcp.h> /* struct tcphdr */
|
40 |
|
|
#include <linux/config.h>
|
41 |
|
|
|
42 |
|
|
#include <linux/netdevice.h>
|
43 |
|
|
#include <linux/skbuff.h> /* struct sk_buff */
|
44 |
|
|
#include <net/protocol.h> /* struct inet_protocol */
|
45 |
|
|
#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
|
46 |
|
|
#include <net/ax25.h>
|
47 |
|
|
#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE)
|
48 |
|
|
#include <net/netrom.h>
|
49 |
|
|
#endif
|
50 |
|
|
#if defined(CONFIG_ROSE) || defined(CONFIG_ROSE_MODULE)
|
51 |
|
|
#include <net/rose.h>
|
52 |
|
|
#endif
|
53 |
|
|
#endif
|
54 |
|
|
|
55 |
|
|
#if defined(CONFIG_IPX) || defined(CONFIG_IPX_MODULE)
|
56 |
|
|
#include <net/ipx.h>
|
57 |
|
|
#endif
|
58 |
|
|
|
59 |
|
|
#if defined(CONFIG_ATALK) || defined(CONFIG_ATALK_MODULE)
|
60 |
|
|
#include <linux/atalk.h>
|
61 |
|
|
#endif
|
62 |
|
|
|
63 |
|
|
#include <linux/igmp.h>
|
64 |
|
|
|
65 |
|
|
#include <asm/atomic.h>
|
66 |
|
|
|
67 |
|
|
/*
|
68 |
|
|
* The AF_UNIX specific socket options
|
69 |
|
|
*/
|
70 |
|
|
|
71 |
|
|
/*
 *	Per-socket state for AF_UNIX sockets; lives in sock.protinfo.af_unix.
 */
struct unix_opt
{
	int			family;		/* Address family of this socket */
	char			*name;		/* NOTE(review): presumably the bound filesystem path -- confirm in af_unix.c */
	int			locks;		/* Lock/reference count on this entry */
	struct inode		*inode;		/* Inode the socket is bound to, if any */
	struct semaphore	readsem;	/* Serialises readers of this socket */
	struct sock		*other;		/* NOTE(review): looks like the connected peer socket -- confirm */
	int			marksweep;	/* Mark-and-sweep GC state bits (see MARKED) */
#define MARKED			1
	int			inflight;	/* NOTE(review): presumably count of in-flight references for fd-passing GC -- confirm */
};
|
83 |
|
|
|
84 |
|
|
/*
|
85 |
|
|
* IP packet socket options
|
86 |
|
|
*/
|
87 |
|
|
|
88 |
|
|
/*
 *	Per-socket state for raw/packet sockets; lives in
 *	sock.protinfo.af_packet.
 */
struct inet_packet_opt
{
	struct notifier_block	notifier;	/* Used when bound */
	struct device		*bound_dev;	/* Device bound to, see SO_BINDTODEVICE note in file header */
	unsigned long		dev_stamp;	/* NOTE(review): presumably a device generation stamp -- confirm */
	struct packet_type	*prot_hook;	/* Hook registered in the packet receive path */
	char			device_name[15];	/* Name of the bound device */
};
|
96 |
|
|
|
97 |
|
|
/*
|
98 |
|
|
* Once the IPX ncpd patches are in these are going into protinfo
|
99 |
|
|
*/
|
100 |
|
|
|
101 |
|
|
#if defined(CONFIG_IPX) || defined(CONFIG_IPX_MODULE)
|
102 |
|
|
struct ipx_opt
|
103 |
|
|
{
|
104 |
|
|
ipx_address dest_addr;
|
105 |
|
|
ipx_interface *intrfc;
|
106 |
|
|
unsigned short port;
|
107 |
|
|
#ifdef CONFIG_IPX_INTERN
|
108 |
|
|
unsigned char node[IPX_NODE_LEN];
|
109 |
|
|
#endif
|
110 |
|
|
unsigned short type;
|
111 |
|
|
/*
|
112 |
|
|
* To handle asynchronous messages from the NetWare server, we have to
|
113 |
|
|
* know the connection this socket belongs to.
|
114 |
|
|
*/
|
115 |
|
|
struct ncp_server *ncp_server;
|
116 |
|
|
/*
|
117 |
|
|
* To handle special ncp connection-handling sockets for mars_nwe,
|
118 |
|
|
* the connection number must be stored in the socket.
|
119 |
|
|
*/
|
120 |
|
|
unsigned short ipx_ncp_conn;
|
121 |
|
|
};
|
122 |
|
|
#endif
|
123 |
|
|
|
124 |
|
|
#ifdef CONFIG_NUTCP
|
125 |
|
|
struct tcp_opt
|
126 |
|
|
{
|
127 |
|
|
/*
|
128 |
|
|
* RFC793 variables by their proper names. This means you can
|
129 |
|
|
* read the code and the spec side by side (and laugh ...)
|
130 |
|
|
* See RFC793 and RFC1122. The RFC writes these in capitals.
|
131 |
|
|
*/
|
132 |
|
|
__u32 rcv_nxt; /* What we want to receive next */
|
133 |
|
|
__u32 rcv_up; /* The urgent point (may not be valid) */
|
134 |
|
|
__u32 rcv_wnd; /* Current receiver window */
|
135 |
|
|
__u32 snd_nxt; /* Next sequence we send */
|
136 |
|
|
__u32 snd_una; /* First byte we want an ack for */
|
137 |
|
|
__u32 snd_up; /* Outgoing urgent pointer */
|
138 |
|
|
__u32 snd_wl1; /* Sequence for window update */
|
139 |
|
|
__u32 snd_wl2; /* Ack sequence for update */
|
140 |
|
|
/*
|
141 |
|
|
* Slow start and congestion control (see also Nagle, and Karn & Partridge)
|
142 |
|
|
*/
|
143 |
|
|
__u32 snd_cwnd; /* Sending congestion window */
|
144 |
|
|
__u32 snd_ssthresh; /* Slow start size threshold */
|
145 |
|
|
/*
|
146 |
|
|
* Timers used by the TCP protocol layer
|
147 |
|
|
*/
|
148 |
|
|
struct timer_list delack_timer; /* Ack delay */
|
149 |
|
|
struct timer_list idle_timer; /* Idle watch */
|
150 |
|
|
struct timer_list completion_timer; /* Up/Down timer */
|
151 |
|
|
struct timer_list probe_timer; /* Probes */
|
152 |
|
|
struct timer_list retransmit_timer; /* Resend (no ack) */
|
153 |
|
|
};
|
154 |
|
|
#endif
|
155 |
|
|
|
156 |
|
|
/*
|
157 |
|
|
* This structure really needs to be cleaned up.
|
158 |
|
|
* Most of it is for TCP, and not used by any of
|
159 |
|
|
* the other protocols.
|
160 |
|
|
*/
|
161 |
|
|
struct sock
{
	/* This must be first: these two members overlay the list head that
	 * is embedded at the top of struct proto, so the sklist code can
	 * treat a struct proto * as a struct sock * (see add_to_prot_sklist).
	 */
	struct sock		*sklist_next;
	struct sock		*sklist_prev;

	struct options		*opt;		/* IP options to use on this socket */
	atomic_t		wmem_alloc;	/* Send-side buffer bytes in use -- presumably bounded by sndbuf; confirm */
	atomic_t		rmem_alloc;	/* Receive-side buffer bytes in use; checked against rcvbuf in sock_queue_rcv_skb() */
	unsigned long		allocation;	/* Allocation mode */
	__u32			write_seq;
	__u32			sent_seq;
	__u32			acked_seq;
	__u32			copied_seq;
	__u32			rcv_ack_seq;
	unsigned short		rcv_ack_cnt;	/* count of same ack */
	__u32			window_seq;
	__u32			fin_seq;
	__u32			urg_seq;
	__u32			urg_data;
	__u32			syn_seq;
	int			users;		/* user count -- lock nesting depth, see lock_sock()/release_sock() */
	/*
	 *	Not all are volatile, but some are, so we
	 *	might as well say they all are.
	 */
	volatile char		dead,		/* No user left; absorb data quietly (see sock_queue_rcv_skb) */
				urginline,
				intr,
				blog,
				done,
				reuse,
				keepopen,
				linger,
				delay_acks,
				destroy,
				ack_timed,
				no_check,
				zapped,		/* In ax25 & ipx means not linked */
				broadcast,
				nonagle,
				bsdism;
	struct device		*bound_device;	/* SO_BINDTODEVICE target (see header note) */
	unsigned long		lingertime;
	int			proc;

	/* Per-protocol hash table and bind linkage (managed through the
	 * struct proto hash/unhash/rehash operations).
	 */
	struct sock		*next;
	struct sock		**pprev;
	struct sock		*bind_next;
	struct sock		**bind_pprev;
	struct sock		*pair;
	int			hashent;
	struct sock		*prev;
	struct sk_buff		* volatile send_head;
	struct sk_buff		* volatile send_next;
	struct sk_buff		* volatile send_tail;
	struct sk_buff_head	back_log;	/* NOTE(review): presumably packets parked while locked, drained by __release_sock() -- confirm */
	struct sk_buff		*partial;
	struct timer_list	partial_timer;
	long			retransmits;
	struct sk_buff_head	write_queue,
				receive_queue;	/* Incoming data; appended to by sock_queue_rcv_skb() */
	struct proto		*prot;		/* Protocol operations for this socket */
	struct wait_queue	**sleep;
	__u32			daddr;
	__u32			saddr;		/* Sending source */
	__u32			rcv_saddr;	/* Bound address */
	unsigned short		max_unacked;
	unsigned short		window;
	__u32			lastwin_seq;	/* sequence number when we last updated the window we offer */
	__u32			high_seq;	/* sequence number when we did current fast retransmit */
	volatile unsigned long	ato;		/* ack timeout */
	volatile unsigned long	lrcvtime;	/* jiffies at last data rcv */
	volatile unsigned long	idletime;	/* jiffies at last rcv */
	unsigned int		bytes_rcv;
/*
 *	mss is min(mtu, max_window)
 */
	unsigned short		mtu;		/* mss negotiated in the syn's */
	volatile unsigned short	mss;		/* current eff. mss - can change */
	volatile unsigned short	user_mss;	/* mss requested by user in ioctl */
	volatile unsigned short	max_window;
	unsigned long		window_clamp;
	unsigned int		ssthresh;
	unsigned short		num;
	volatile unsigned short	cong_window;
	volatile unsigned short	cong_count;
	volatile unsigned short	packets_out;
	volatile unsigned short	shutdown;	/* Mask of RCV_SHUTDOWN/SEND_SHUTDOWN */
	volatile unsigned long	rtt;		/* Round-trip time estimate */
	volatile unsigned long	mdev;		/* RTT mean deviation */
	volatile unsigned long	rto;		/* Retransmission timeout */

/*
 *	currently backoff isn't used, but I'm maintaining it in case
 *	we want to go back to a backoff formula that needs it
 */

	volatile unsigned short	backoff;
	int			err, err_soft;	/* Soft holds errors that don't
						   cause failure but are the cause
						   of a persistent failure not just
						   'timed out' */
	unsigned char		protocol;
	volatile unsigned char	state;
	unsigned short		ack_backlog;
	unsigned char		priority;
	unsigned char		debug;
	int			rcvbuf;		/* Upper bound on rmem_alloc (see sock_queue_rcv_skb) */
	int			sndbuf;		/* Send-side limit -- presumably paired with wmem_alloc; confirm */
	unsigned short		type;
	unsigned char		localroute;	/* Route locally only */

/*
 *	This is where all the private (optional) areas that don't
 *	overlap will eventually live.
 */

	union
	{
		struct unix_opt		af_unix;
#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
		ax25_cb			*ax25;
#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE)
		nr_cb			*nr;
#endif
#if defined(CONFIG_ROSE) || defined(CONFIG_ROSE_MODULE)
		rose_cb			*rose;
#endif
#endif
#if defined(CONFIG_ATALK) || defined(CONFIG_ATALK_MODULE)
		struct atalk_sock	af_at;
#endif
#if defined(CONFIG_IPX) || defined(CONFIG_IPX_MODULE)
		struct ipx_opt		af_ipx;
#endif
#ifdef CONFIG_INET
		struct inet_packet_opt	af_packet;
#ifdef CONFIG_NUTCP
		struct tcp_opt		af_tcp;
#endif
#endif
	} protinfo;

/*
 *	IP 'private area' or will be eventually
 */
	int			ip_ttl;			/* TTL setting */
	int			ip_tos;			/* TOS */
	struct tcphdr		dummy_th;
	struct timer_list	keepalive_timer;	/* TCP keepalive hack */
	struct timer_list	retransmit_timer;	/* TCP retransmit timer */
	struct timer_list	delack_timer;		/* TCP delayed ack timer */
	int			ip_xmit_timeout;	/* Why the timeout is running */
	struct rtable		*ip_route_cache;	/* Cached output route */
	unsigned char		ip_hdrincl;		/* Include headers ? */
#ifdef CONFIG_IP_MULTICAST
	int			ip_mc_ttl;		/* Multicasting TTL */
	int			ip_mc_loop;		/* Loopback */
	char			ip_mc_name[MAX_ADDR_LEN];	/* Multicast device name */
	struct ip_mc_socklist	*ip_mc_list;		/* Group array */
#endif

/*
 *	This part is used for the timeout functions (timer.c).
 */

	int			timeout;	/* What are we waiting for? (TIME_* value) */
	struct timer_list	timer;		/* This is the TIME_WAIT/receive timer
						 * when we are doing IP
						 */
	struct timeval		stamp;

/*
 *	Identd
 */

	struct socket		*socket;

/*
 *	Callbacks invoked by the protocol layers to notify the owner.
 */

	void			(*state_change)(struct sock *sk);
	void			(*data_ready)(struct sock *sk,int bytes);
	void			(*write_space)(struct sock *sk);
	void			(*error_report)(struct sock *sk);

	/*
	 *	Moved solely for 2.0 to keep binary module compatibility stuff straight.
	 */
	unsigned short		max_ack_backlog;
	struct sock		*listening;
};
|
356 |
|
|
|
357 |
|
|
/*
|
358 |
|
|
* IP protocol blocks we attach to sockets.
|
359 |
|
|
*/
|
360 |
|
|
|
361 |
|
|
/*
 *	Per-protocol operations and bookkeeping.  The leading two members
 *	deliberately mirror struct sock's sklist pointers so that a
 *	struct proto can serve as the head of its socket list (the sklist
 *	helpers below cast between the two -- see add_to_prot_sklist).
 */
struct proto
{
	/* These must be first. */
	struct sock		*sklist_next;
	struct sock		*sklist_prev;

	void			(*close)(struct sock *sk, unsigned long timeout);
	int			(*build_header)(struct sk_buff *skb,
					__u32 saddr,
					__u32 daddr,
					struct device **dev, int type,
					struct options *opt, int len,
					int tos, int ttl, struct rtable ** rp);
	int			(*connect)(struct sock *sk,
					struct sockaddr_in *usin, int addr_len);
	struct sock *		(*accept) (struct sock *sk, int flags);
	void			(*queue_xmit)(struct sock *sk,
					struct device *dev, struct sk_buff *skb,
					int free);
	void			(*retransmit)(struct sock *sk, int all);
	void			(*write_wakeup)(struct sock *sk);
	void			(*read_wakeup)(struct sock *sk);
	int			(*rcv)(struct sk_buff *buff, struct device *dev,
					struct options *opt, __u32 daddr,
					unsigned short len, __u32 saddr,
					int redo, struct inet_protocol *protocol);
	int			(*select)(struct sock *sk, int which,
					select_table *wait);
	int			(*ioctl)(struct sock *sk, int cmd,
					unsigned long arg);
	int			(*init)(struct sock *sk);
	void			(*shutdown)(struct sock *sk, int how);
	int			(*setsockopt)(struct sock *sk, int level, int optname,
					char *optval, int optlen);
	int			(*getsockopt)(struct sock *sk, int level, int optname,
					char *optval, int *option);
	int			(*sendmsg)(struct sock *sk, struct msghdr *msg, int len,
					int noblock, int flags);
	int			(*recvmsg)(struct sock *sk, struct msghdr *msg, int len,
					int noblock, int flags, int *addr_len);
	int			(*bind)(struct sock *sk, struct sockaddr *uaddr, int addr_len);

	/* Keeping track of sk's, looking them up, and port selection methods. */
	void			(*hash)(struct sock *sk);
	void			(*unhash)(struct sock *sk);
	void			(*rehash)(struct sock *sk);
	unsigned short		(*good_socknum)(void);
	int			(*verify_bind)(struct sock *sk, unsigned short snum);

	unsigned short		max_header;	/* Largest header this protocol prepends */
	unsigned long		retransmits;
	char			name[32];	/* Protocol name */
	int			inuse, highestinuse;	/* Current and peak socket counts,
							   maintained by the sklist helpers below */
};
|
415 |
|
|
|
416 |
|
|
/*
 *	Reasons the protocol timer may be running; stored in sock.timeout
 *	and sock.ip_xmit_timeout ("why the timeout is running").
 *	NOTE(review): exact semantics of each state live in net timer code
 *	(timer.c) -- confirm there before relying on these descriptions.
 */
#define TIME_WRITE	1
#define TIME_CLOSE	2
#define TIME_KEEPOPEN	3
#define TIME_DESTROY	4
#define TIME_DONE	5	/* Used to absorb those last few packets */
#define TIME_PROBE0	6
|
422 |
|
|
|
423 |
|
|
/*
|
424 |
|
|
* About 10 seconds
|
425 |
|
|
*/
|
426 |
|
|
|
427 |
|
|
#define SOCK_DESTROY_TIME (10*HZ)
|
428 |
|
|
|
429 |
|
|
/*
|
430 |
|
|
* Sockets 0-1023 can't be bound to unless you are superuser
|
431 |
|
|
*/
|
432 |
|
|
|
433 |
|
|
#define PROT_SOCK 1024
|
434 |
|
|
|
435 |
|
|
#define SHUTDOWN_MASK 3
|
436 |
|
|
#define RCV_SHUTDOWN 1
|
437 |
|
|
#define SEND_SHUTDOWN 2
|
438 |
|
|
|
439 |
|
|
/* Per-protocol hash table implementations use this to make sure
|
440 |
|
|
* nothing changes.
|
441 |
|
|
*/
|
442 |
|
|
#define SOCKHASH_LOCK() start_bh_atomic()
|
443 |
|
|
#define SOCKHASH_UNLOCK() end_bh_atomic()
|
444 |
|
|
|
445 |
|
|
/* Some things in the kernel just want to get at a protocols
|
446 |
|
|
* entire socket list commensurate, thus...
|
447 |
|
|
*/
|
448 |
|
|
static __inline__ void add_to_prot_sklist(struct sock *sk)
|
449 |
|
|
{
|
450 |
|
|
SOCKHASH_LOCK();
|
451 |
|
|
if(!sk->sklist_next) {
|
452 |
|
|
struct proto *p = sk->prot;
|
453 |
|
|
|
454 |
|
|
sk->sklist_prev = (struct sock *) p;
|
455 |
|
|
sk->sklist_next = p->sklist_next;
|
456 |
|
|
p->sklist_next->sklist_prev = sk;
|
457 |
|
|
p->sklist_next = sk;
|
458 |
|
|
|
459 |
|
|
/* Charge the protocol. */
|
460 |
|
|
sk->prot->inuse += 1;
|
461 |
|
|
if(sk->prot->highestinuse < sk->prot->inuse)
|
462 |
|
|
sk->prot->highestinuse = sk->prot->inuse;
|
463 |
|
|
}
|
464 |
|
|
SOCKHASH_UNLOCK();
|
465 |
|
|
}
|
466 |
|
|
|
467 |
|
|
static __inline__ void del_from_prot_sklist(struct sock *sk)
|
468 |
|
|
{
|
469 |
|
|
SOCKHASH_LOCK();
|
470 |
|
|
if(sk->sklist_next) {
|
471 |
|
|
sk->sklist_next->sklist_prev = sk->sklist_prev;
|
472 |
|
|
sk->sklist_prev->sklist_next = sk->sklist_next;
|
473 |
|
|
sk->sklist_next = NULL;
|
474 |
|
|
sk->prot->inuse--;
|
475 |
|
|
}
|
476 |
|
|
SOCKHASH_UNLOCK();
|
477 |
|
|
}
|
478 |
|
|
|
479 |
|
|
/*
|
480 |
|
|
* Used by processes to "lock" a socket state, so that
|
481 |
|
|
* interrupts and bottom half handlers won't change it
|
482 |
|
|
* from under us. It essentially blocks any incoming
|
483 |
|
|
* packets, so that we won't get any new data or any
|
484 |
|
|
* packets that change the state of the socket.
|
485 |
|
|
*
|
486 |
|
|
* Note the 'barrier()' calls: gcc may not move a lock
|
487 |
|
|
* "downwards" or a unlock "upwards" when optimizing.
|
488 |
|
|
*/
|
489 |
|
|
extern void __release_sock(struct sock *sk);
|
490 |
|
|
|
491 |
|
|
/*
 *	Take one level of the socket "user" lock.  Nested calls just bump
 *	the count; release_sock() drops it and calls __release_sock()
 *	when the count reaches zero.  The barrier() keeps gcc from moving
 *	the lock "downwards" past later accesses (see comment above).
 */
static inline void lock_sock(struct sock *sk)
{
#if 0
/* debugging code: the test isn't even 100% correct, but it can catch bugs */
/* Note that a double lock is ok in theory - it's just _usually_ a bug */
	if (sk->users) {
		__label__ here;
		printk("double lock on socket at %p\n", &&here);
here:
	}
#endif
	sk->users++;
	barrier();
}
|
505 |
|
|
|
506 |
|
|
/*
 *	Drop one level of the socket "user" lock taken by lock_sock().
 *	When the count falls to zero, __release_sock() is invoked to do
 *	the deferred work.  The barrier() keeps gcc from moving the
 *	unlock "upwards" past earlier accesses (see comment above).
 */
static inline void release_sock(struct sock *sk)
{
	barrier();
#if 0
	/* debugging code: remove me when ok */
	if (sk->users == 0) {
		__label__ here;
		sk->users = 1;
		printk("trying to unlock unlocked socket at %p\n", &&here);
here:
	}
#endif
	/* Decrement and test in one expression; last one out runs the backlog. */
	if ((sk->users = sk->users-1) == 0)
		__release_sock(sk);
}
|
521 |
|
|
|
522 |
|
|
|
523 |
|
|
extern struct sock * sk_alloc(int priority);
|
524 |
|
|
extern void sk_free(struct sock *sk);
|
525 |
|
|
extern void destroy_sock(struct sock *sk);
|
526 |
|
|
|
527 |
|
|
extern struct sk_buff *sock_wmalloc(struct sock *sk,
|
528 |
|
|
unsigned long size, int force,
|
529 |
|
|
int priority);
|
530 |
|
|
extern struct sk_buff *sock_rmalloc(struct sock *sk,
|
531 |
|
|
unsigned long size, int force,
|
532 |
|
|
int priority);
|
533 |
|
|
extern void sock_wfree(struct sock *sk,
|
534 |
|
|
struct sk_buff *skb);
|
535 |
|
|
extern void sock_rfree(struct sock *sk,
|
536 |
|
|
struct sk_buff *skb);
|
537 |
|
|
extern unsigned long sock_rspace(struct sock *sk);
|
538 |
|
|
extern unsigned long sock_wspace(struct sock *sk);
|
539 |
|
|
|
540 |
|
|
extern int sock_setsockopt(struct sock *sk, int level,
|
541 |
|
|
int op, char *optval,
|
542 |
|
|
int optlen);
|
543 |
|
|
|
544 |
|
|
extern int sock_getsockopt(struct sock *sk, int level,
|
545 |
|
|
int op, char *optval,
|
546 |
|
|
int *optlen);
|
547 |
|
|
extern struct sk_buff *sock_alloc_send_skb(struct sock *skb,
|
548 |
|
|
unsigned long size,
|
549 |
|
|
unsigned long fallback,
|
550 |
|
|
int noblock,
|
551 |
|
|
int *errcode);
|
552 |
|
|
|
553 |
|
|
/*
|
554 |
|
|
* Queue a received datagram if it will fit. Stream and sequenced
|
555 |
|
|
* protocols can't normally use this as they need to fit buffers in
|
556 |
|
|
* and play with them.
|
557 |
|
|
*
|
558 |
|
|
* Inlined as it's very short and called for pretty much every
|
559 |
|
|
* packet ever received.
|
560 |
|
|
*/
|
561 |
|
|
|
562 |
|
|
/*
 *	Charge skb against sk's receive budget and append it to the
 *	receive queue, waking the owner unless the socket is dead.
 *	Returns 0 on success or -ENOMEM when the buffer would not fit.
 */
extern __inline__ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	/* NOTE(review): the limit check and the atomic_add are separate
	 * operations, so concurrent callers could overshoot rcvbuf
	 * slightly -- confirm callers serialise if that matters.
	 */
	if (sk->rmem_alloc + skb->truesize >= sk->rcvbuf)
		return -ENOMEM;
	atomic_add(skb->truesize, &sk->rmem_alloc);	/* charge the socket */
	skb->sk=sk;					/* record the owning socket */
	skb_queue_tail(&sk->receive_queue,skb);
	if (!sk->dead)
		sk->data_ready(sk,skb->len);		/* wake the reader */
	return 0;
}
|
573 |
|
|
|
574 |
|
|
/*
 *	Same as sock_queue_rcv_skb() but uses the bare __skb_queue_tail()
 *	queue operation.  NOTE(review): the "__" variant presumably
 *	requires the caller to provide its own exclusion (e.g. interrupts
 *	already disabled) -- confirm against skbuff.h.
 */
extern __inline__ int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	if (sk->rmem_alloc + skb->truesize >= sk->rcvbuf)
		return -ENOMEM;
	atomic_add(skb->truesize, &sk->rmem_alloc);	/* charge the socket */
	skb->sk=sk;					/* record the owning socket */
	__skb_queue_tail(&sk->receive_queue,skb);
	if (!sk->dead)
		sk->data_ready(sk,skb->len);		/* wake the reader */
	return 0;
}
|
585 |
|
|
|
586 |
|
|
/*
|
587 |
|
|
* Recover an error report and clear atomically
|
588 |
|
|
*/
|
589 |
|
|
|
590 |
|
|
extern __inline__ int sock_error(struct sock *sk)
|
591 |
|
|
{
|
592 |
|
|
int err=xchg(&sk->err,0);
|
593 |
|
|
return -err;
|
594 |
|
|
}
|
595 |
|
|
|
596 |
|
|
/*
|
597 |
|
|
* Declarations from timer.c
|
598 |
|
|
*/
|
599 |
|
|
|
600 |
|
|
extern struct sock *timer_base;
|
601 |
|
|
|
602 |
|
|
extern void delete_timer (struct sock *);
|
603 |
|
|
extern void reset_timer (struct sock *, int, unsigned long);
|
604 |
|
|
extern void net_timer (unsigned long);
|
605 |
|
|
|
606 |
|
|
|
607 |
|
|
/*
|
608 |
|
|
* Enable debug/info messages
|
609 |
|
|
*/
|
610 |
|
|
|
611 |
|
|
#define NETDEBUG(x) do { } while (0)
|
612 |
|
|
|
613 |
|
|
#endif /* _SOCK_H */
|