/*
 * NET3:	Garbage Collector For AF_UNIX sockets
 *
 * Garbage Collector:
 *	Copyright (C) Barak A. Pearlmutter.
 *	Released under the GPL version 2 or later.
 *
 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
 * If it doesn't work blame me, it worked when Barak sent it.
 *
 * Assumptions:
 *
 *  - object w/ a bit
 *  - free list
 *
 * Current optimizations:
 *
 *  - explicit stack instead of recursion
 *  - tail recurse on first born instead of immediate push/pop
 *  - we gather the stuff that should not be killed into a tree,
 *    and the stack is just a path from the root to the current pointer.
 *
 * Future optimizations:
 *
 *  - don't just push entire root set; process in place
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 * Fixes:
 *	Alan Cox	07 Sept 1997	Vmalloc internal stack as needed.
 *					Cope with changing max_files.
 *	Al Viro		11 Oct 1998
 *		The graph may have cycles. That is, we can send the
 *		descriptor of foo to bar and vice versa. The current code
 *		chokes on that. Fix: move the SCM_RIGHTS ones into a
 *		separate list and then skb_free() them all instead of
 *		doing explicit fput's.
 *		Another problem: since fput() may block, somebody may
 *		create a new unix_socket while we are in the middle of the
 *		sweep phase. Fix: revert the logic wrt MARKED. Mark
 *		everything at the beginning and unmark the non-junk ones.
 *
 *		[12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
 *		sent to connect()'ed but still not accept()'ed sockets.
 *		Fixed. The old code had a slightly different problem here:
 *		an extra fput() when we passed the descriptor via such a
 *		socket and then closed it (the descriptor). That would
 *		happen on each unix_gc() until the accept(). Since the
 *		struct file in question would go to the free list and
 *		might be reused... That might be the reason for the random
 *		oopses on filp_close() in unrelated processes.
 *
 *	AV		28 Feb 1999
 *		Kill the explicit allocation of the stack. Now we keep the
 *		tree with its root in a dummy node + a pointer (gc_current)
 *		to one of the nodes. The stack is represented as the path
 *		from gc_current to the dummy. Unmark now means "add to the
 *		tree". Push == "make it a son of gc_current". Pop == "move
 *		gc_current to parent". We keep only pointers to parents
 *		(->gc_tree).
 *	AV		1 Mar 1999
 *		Damn. Added missing check for ->dead in listen queues
 *		scanning.
 *
 *	Miklos Szeredi 25 Jun 2007
 *		Reimplement with a cycle collecting algorithm. This should
 *		solve several problems with the previous code, like being
 *		racy wrt receive and holding up unrelated socket operations.
 */
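
/*
 * Overview of the cycle collector implemented below (see unix_gc()):
 *
 *  1. Candidates are the in-flight AF_UNIX sockets whose file is
 *     referenced by nothing but in-flight SCM_RIGHTS messages.
 *  2. For each candidate, subtract the references held by messages
 *     queued on the candidates themselves.
 *  3. A candidate still referenced after that is reachable from user
 *     space; restore it and, recursively, everything it holds in
 *     flight.
 *  4. What remains forms unreachable cycles: collect the skbs carrying
 *     those descriptors onto a hitlist and free them.
 */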

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>

#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>

/* Internal data structures and random procedures: */

static LIST_HEAD(gc_inflight_list);
static LIST_HEAD(gc_candidates);
static DEFINE_SPINLOCK(unix_gc_lock);

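/* Total number of AF_UNIX socket descriptors currently in flight. */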
unsigned int unix_tot_inflight;

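/*
 * Return the AF_UNIX socket behind @filp, or NULL if the file is not
 * an AF_UNIX socket.
 */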
static struct sock *unix_get_socket(struct file *filp)
{
	struct sock *u_sock = NULL;
	struct inode *inode = filp->f_path.dentry->d_inode;

	/* Socket ? */
	if (S_ISSOCK(inode->i_mode)) {
		struct socket *sock = SOCKET_I(inode);
		struct sock *s = sock->sk;

		/* PF_UNIX ? */
		if (s && sock->ops && sock->ops->family == PF_UNIX)
			u_sock = s;
	}
	return u_sock;
}

/*
 * Keep the in-flight count for a file descriptor up to date when it is
 * put in flight, if it refers to an AF_UNIX socket.
 */
void unix_inflight(struct file *fp)
{
	struct sock *s = unix_get_socket(fp);
	if (s) {
		struct unix_sock *u = unix_sk(s);
		spin_lock(&unix_gc_lock);
		if (atomic_inc_return(&u->inflight) == 1) {
			BUG_ON(!list_empty(&u->link));
			list_add_tail(&u->link, &gc_inflight_list);
		} else {
			BUG_ON(list_empty(&u->link));
		}
		unix_tot_inflight++;
		spin_unlock(&unix_gc_lock);
	}
}

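/*
 * The converse of unix_inflight(): drop the in-flight count again, and
 * take the socket off gc_inflight_list once no message holds it.
 */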
void unix_notinflight(struct file *fp)
{
	struct sock *s = unix_get_socket(fp);
	if (s) {
		struct unix_sock *u = unix_sk(s);
		spin_lock(&unix_gc_lock);
		BUG_ON(list_empty(&u->link));
		if (atomic_dec_and_test(&u->inflight))
			list_del_init(&u->link);
		unix_tot_inflight--;
		spin_unlock(&unix_gc_lock);
	}
}

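/*
 * The receive queue head itself serves as the list sentinel, so casting
 * it to an skb gives the iteration macro below a convenient end marker.
 * The macro caches the next pointer before the body runs, so the
 * current skb may safely be unlinked during the walk.
 */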
static inline struct sk_buff *sock_queue_head(struct sock *sk)
{
	return (struct sk_buff *)&sk->sk_receive_queue;
}

#define receive_queue_for_each_skb(sk, next, skb) \
	for (skb = sock_queue_head(sk)->next, next = skb->next; \
	     skb != sock_queue_head(sk); skb = next, next = skb->next)

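/*
 * Apply @func to the AF_UNIX socket behind every file descriptor found
 * in the SCM_RIGHTS messages queued on @x.  If @hitlist is non-NULL,
 * also move each skb that carried at least one such descriptor onto it.
 */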
static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	struct sk_buff *skb;
	struct sk_buff *next;

	spin_lock(&x->sk_receive_queue.lock);
	receive_queue_for_each_skb(x, next, skb) {
		/* Do we have file descriptors ? */
		if (UNIXCB(skb).fp) {
			bool hit = false;
			/* Process the descriptors of this socket */
			int nfd = UNIXCB(skb).fp->count;
			struct file **fp = UNIXCB(skb).fp->fp;

			while (nfd--) {
				/* Get the socket the fd matches if it indeed does so */
				struct sock *sk = unix_get_socket(*fp++);
				if (sk) {
					hit = true;
					func(unix_sk(sk));
				}
			}
			if (hit && hitlist != NULL) {
				__skb_unlink(skb, &x->sk_receive_queue);
				__skb_queue_tail(hitlist, skb);
			}
		}
	}
	spin_unlock(&x->sk_receive_queue.lock);
}

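/*
 * As scan_inflight(), but for a listening socket scan the receive
 * queues of its not yet accept()'ed children instead: SCM_RIGHTS sent
 * over such a socket sit on the embryo's queue, not the listener's.
 */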
static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	if (x->sk_state != TCP_LISTEN)
		scan_inflight(x, func, hitlist);
	else {
		struct sk_buff *skb;
		struct sk_buff *next;
		struct unix_sock *u;
		LIST_HEAD(embryos);

		/*
		 * For a listening socket collect the queued embryos
		 * and perform a scan on them as well.
		 */
		spin_lock(&x->sk_receive_queue.lock);
		receive_queue_for_each_skb(x, next, skb) {
			u = unix_sk(skb->sk);

			/*
			 * An embryo cannot be in-flight, so it's safe
			 * to use the list link.
			 */
			BUG_ON(!list_empty(&u->link));
			list_add_tail(&u->link, &embryos);
		}
		spin_unlock(&x->sk_receive_queue.lock);

		while (!list_empty(&embryos)) {
			u = list_entry(embryos.next, struct unix_sock, link);
			scan_inflight(&u->sk, func, hitlist);
			list_del_init(&u->link);
		}
	}
}

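/* Per-socket callbacks passed to scan_children() by unix_gc() below: */
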
static void dec_inflight(struct unix_sock *usk)
{
	atomic_dec(&usk->inflight);
}

static void inc_inflight(struct unix_sock *usk)
{
	atomic_inc(&usk->inflight);
}

static void inc_inflight_move_tail(struct unix_sock *u)
{
	atomic_inc(&u->inflight);
	/*
	 * If this is still a candidate, move it to the end of the
	 * list, so that it's checked even if it was already passed
	 * over.
	 */
	if (u->gc_candidate)
		list_move_tail(&u->link, &gc_candidates);
}

/* The external entry point: unix_gc() */

void unix_gc(void)
{
	static bool gc_in_progress = false;

	struct unix_sock *u;
	struct unix_sock *next;
	struct sk_buff_head hitlist;
	struct list_head cursor;

	spin_lock(&unix_gc_lock);

	/* Avoid a recursive GC. */
	if (gc_in_progress)
		goto out;

	gc_in_progress = true;
	/*
	 * First, select candidates for garbage collection.  Only
	 * in-flight sockets are considered, and from those only the
	 * ones which don't have any external reference.
	 *
	 * Holding unix_gc_lock will protect these candidates from
	 * being detached, and hence from gaining an external
	 * reference.  This also means that since there are no
	 * possible receivers, the receive queues of these sockets are
	 * static during the GC, even though the dequeue is done
	 * before the detach without atomicity guarantees.
	 */
	list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
		int total_refs;
		int inflight_refs;

		total_refs = file_count(u->sk.sk_socket->file);
		inflight_refs = atomic_read(&u->inflight);

		BUG_ON(inflight_refs < 1);
		BUG_ON(total_refs < inflight_refs);
		/*
		 * If every reference to the file is itself in flight,
		 * no user space file descriptor can reach the socket
		 * anymore, so it is a candidate.
		 */
		if (total_refs == inflight_refs) {
			list_move_tail(&u->link, &gc_candidates);
			u->gc_candidate = 1;
		}
	}

	/*
	 * Now remove all internal in-flight references to children of
	 * the candidates.
	 */
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, dec_inflight, NULL);

	/*
	 * Restore the references for children of all candidates which
	 * have remaining references.  Do this recursively, so that
	 * only those which form cyclic references remain.
	 *
	 * Use a "cursor" link to make the list traversal safe, even
	 * though elements might be moved about.
	 */
	list_add(&cursor, &gc_candidates);
	while (cursor.next != &gc_candidates) {
		u = list_entry(cursor.next, struct unix_sock, link);

		/* Move cursor to after the current position. */
		list_move(&cursor, &u->link);

		if (atomic_read(&u->inflight) > 0) {
			list_move_tail(&u->link, &gc_inflight_list);
			u->gc_candidate = 0;
			scan_children(&u->sk, inc_inflight_move_tail, NULL);
		}
	}
	list_del(&cursor);

	/*
	 * Now gc_candidates contains only garbage.  Restore the
	 * original inflight counters for these as well, and remove
	 * the skbuffs which are creating the cycle(s).
	 */
	skb_queue_head_init(&hitlist);
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, inc_inflight, &hitlist);
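
	/*
	 * Purging the hitlist runs the skb destructors, which drop the
	 * in-flight counts via unix_notinflight() and may re-enter the
	 * GC through the final fput()s; both paths take unix_gc_lock,
	 * so the lock is released first (a nested unix_gc() simply
	 * bails out on gc_in_progress).
	 */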
	spin_unlock(&unix_gc_lock);

	/* Here we are. Hitlist is filled. Die. */
	__skb_queue_purge(&hitlist);

	spin_lock(&unix_gc_lock);

	/* All candidates should have been detached by now. */
	BUG_ON(!list_empty(&gc_candidates));
	gc_in_progress = false;

out:
	spin_unlock(&unix_gc_lock);
}