linux-2.4/net/unix/garbage.c (or1k trunk, rev 1774)
/*
 * NET3:        Garbage Collector For AF_UNIX sockets
 *
 * Garbage Collector:
 *      Copyright (C) Barak A. Pearlmutter.
 *      Released under the GPL version 2 or later.
 *
 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
 * If it doesn't work, blame me; it worked when Barak sent it.
 *
 * Assumptions:
 *
 *  - object w/ a bit
 *  - free list
 *
 * Current optimizations:
 *
 *  - explicit stack instead of recursion
 *  - tail recurse on first born instead of immediate push/pop
 *  - we gather the stuff that should not be killed into a tree,
 *    and the stack is just a path from the root to the current pointer.
 *
 *  Future optimizations:
 *
 *  - don't just push the entire root set; process in place
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *  Fixes:
 *      Alan Cox        07 Sept 1997    Vmalloc internal stack as needed.
 *                                      Cope with changing max_files.
 *      Al Viro         11 Oct 1998
 *              The graph may have cycles. That is, we can send the descriptor
 *              of foo to bar and vice versa. The current code chokes on that.
 *              Fix: move the SCM_RIGHTS skbs onto a separate list and then
 *              skb_free() them all instead of doing explicit fput's.
 *              Another problem: since fput() may block, somebody may create
 *              a new unix_socket while we are in the middle of the sweep
 *              phase. Fix: invert the logic wrt MARKED. Mark everything
 *              at the beginning and unmark the non-junk ones.
 *
 *              [12 Oct 1998] AAARGH! The new code purged all SCM_RIGHTS
 *              sent to connect()'ed but still not accept()'ed sockets.
 *              Fixed. The old code had a slightly different problem here:
 *              an extra fput() when we passed the descriptor via such a
 *              socket and then closed it (the descriptor). That would happen
 *              on each unix_gc() until the accept(). Since the struct file in
 *              question would go to the free list and might be reused...
 *              That might be the reason for random oopses on filp_close()
 *              in unrelated processes.
 *
 *      AV              28 Feb 1999
 *              Killed the explicit allocation of the stack. Now we keep the
 *              tree with its root in a dummy node, plus a pointer (gc_current)
 *              to one of the nodes. The stack is represented as the path from
 *              gc_current to the dummy. Unmark now means "add to tree".
 *              Push == "make it a son of gc_current". Pop == "move gc_current
 *              to its parent". We keep only pointers to parents (->gc_tree).
 *      AV              1 Mar 1999
 *              Damn. Added the missing check for ->dead when scanning listen
 *              queues.
 *
 */
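
/*
 * Summary of the algorithm as implemented below: mark every unix socket
 * as a garbage candidate (GC_ORPHAN); unmark everything reachable from
 * the root set (sockets with more file references than in-flight counts)
 * by a depth-first walk over the SCM_RIGHTS descriptors sitting in
 * receive queues; finally move the skbs still held only by unreachable
 * sockets onto a hitlist and purge them.
 */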

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/tcp.h>

#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>

/* Internal data structures and random procedures: */

#define GC_HEAD         ((unix_socket *)(-1))
#define GC_ORPHAN       ((unix_socket *)(-3))
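
/*
 * ->gc_tree doubles as the mark bit: GC_ORPHAN means "still marked,
 * a garbage candidate"; any other value means the socket has been
 * unmarked and linked into the tree, whose chain of parent pointers
 * ends at the GC_HEAD sentinel.
 */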

static unix_socket *gc_current=GC_HEAD; /* stack of objects to mark */

atomic_t unix_tot_inflight = ATOMIC_INIT(0);


extern inline unix_socket *unix_get_socket(struct file *filp)
{
        unix_socket * u_sock = NULL;
        struct inode *inode = filp->f_dentry->d_inode;

        /*
         *      Socket ?
         */
        if (inode->i_sock) {
                struct socket * sock = &inode->u.socket_i;
                struct sock * s = sock->sk;

                /*
                 *      PF_UNIX ?
                 */
                if (s && sock->ops && sock->ops->family == PF_UNIX)
                        u_sock = s;
        }
        return u_sock;
}

/*
 *      Keep track of the in-flight count for a file
 *      descriptor if it refers to an AF_UNIX socket.
 */

void unix_inflight(struct file *fp)
{
        unix_socket *s=unix_get_socket(fp);
        if(s) {
                atomic_inc(&s->protinfo.af_unix.inflight);
                atomic_inc(&unix_tot_inflight);
        }
}

void unix_notinflight(struct file *fp)
{
        unix_socket *s=unix_get_socket(fp);
        if(s) {
                atomic_dec(&s->protinfo.af_unix.inflight);
                atomic_dec(&unix_tot_inflight);
        }
}
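
/*
 * These two are called from the SCM_RIGHTS attach/detach paths in
 * af_unix.c, so every descriptor queued in an SCM_RIGHTS message holds
 * an inflight reference until the message is received or destroyed.
 */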


/*
 *      Garbage Collector Support Functions
 */

extern inline unix_socket *pop_stack(void)
{
        unix_socket *p=gc_current;
        gc_current = p->protinfo.af_unix.gc_tree;
        return p;
}

extern inline int empty_stack(void)
{
        return gc_current == GC_HEAD;
}

extern inline void maybe_unmark_and_push(unix_socket *x)
{
        if (x->protinfo.af_unix.gc_tree != GC_ORPHAN)
                return;
        sock_hold(x);
        x->protinfo.af_unix.gc_tree = gc_current;
        gc_current = x;
}
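
/*
 * Worked example: pushing sockets A and then B onto an empty stack
 * leaves gc_current == B, B->gc_tree == A and A->gc_tree == GC_HEAD;
 * pop_stack() then returns B and moves gc_current back to A. The
 * sock_hold() taken on push is dropped with sock_put() after the
 * popped socket's receive queue has been scanned below.
 */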


/* The external entry point: unix_gc() */

void unix_gc(void)
{
        static DECLARE_MUTEX(unix_gc_sem);
        int i;
        unix_socket *s;
        struct sk_buff_head hitlist;
        struct sk_buff *skb;

        /*
         *      Avoid a recursive GC.
         */

        if (down_trylock(&unix_gc_sem))
                return;

        read_lock(&unix_table_lock);

        forall_unix_sockets(i, s)
        {
                s->protinfo.af_unix.gc_tree=GC_ORPHAN;
        }
        /*
         *      Everything is now marked
         */

        /* Invariant to be maintained:
                - everything unmarked is either:
                -- (a) on the stack, or
                -- (b) has all of its children unmarked
                - everything on the stack is always unmarked
                - nothing is ever pushed onto the stack twice, because:
                -- nothing previously unmarked is ever pushed on the stack
         */

        /*
         *      Push root set
         */

        forall_unix_sockets(i, s)
        {
                int open_count = 0;

                /*
                 *      If some instances of the descriptor are not
                 *      in flight, the socket is still in use.
                 *
                 *      Special case: when socket s is an embryo, it may be
                 *      hashed but not yet in the queue of a listening socket.
                 *      In this case (see unix_create1()) we set an artificial
                 *      negative inflight counter to close the race window.
                 *      It is a trick, of course, and a dirty one.
                 */
                if(s->socket && s->socket->file)
                        open_count = file_count(s->socket->file);
                if (open_count > atomic_read(&s->protinfo.af_unix.inflight))
                        maybe_unmark_and_push(s);
        }

        /*
         *      Mark phase
         */

        while (!empty_stack())
        {
                unix_socket *x = pop_stack();
                unix_socket *sk;

                spin_lock(&x->receive_queue.lock);
                skb=skb_peek(&x->receive_queue);

                /*
                 *      Loop through all but first born
                 */

                while(skb && skb != (struct sk_buff *)&x->receive_queue)
                {
                        /*
                         *      Do we have file descriptors ?
                         */
                        if(UNIXCB(skb).fp)
                        {
                                /*
                                 *      Process the descriptors of this socket
                                 */
                                int nfd=UNIXCB(skb).fp->count;
                                struct file **fp = UNIXCB(skb).fp->fp;
                                while(nfd--)
                                {
                                        /*
                                         *      Get the socket the fd refers
                                         *      to, if it is a socket at all
                                         */
                                        if((sk=unix_get_socket(*fp++))!=NULL)
                                        {
                                                maybe_unmark_and_push(sk);
                                        }
                                }
                        }
                        /* We have to scan not-yet-accepted ones too */
                        if (x->state == TCP_LISTEN) {
                                maybe_unmark_and_push(skb->sk);
                        }
                        skb=skb->next;
                }
                spin_unlock(&x->receive_queue.lock);
                sock_put(x);
        }
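
        /*
         *      Sweep phase: anything still marked GC_ORPHAN is unreachable.
         *      Its queued SCM_RIGHTS skbs are moved onto a private hitlist
         *      rather than freed here, so that the fput()s happen via
         *      __skb_queue_purge() only after the locks are dropped; as
         *      noted above, fput() may block.
         */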
        skb_queue_head_init(&hitlist);

        forall_unix_sockets(i, s)
        {
                if (s->protinfo.af_unix.gc_tree == GC_ORPHAN)
                {
                        struct sk_buff *nextsk;
                        spin_lock(&s->receive_queue.lock);
                        skb=skb_peek(&s->receive_queue);
                        while(skb && skb != (struct sk_buff *)&s->receive_queue)
                        {
                                nextsk=skb->next;
                                /*
                                 *      Do we have file descriptors ?
                                 */
                                if(UNIXCB(skb).fp)
                                {
                                        __skb_unlink(skb, skb->list);
                                        __skb_queue_tail(&hitlist,skb);
                                }
                                skb=nextsk;
                        }
                        spin_unlock(&s->receive_queue.lock);
                }
                s->protinfo.af_unix.gc_tree = GC_ORPHAN;
        }
        read_unlock(&unix_table_lock);

        /*
         *      Here we are. Hitlist is filled. Die.
         */

        __skb_queue_purge(&hitlist);
        up(&unix_gc_sem);
}
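
/*
 * Note: unix_gc() is triggered from the AF_UNIX close path when
 * descriptors are still in flight (unix_tot_inflight != 0), so a cycle
 * of sockets that hold each other only through queued SCM_RIGHTS
 * messages is reclaimed once the last real descriptor to any of them
 * is closed.
 */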
