OpenCores
URL https://opencores.org/ocsvn/test_project/test_project/trunk

Subversion Repositories test_project

[/] [test_project/] [trunk/] [linux_sd_driver/] [fs/] [file_table.c] - Blame information for rev 78

Go to most recent revision | Details | Compare with Previous | View Log

Line No. Rev Author Line
1 62 marcus.erl
/*
2
 *  linux/fs/file_table.c
3
 *
4
 *  Copyright (C) 1991, 1992  Linus Torvalds
5
 *  Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
6
 */
7
 
8
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/eventpoll.h>
#include <linux/rcupdate.h>
#include <linux/mount.h>
#include <linux/capability.h>
#include <linux/cdev.h>
#include <linux/fsnotify.h>
#include <linux/sysctl.h>
#include <linux/percpu_counter.h>

#include <asm/atomic.h>
25
 
26
/* sysctl tunables... */
/*
 * System-wide open-file accounting, exposed through the fs sysctls.
 * Only .max_files is set here (and re-sized in files_init());
 * .nr_files is refreshed on demand in proc_nr_files().
 */
struct files_stat_struct files_stat = {
        .max_files = NR_FILE
};
30
 
31
/* public. Not pretty! */
/* Global lock protecting the per-superblock s_files lists (see
 * file_move()/file_kill()); cacheline-aligned to keep it in its own
 * line on SMP. */
__cacheline_aligned_in_smp DEFINE_SPINLOCK(files_lock);

/* Approximate count of allocated struct files, kept per-CPU for cheap
 * inc/dec; read via get_nr_files(), summed exactly only on the
 * about-to-fail path in get_empty_filp(). */
static struct percpu_counter nr_files __cacheline_aligned_in_smp;
35
 
36
static inline void file_free_rcu(struct rcu_head *head)
37
{
38
        struct file *f =  container_of(head, struct file, f_u.fu_rcuhead);
39
        kmem_cache_free(filp_cachep, f);
40
}
41
 
42
static inline void file_free(struct file *f)
43
{
44
        percpu_counter_dec(&nr_files);
45
        call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
46
}
47
 
48
/*
 * Return the total number of open files in the system
 */
static int get_nr_files(void)
{
        /*
         * percpu_counter_read_positive() is cheap but approximate; callers
         * that need accuracy must use percpu_counter_sum_positive() (as the
         * limit check in get_empty_filp() does).
         */
        return percpu_counter_read_positive(&nr_files);
}
55
 
56
/*
 * Return the maximum number of open files in the system
 */
int get_max_files(void)
{
        /* Tunable via the file-max sysctl; default sized in files_init(). */
        return files_stat.max_files;
}
EXPORT_SYMBOL_GPL(get_max_files);
64
 
65
/*
 * Handle nr_files sysctl
 */
#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
/*
 * /proc/sys/fs/file-nr handler: refresh the (approximate) open-file
 * count in files_stat, then let the generic integer-vector handler do
 * the actual read/write of the table.
 */
int proc_nr_files(ctl_table *table, int write, struct file *filp,
                     void __user *buffer, size_t *lenp, loff_t *ppos)
{
        files_stat.nr_files = get_nr_files();
        return proc_dointvec(table, write, filp, buffer, lenp, ppos);
}
#else
/* Stub when sysctl or procfs support is compiled out. */
int proc_nr_files(ctl_table *table, int write, struct file *filp,
                     void __user *buffer, size_t *lenp, loff_t *ppos)
{
        return -ENOSYS;
}
#endif
82
 
83
/* Find an unused file structure and return a pointer to it.
 * Returns NULL, if there are no more free file structures or
 * we run out of memory.
 */
struct file *get_empty_filp(void)
{
        struct task_struct *tsk;
        static int old_max;     /* highest count we have already warned about */
        struct file * f;

        /*
         * Privileged users can go above max_files
         */
        if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
                /*
                 * percpu_counters are inaccurate.  Do an expensive check before
                 * we go and fail.
                 */
                if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
                        goto over;
        }

        /* Zeroed allocation: any field not set below starts out 0/NULL. */
        f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
        if (f == NULL)
                goto fail;

        /*
         * Counted before the security hook on purpose: the fail_sec path
         * undoes it via file_free(), which decrements nr_files.
         */
        percpu_counter_inc(&nr_files);
        if (security_file_alloc(f))
                goto fail_sec;

        tsk = current;
        INIT_LIST_HEAD(&f->f_u.fu_list);
        atomic_set(&f->f_count, 1);
        rwlock_init(&f->f_owner.lock);
        f->f_uid = tsk->fsuid;
        f->f_gid = tsk->fsgid;
        eventpoll_init_file(f);
        /* f->f_version: 0 */
        return f;

over:
        /* Ran out of filps - report that */
        if (get_nr_files() > old_max) {
                printk(KERN_INFO "VFS: file-max limit %d reached\n",
                                        get_max_files());
                old_max = get_nr_files();
        }
        goto fail;

fail_sec:
        file_free(f);
fail:
        return NULL;
}

EXPORT_SYMBOL(get_empty_filp);
139
 
140
/**
141
 * alloc_file - allocate and initialize a 'struct file'
142
 * @mnt: the vfsmount on which the file will reside
143
 * @dentry: the dentry representing the new file
144
 * @mode: the mode with which the new file will be opened
145
 * @fop: the 'struct file_operations' for the new file
146
 *
147
 * Use this instead of get_empty_filp() to get a new
148
 * 'struct file'.  Do so because of the same initialization
149
 * pitfalls reasons listed for init_file().  This is a
150
 * preferred interface to using init_file().
151
 *
152
 * If all the callers of init_file() are eliminated, its
153
 * code should be moved into this function.
154
 */
155
struct file *alloc_file(struct vfsmount *mnt, struct dentry *dentry,
156
                mode_t mode, const struct file_operations *fop)
157
{
158
        struct file *file;
159
        struct path;
160
 
161
        file = get_empty_filp();
162
        if (!file)
163
                return NULL;
164
 
165
        init_file(file, mnt, dentry, mode, fop);
166
        return file;
167
}
168
EXPORT_SYMBOL(alloc_file);
169
 
170
/**
171
 * init_file - initialize a 'struct file'
172
 * @file: the already allocated 'struct file' to initialized
173
 * @mnt: the vfsmount on which the file resides
174
 * @dentry: the dentry representing this file
175
 * @mode: the mode the file is opened with
176
 * @fop: the 'struct file_operations' for this file
177
 *
178
 * Use this instead of setting the members directly.  Doing so
179
 * avoids making mistakes like forgetting the mntget() or
180
 * forgetting to take a write on the mnt.
181
 *
182
 * Note: This is a crappy interface.  It is here to make
183
 * merging with the existing users of get_empty_filp()
184
 * who have complex failure logic easier.  All users
185
 * of this should be moving to alloc_file().
186
 */
187
int init_file(struct file *file, struct vfsmount *mnt, struct dentry *dentry,
188
           mode_t mode, const struct file_operations *fop)
189
{
190
        int error = 0;
191
        file->f_path.dentry = dentry;
192
        file->f_path.mnt = mntget(mnt);
193
        file->f_mapping = dentry->d_inode->i_mapping;
194
        file->f_mode = mode;
195
        file->f_op = fop;
196
        return error;
197
}
198
EXPORT_SYMBOL(init_file);
199
 
200
/*
 * fput - drop one reference to @file; the final reference triggers the
 * full teardown in __fput().  Pairs with fget()/fget_light().
 */
void fastcall fput(struct file *file)
{
        if (atomic_dec_and_test(&file->f_count))
                __fput(file);
}

EXPORT_SYMBOL(fput);
207
 
208
/* __fput is called from task context when aio completion releases the
 * last use of a struct file *.  Do not use otherwise.
 *
 * Tears the file down in a fixed order; do not reorder these calls.
 */
void fastcall __fput(struct file *file)
{
        struct dentry *dentry = file->f_path.dentry;
        struct vfsmount *mnt = file->f_path.mnt;
        struct inode *inode = dentry->d_inode;

        might_sleep();

        fsnotify_close(file);
        /*
         * The function eventpoll_release() should be the first called
         * in the file cleanup chain.
         */
        eventpoll_release(file);
        locks_remove_flock(file);

        if (file->f_op && file->f_op->release)
                file->f_op->release(inode, file);
        security_file_free(file);
        /* Character devices pin an extra cdev reference per open file. */
        if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL))
                cdev_put(inode->i_cdev);
        fops_put(file->f_op);
        /* Return the write access taken when the file was opened for write. */
        if (file->f_mode & FMODE_WRITE)
                put_write_access(inode);
        put_pid(file->f_owner.pid);
        /* Unlink from the superblock's s_files list before freeing. */
        file_kill(file);
        file->f_path.dentry = NULL;
        file->f_path.mnt = NULL;
        /* Free the file itself; the dentry/mnt pins are dropped last. */
        file_free(file);
        dput(dentry);
        mntput(mnt);
}
243
 
244
/*
 * fget - look up @fd in the current task's file table and take a
 * reference on the resulting struct file.  Returns NULL if the fd is
 * not open or the file is concurrently being released (refcount
 * already dropped to zero).  Pair with fput().
 */
struct file fastcall *fget(unsigned int fd)
{
        struct file *file;
        struct files_struct *files = current->files;

        /* RCU keeps the file object alive while we try to grab a ref. */
        rcu_read_lock();
        file = fcheck_files(files, fd);
        if (file) {
                if (!atomic_inc_not_zero(&file->f_count)) {
                        /* File object ref couldn't be taken */
                        rcu_read_unlock();
                        return NULL;
                }
        }
        rcu_read_unlock();

        return file;
}

EXPORT_SYMBOL(fget);
264
 
265
/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 * You can use this only if it is guaranteed that the current task already
 * holds a refcnt to that file. That check has to be done at fget() only
 * and a flag is returned to be passed to the corresponding fput_light().
 * There must not be a cloning between an fget_light/fput_light pair.
 */
struct file fastcall *fget_light(unsigned int fd, int *fput_needed)
{
        struct file *file;
        struct files_struct *files = current->files;

        *fput_needed = 0;
        if (likely((atomic_read(&files->count) == 1))) {
                /* Unshared table: the task's own reference keeps the
                 * file alive, so skip the refcount bump entirely. */
                file = fcheck_files(files, fd);
        } else {
                /* Shared table: take a real reference under RCU, just
                 * like fget(); tell the caller to fput_light() it. */
                rcu_read_lock();
                file = fcheck_files(files, fd);
                if (file) {
                        if (atomic_inc_not_zero(&file->f_count))
                                *fput_needed = 1;
                        else
                                /* Didn't get the reference, someone's freed */
                                file = NULL;
                }
                rcu_read_unlock();
        }

        return file;
}
295
 
296
 
297
/*
 * put_filp - drop a reference to a file, freeing it on the last one
 * WITHOUT the full __fput() teardown (no f_op->release, no dentry or
 * mnt to drop).
 * NOTE(review): this only makes sense for files that never gained a
 * dentry/mnt (e.g. error paths after get_empty_filp()) — confirm with
 * callers before reusing elsewhere.
 */
void put_filp(struct file *file)
{
        if (atomic_dec_and_test(&file->f_count)) {
                security_file_free(file);
                file_kill(file);
                file_free(file);
        }
}
305
 
306
void file_move(struct file *file, struct list_head *list)
307
{
308
        if (!list)
309
                return;
310
        file_list_lock();
311
        list_move(&file->f_u.fu_list, list);
312
        file_list_unlock();
313
}
314
 
315
/*
 * file_kill - remove @file from whatever file list it is on
 * (see file_move()), if any.
 */
void file_kill(struct file *file)
{
        /*
         * Unlocked emptiness check as a fast path; the list itself is
         * only modified under files_lock below.
         * NOTE(review): assumes no file_move() can race on this same
         * file here — confirm against callers.
         */
        if (!list_empty(&file->f_u.fu_list)) {
                file_list_lock();
                list_del_init(&file->f_u.fu_list);
                file_list_unlock();
        }
}
323
 
324
int fs_may_remount_ro(struct super_block *sb)
325
{
326
        struct file *file;
327
 
328
        /* Check that no files are currently opened for writing. */
329
        file_list_lock();
330
        list_for_each_entry(file, &sb->s_files, f_u.fu_list) {
331
                struct inode *inode = file->f_path.dentry->d_inode;
332
 
333
                /* File with pending delete? */
334
                if (inode->i_nlink == 0)
335
                        goto too_bad;
336
 
337
                /* Writeable file? */
338
                if (S_ISREG(inode->i_mode) && (file->f_mode & FMODE_WRITE))
339
                        goto too_bad;
340
        }
341
        file_list_unlock();
342
        return 1; /* Tis' cool bro. */
343
too_bad:
344
        file_list_unlock();
345
        return 0;
346
}
347
 
348
/*
 * files_init - one-time sizing and setup of the file table, called
 * during VFS bring-up.
 * @mempages: total number of pages of memory in the system.
 *
 * Sets the default file-max limit to roughly 10% of memory (assuming a
 * file plus its inode and dcache footprint is very roughly 1K), never
 * below the compiled-in NR_FILE floor, then initializes the deferred
 * fdtable machinery and the nr_files counter.
 */
void __init files_init(unsigned long mempages)
{
        unsigned long n;
        /* One file with associated inode and dcache is very roughly 1K.
         * Per default don't use more than 10% of our memory for files.
         */

        n = (mempages * (PAGE_SIZE / 1024)) / 10;
        /*
         * Compute in unsigned long so very large memory sizes cannot
         * overflow an int; clamp to what files_stat.max_files (an int)
         * can actually hold before assigning.
         */
        if (n > INT_MAX)
                n = INT_MAX;
        files_stat.max_files = n;
        if (files_stat.max_files < NR_FILE)
                files_stat.max_files = NR_FILE;
        files_defer_init();
        percpu_counter_init(&nr_files, 0);
}

powered by: WebSVN 2.1.0

© copyright 1999-2025 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.