/*
 *  linux/fs/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 */

/*
 * uClinux revisions for NO_MM
 * Copyright (C) 1998  Kenneth Albanowski <kjahds@kjahds.com>,
 *                     The Silver Hammer Group, Ltd.
 */

#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>

#include <asm/system.h>

#define NR_IHASH 512

/*
 * Be VERY careful when you access the inode hash table. There
 * are some rather scary race conditions you need to take care of:
 *  - P1 tries to open file "xx", calls "iget()" with the proper
 *    inode number, but blocks because it's not on the list.
 *  - P2 deletes file "xx", gets the inode (which P1 has just read,
 *    but P1 hasn't woken up to the fact yet)
 *  - P2 iput()'s the inode, which now has i_nlink = 0
 *  - P1 wakes up and has the inode, but now P2 has made that
 *    inode invalid (but P1 has no way of knowing that).
 *
 * The "updating" counter makes sure that when P1 blocks on the
 * iget(), P2 can't delete the inode from under it because P2
 * will wait until P1 has been able to update the inode usage
 * count so that the inode will stay in use until everybody has
 * closed it..
 */
static struct inode_hash_entry {
        struct inode * inode;
        int updating;
} hash_table[NR_IHASH];

static struct inode * first_inode;
static struct wait_queue * inode_wait = NULL;
/* Keep these next two contiguous in memory for sysctl.c */
int nr_inodes = 0, nr_free_inodes = 0;
int max_inodes = NR_INODE;

static inline int const hashfn(kdev_t dev, unsigned int i)
{
        return (HASHDEV(dev) ^ i) % NR_IHASH;
}

static inline struct inode_hash_entry * const hash(kdev_t dev, int i)
{
        return hash_table + hashfn(dev, i);
}
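
/*
 * Illustrative note (not in the original source): HASHDEV() comes from
 * <linux/kdev_t.h> and in this kernel generation is essentially a plain
 * integer cast of the kdev_t value, so with NR_IHASH == 512 an inode is
 * bucketed as ((unsigned int) dev ^ ino) % 512.  For example, inode 1234
 * on a device whose numeric value is 0x0300 would land in bucket
 * (0x0300 ^ 1234) % 512 == 466.
 */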

static inline void insert_inode_free(struct inode *inode)
{
        struct inode * prev, * next = first_inode;

        first_inode = inode;
        prev = next->i_prev;
        inode->i_next = next;
        inode->i_prev = prev;
        prev->i_next = inode;
        next->i_prev = inode;
}

static inline void remove_inode_free(struct inode *inode)
{
        if (first_inode == inode)
                first_inode = first_inode->i_next;
        if (inode->i_next)
                inode->i_next->i_prev = inode->i_prev;
        if (inode->i_prev)
                inode->i_prev->i_next = inode->i_next;
        inode->i_next = inode->i_prev = NULL;
}
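
/*
 * Illustrative note (not in the original source): first_inode heads a
 * circular doubly-linked ring threaded through i_next/i_prev that holds
 * every inode, in use or free.  insert_inode_free() links an inode in at
 * the front of the ring (it assumes the ring is non-empty, which
 * grow_inodes() guarantees by seeding the first element specially), and
 * remove_inode_free() unlinks it again.
 */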

void insert_inode_hash(struct inode *inode)
{
        struct inode_hash_entry *h;
        h = hash(inode->i_dev, inode->i_ino);

        inode->i_hash_next = h->inode;
        inode->i_hash_prev = NULL;
        if (inode->i_hash_next)
                inode->i_hash_next->i_hash_prev = inode;
        h->inode = inode;
}

static inline void remove_inode_hash(struct inode *inode)
{
        struct inode_hash_entry *h;
        h = hash(inode->i_dev, inode->i_ino);

        if (h->inode == inode)
                h->inode = inode->i_hash_next;
        if (inode->i_hash_next)
                inode->i_hash_next->i_hash_prev = inode->i_hash_prev;
        if (inode->i_hash_prev)
                inode->i_hash_prev->i_hash_next = inode->i_hash_next;
        inode->i_hash_prev = inode->i_hash_next = NULL;
}

static inline void put_last_free(struct inode *inode)
{
        remove_inode_free(inode);
        inode->i_prev = first_inode->i_prev;
        inode->i_prev->i_next = inode;
        inode->i_next = first_inode;
        inode->i_next->i_prev = inode;
}

int grow_inodes(void)
{
        struct inode * inode;
        int i;

        if (!(inode = (struct inode*) get_free_page(GFP_KERNEL)))
                return -ENOMEM;

        i=PAGE_SIZE / sizeof(struct inode);
        nr_inodes += i;
        nr_free_inodes += i;

        if (!first_inode)
                inode->i_next = inode->i_prev = first_inode = inode++, i--;

        for ( ; i ; i-- )
                insert_inode_free(inode++);
        return 0;
}
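
/*
 * Illustrative note (not in the original source): grow_inodes() carves
 * one whole page into inodes, so each call adds PAGE_SIZE /
 * sizeof(struct inode) entries.  With 4 KiB pages and a struct inode of
 * a few hundred bytes (the exact size depends on the configuration),
 * that is on the order of a dozen inodes per call.
 */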

unsigned long inode_init(unsigned long start, unsigned long end)
{
        memset(hash_table, 0, sizeof(hash_table));
        first_inode = NULL;
        return start;
}

static void __wait_on_inode(struct inode *);

static inline void wait_on_inode(struct inode * inode)
{
        if (inode->i_lock)
                __wait_on_inode(inode);
}

static inline void lock_inode(struct inode * inode)
{
        wait_on_inode(inode);
        inode->i_lock = 1;
}

static inline void unlock_inode(struct inode * inode)
{
        inode->i_lock = 0;
        wake_up(&inode->i_wait);
}

/*
 * Note that we don't want to disturb any wait-queues when we discard
 * an inode.
 *
 * Argghh. Got bitten by a gcc problem with inlining: no way to tell
 * the compiler that the inline asm function 'memset' changes 'inode'.
 * I've been searching for the bug for days, and was getting desperate.
 * Finally looked at the assembler output... Grrr.
 *
 * The solution is the weird use of 'volatile'. Ho humm. Have to report
 * it to the gcc lists, and hope we can do this more cleanly some day..
 */
void clear_inode(struct inode * inode)
{
        struct wait_queue * wait;

        /*
         * We can clear inodes either when a last deref to the inode
         * causes it to be deleted (reference count==1), or when we want to
         * reuse it (reference count==0).  Any other count is an error.
         */
        if (inode->i_count > 1)
                panic ("clear_inode: Inode still has references");

        /*
         * We are about to zap this inode.  This operation may block,
         * and it's imperative that we don't allow another process to
         * grab it before it is completely pulled down.  The i_count
         * will prevent reuse of the inode by get_empty_inode(), but the
         * i_condemned flag will also prevent __iget() from finding the
         * inode until it is completely dead.
         */
        inode->i_condemned = 1;
        inode->i_count++;

        truncate_inode_pages(inode, 0);
        wait_on_inode(inode);
        if (IS_WRITABLE(inode)) {
                if (inode->i_sb && inode->i_sb->dq_op)
                        inode->i_sb->dq_op->drop(inode);
        }
        remove_inode_hash(inode);
        remove_inode_free(inode);
        wait = ((volatile struct inode *) inode)->i_wait;
        if (--inode->i_count)
                nr_free_inodes++;
        memset(inode,0,sizeof(*inode));
        ((volatile struct inode *) inode)->i_wait = wait;
        insert_inode_free(inode);
        /*
         * The inode is now reusable again, and the condemned flag is
         * clear.  Wake up anybody who is waiting on the condemned flag.
         */
        wake_up(&inode->i_wait);
}
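
/*
 * Illustrative note (not in the original source): the i_condemned
 * protocol above pairs with the check in __iget() further down.  While
 * clear_inode() may sleep (in truncate_inode_pages() or the quota
 * drop), any process that finds the inode in the hash table sees
 * i_condemned set, sleeps on i_wait, and retries its lookup only after
 * the final wake_up() here, by which time the slot has been wiped by
 * the memset() and the condemned flag is clear again.
 */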

int fs_may_mount(kdev_t dev)
{
        struct inode * inode, * next;
        int i;

        next = first_inode;
        for (i = nr_inodes ; i > 0 ; i--) {
                inode = next;
                next = inode->i_next;   /* clear_inode() changes the queues.. */
                if (inode->i_dev != dev)
                        continue;
                if (inode->i_count || inode->i_dirt || inode->i_lock)
                        return 0;
                clear_inode(inode);
        }
        return 1;
}

int fs_may_umount(kdev_t dev, struct inode * mount_root)
{
        struct inode * inode;
        int i;

        inode = first_inode;
        for (i=0 ; i < nr_inodes ; i++, inode = inode->i_next) {
                if (inode->i_dev != dev || !inode->i_count)
                        continue;
                if (inode == mount_root && inode->i_count ==
                    (inode->i_mount != inode ? 1 : 2))
                        continue;
                return 0;
        }
        return 1;
}

int fs_may_remount_ro(kdev_t dev)
{
        struct file * file;
        int i;

        /* Check that no files are currently opened for writing. */
        for (file = first_file, i=0; i<nr_files; i++, file=file->f_next) {
                if (!file->f_count || !file->f_inode ||
                    file->f_inode->i_dev != dev)
                        continue;
                if (S_ISREG(file->f_inode->i_mode) && (file->f_mode & 2))
                        return 0;
        }
        return 1;
}
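
/*
 * Illustrative note (not in the original source): in the f_mode test
 * above, bit 1 is read access and bit 2 is write access, so
 * (file->f_mode & 2) picks out regular files that are open for writing;
 * any such file on the device vetoes the read-only remount.
 */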

static void write_inode(struct inode * inode)
{
        if (!inode->i_dirt)
                return;
        wait_on_inode(inode);
        if (!inode->i_dirt)
                return;
        if (!inode->i_sb || !inode->i_sb->s_op || !inode->i_sb->s_op->write_inode) {
                inode->i_dirt = 0;
                return;
        }
        inode->i_lock = 1;
        inode->i_sb->s_op->write_inode(inode);
        unlock_inode(inode);
}

static inline void read_inode(struct inode * inode)
{
        lock_inode(inode);
        if (inode->i_sb && inode->i_sb->s_op && inode->i_sb->s_op->read_inode)
                inode->i_sb->s_op->read_inode(inode);
        unlock_inode(inode);
}

/* POSIX UID/GID verification for setting inode attributes */
int inode_change_ok(struct inode *inode, struct iattr *attr)
{
        /*
         *      If force is set do it anyway.
         */

        if (attr->ia_valid & ATTR_FORCE)
                return 0;

        /* Make sure a caller can chown */
        if ((attr->ia_valid & ATTR_UID) &&
            (current->fsuid != inode->i_uid ||
             attr->ia_uid != inode->i_uid) && !fsuser())
                return -EPERM;

        /* Make sure caller can chgrp */
        if ((attr->ia_valid & ATTR_GID) &&
            (!in_group_p(attr->ia_gid) && attr->ia_gid != inode->i_gid) &&
            !fsuser())
                return -EPERM;

        /* Make sure a caller can chmod */
        if (attr->ia_valid & ATTR_MODE) {
                if ((current->fsuid != inode->i_uid) && !fsuser())
                        return -EPERM;
                /* Also check the setgid bit! */
                if (!fsuser() && !in_group_p((attr->ia_valid & ATTR_GID) ? attr->ia_gid :
                                             inode->i_gid))
                        attr->ia_mode &= ~S_ISGID;
        }

        /* Check for setting the inode time */
        if ((attr->ia_valid & ATTR_ATIME_SET) &&
            ((current->fsuid != inode->i_uid) && !fsuser()))
                return -EPERM;
        if ((attr->ia_valid & ATTR_MTIME_SET) &&
            ((current->fsuid != inode->i_uid) && !fsuser()))
                return -EPERM;
        return 0;
}
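
/*
 * Illustrative note (not in the original source): the checks above use
 * current->fsuid rather than the real or effective uid, and fsuser()
 * rather than suser().  The fs* identities exist so that servers such
 * as nfsd can act on behalf of a user for filesystem permission checks
 * without changing their real credentials.
 */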

/*
 * Set the appropriate attributes from an attribute structure into
 * the inode structure.
 */
void inode_setattr(struct inode *inode, struct iattr *attr)
{
        if (attr->ia_valid & ATTR_UID)
                inode->i_uid = attr->ia_uid;
        if (attr->ia_valid & ATTR_GID)
                inode->i_gid = attr->ia_gid;
        if (attr->ia_valid & ATTR_SIZE)
                inode->i_size = attr->ia_size;
        if (attr->ia_valid & ATTR_ATIME)
                inode->i_atime = attr->ia_atime;
        if (attr->ia_valid & ATTR_MTIME)
                inode->i_mtime = attr->ia_mtime;
        if (attr->ia_valid & ATTR_CTIME)
                inode->i_ctime = attr->ia_ctime;
        if (attr->ia_valid & ATTR_MODE) {
                inode->i_mode = attr->ia_mode;
                if (!fsuser() && !in_group_p(inode->i_gid))
                        inode->i_mode &= ~S_ISGID;
        }
        inode->i_dirt = 1;
}

/*
 * notify_change is called for inode-changing operations such as
 * chown, chmod, utime, and truncate.  It is guaranteed (unlike
 * write_inode) to be called from the context of the user requesting
 * the change.
 */

int notify_change(struct inode * inode, struct iattr *attr)
{
        int retval;

        attr->ia_ctime = CURRENT_TIME;
        if (attr->ia_valid & (ATTR_ATIME | ATTR_MTIME)) {
                if (!(attr->ia_valid & ATTR_ATIME_SET))
                        attr->ia_atime = attr->ia_ctime;
                if (!(attr->ia_valid & ATTR_MTIME_SET))
                        attr->ia_mtime = attr->ia_ctime;
        }

        if (inode->i_sb && inode->i_sb->s_op &&
            inode->i_sb->s_op->notify_change)
                return inode->i_sb->s_op->notify_change(inode, attr);

        if ((retval = inode_change_ok(inode, attr)) != 0)
                return retval;

        inode_setattr(inode, attr);
        return 0;
}
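
/*
 * Illustrative sketch (not in the original source): a typical caller is
 * sys_chmod() in fs/open.c, which builds an iattr and hands it to
 * notify_change() roughly like this (from memory; details may differ):
 *
 *      struct iattr newattrs;
 *
 *      newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
 *      newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
 *      error = notify_change(inode, &newattrs);
 *
 * Filesystems that need their own attribute handling hook the
 * s_op->notify_change operation and take over before inode_change_ok()
 * runs.
 */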

/*
 * bmap is needed for demand-loading and paging: if this function
 * doesn't exist for a filesystem, then those things are impossible:
 * executables cannot be run from the filesystem etc...
 *
 * This isn't as bad as it sounds: the read-routines might still work,
 * so the filesystem would be otherwise ok (for example, you might have
 * a DOS filesystem, which doesn't lend itself to bmap very well, but
 * you could still transfer files to/from the filesystem)
 */
int bmap(struct inode * inode, int block)
{
        if (inode->i_op && inode->i_op->bmap)
                return inode->i_op->bmap(inode,block);
        return 0;
}
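
/*
 * Illustrative note (not in the original source): demand paging of an
 * executable works through bmap().  To page in the data at a given file
 * offset, the mm layer converts the offset to a file block and asks the
 * filesystem which device block holds it, conceptually:
 *
 *      block = offset >> inode->i_sb->s_blocksize_bits;
 *      dev_block = bmap(inode, block);
 *
 * A return of 0 means "no mapping", which is why executables cannot be
 * run from a filesystem that lacks bmap().
 */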

void invalidate_inodes(kdev_t dev)
{
        struct inode * inode, * next;
        int i;

        next = first_inode;
        for(i = nr_inodes ; i > 0 ; i--) {
                inode = next;
                next = inode->i_next;           /* clear_inode() changes the queues.. */
                if (inode->i_dev != dev)
                        continue;
                if (inode->i_count || inode->i_dirt || inode->i_lock) {
                        printk("VFS: inode busy on removed device %s\n",
                               kdevname(dev));
                        continue;
                }
                clear_inode(inode);
        }
}

void sync_inodes(kdev_t dev)
{
        int i;
        struct inode * inode;

        inode = first_inode;
        for(i = 0; i < nr_inodes*2; i++, inode = inode->i_next) {
                if (dev && inode->i_dev != dev)
                        continue;
                wait_on_inode(inode);
                if (inode->i_dirt)
                        write_inode(inode);
        }
}

void iput(struct inode * inode)
{
        if (!inode)
                return;
        wait_on_inode(inode);
        if (!inode->i_count) {
                printk("VFS: iput: trying to free free inode\n");
                printk("VFS: device %s, inode %lu, mode=0%07o\n",
                        kdevname(inode->i_rdev), inode->i_ino, inode->i_mode);
                return;
        }
        if (inode->i_pipe)
                wake_up_interruptible(&PIPE_WAIT(*inode));

repeat:
        if (inode->i_count>1) {
                inode->i_count--;
                return;
        }

        wake_up(&inode_wait);
        if (inode->i_pipe) {
                unsigned long page = (unsigned long) PIPE_BASE(*inode);
                PIPE_BASE(*inode) = NULL;
                free_page(page);
        }

        if (inode->i_sb && inode->i_sb->s_op && inode->i_sb->s_op->put_inode) {
                inode->i_sb->s_op->put_inode(inode);
                if (!inode->i_nlink)
                        return;
        }

        if (inode->i_dirt) {
                write_inode(inode);     /* we can sleep - so do again */
                wait_on_inode(inode);
                goto repeat;
        }

        if (IS_WRITABLE(inode)) {
                if (inode->i_sb && inode->i_sb->dq_op) {
                        /* Here we can sleep also. Let's do it again.
                         * Dmitry Gorodchanin 02/11/96
                         */
                        inode->i_lock = 1;
                        inode->i_sb->dq_op->drop(inode);
                        unlock_inode(inode);
                        goto repeat;
                }
        }

        inode->i_count--;

        if (inode->i_count)
                /*
                 * Huoh, we were supposed to be the last user, but someone has
                 * grabbed it while we were sleeping. Don't destroy inode VM
                 * mappings, it might cause a memory leak.
                 */
                return;

#ifndef NO_MM
        if (inode->i_mmap) {
                printk("iput: inode %lu on device %s still has mappings.\n",
                        inode->i_ino, kdevname(inode->i_dev));
                inode->i_mmap = NULL;
        }
#endif /* !NO_MM */

        nr_free_inodes++;
        return;
}
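
/*
 * Illustrative sketch (not in the original source): iget() and iput()
 * bracket every use of an in-core inode.  A filesystem lookup routine
 * typically does something like:
 *
 *      struct inode * inode = iget(dir->i_sb, ino);
 *      if (!inode)
 *              return -EACCES;
 *      ... use the inode ...
 *      iput(inode);
 *
 * A missing iput() pins the inode in core (and its device busy) until
 * reboot.
 */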

struct inode * get_empty_inode(void)
{
        static int ino = 0;
        struct inode * inode, * best;
        unsigned long badness;
        int i;

        if (nr_inodes < max_inodes && nr_free_inodes < (nr_inodes >> 1))
                grow_inodes();
repeat:
        inode = first_inode;
        best = NULL;
        badness = 1000;
        for (i = nr_inodes/2; i > 0; i--,inode = inode->i_next) {
                if (!inode->i_count) {
                        unsigned long i = 999;
                        if (!(inode->i_lock || inode->i_dirt))
                                i = inode->i_nrpages;
                        if (i < badness) {
                                best = inode;
                                if (!i)
                                        goto found_good;
                                badness = i;
                        }
                }
        }
        if (nr_inodes < max_inodes) {
                if (grow_inodes() == 0)
                        goto repeat;
                best = NULL;
        }
        if (!best) {
                printk("VFS: No free inodes - contact Linus\n");
                sleep_on(&inode_wait);
                goto repeat;
        }
        if (best->i_lock) {
                wait_on_inode(best);
                goto repeat;
        }
        if (best->i_dirt) {
                write_inode(best);
                goto repeat;
        }
        if (best->i_count)
                goto repeat;
found_good:
        clear_inode(best);
        best->i_count = 1;
        best->i_nlink = 1;
        best->i_version = ++event;
        best->i_sem.count = 1;
        best->i_ino = ++ino;
        best->i_dev = 0;
        nr_free_inodes--;
        if (nr_free_inodes < 0) {
                printk ("VFS: get_empty_inode: bad free inode count.\n");
                nr_free_inodes = 0;
        }
        return best;
}
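
/*
 * Illustrative note (not in the original source): the "badness" scan
 * above is a cheapest-victim heuristic.  A free inode that is neither
 * locked nor dirty scores its cached page count (i_nrpages), so an
 * unlocked, clean inode with no cached pages scores 0 and is taken
 * immediately via found_good; locked or dirty inodes score 999 and are
 * used only as a last resort, since reclaiming them means sleeping or
 * writing them back first.
 */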

struct inode * get_pipe_inode(void)
{
        struct inode * inode;
        extern struct inode_operations pipe_inode_operations;

        if (!(inode = get_empty_inode()))
                return NULL;
        if (!(PIPE_BASE(*inode) = (char*) __get_free_page(GFP_USER))) {
                iput(inode);
                return NULL;
        }
        inode->i_op = &pipe_inode_operations;
        inode->i_count = 2;     /* sum of readers/writers */
        PIPE_WAIT(*inode) = NULL;
        PIPE_START(*inode) = PIPE_LEN(*inode) = 0;
        PIPE_RD_OPENERS(*inode) = PIPE_WR_OPENERS(*inode) = 0;
        PIPE_READERS(*inode) = PIPE_WRITERS(*inode) = 1;
        PIPE_LOCK(*inode) = 0;
        inode->i_pipe = 1;
        inode->i_mode |= S_IFIFO | S_IRUSR | S_IWUSR;
        inode->i_uid = current->fsuid;
        inode->i_gid = current->fsgid;
        inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
        inode->i_blksize = PAGE_SIZE;
        return inode;
}
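
/*
 * Illustrative sketch (not in the original source): get_pipe_inode() is
 * the backend of the pipe(2) system call.  do_pipe() in fs/pipe.c grabs
 * one such inode and wires it to two file structures, conceptually:
 *
 *      struct inode * inode = get_pipe_inode();
 *      f_read->f_inode = f_write->f_inode = inode;
 *      f_read->f_mode = 1;             read end
 *      f_write->f_mode = 2;            write end
 *
 * which is why i_count starts at 2: one reference per end of the pipe.
 */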

struct inode *__iget(struct super_block * sb, int nr, int crossmntp)
{
        static struct wait_queue * update_wait = NULL;
        struct inode_hash_entry * h;
        struct inode * inode;
        struct inode * empty = NULL;

        if (!sb)
                panic("VFS: iget with sb==NULL");
        h = hash(sb->s_dev, nr);
repeat:
        for (inode = h->inode; inode ; inode = inode->i_hash_next)
                if (inode->i_dev == sb->s_dev && inode->i_ino == nr)
                        goto found_it;
        if (!empty) {
                /*
                 * If we sleep here before we have found an inode
                 * we need to make sure nobody does anything bad
                 * to the inode while we sleep, because otherwise
                 * we may return an inode that is not valid any
                 * more when we wake up..
                 */
                h->updating++;
                empty = get_empty_inode();
                if (!--h->updating)
                        wake_up(&update_wait);
                if (empty)
                        goto repeat;
                return (NULL);
        }
        inode = empty;
        inode->i_sb = sb;
        inode->i_dev = sb->s_dev;
        inode->i_ino = nr;
        inode->i_flags = sb->s_flags;
        put_last_free(inode);
        insert_inode_hash(inode);
        read_inode(inode);
        goto return_it;

found_it:
        /*
         * The inode may currently be being pulled down by
         * clear_inode().  Avoid it if so.  If we get past this, then
         * the increment of i_count will prevent the inode's reuse.
         */
        if (inode->i_condemned) {
                sleep_on(&inode->i_wait);
                goto repeat;
        }
        if (!inode->i_count)
                nr_free_inodes--;
        inode->i_count++;
        wait_on_inode(inode);
        if (inode->i_dev != sb->s_dev || inode->i_ino != nr) {
                printk("Whee.. inode changed from under us. Tell Linus\n");
                iput(inode);
                goto repeat;
        }
        if (crossmntp && inode->i_mount) {
                struct inode * tmp = inode->i_mount;
                tmp->i_count++;
                iput(inode);
                inode = tmp;
                wait_on_inode(inode);
        }
        if (empty)
                iput(empty);

return_it:
        while (h->updating)
                sleep_on(&update_wait);
        return inode;
}
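
/*
 * Illustrative note (not in the original source): callers normally use
 * the iget() wrapper from <linux/fs.h>, which in this kernel generation
 * is simply __iget(sb, nr, 1), i.e. a lookup that follows mount points.
 * The crossmntp == 0 form is for the few places (such as umount
 * handling) that need the mounted-on inode itself rather than the root
 * of the filesystem mounted on it.
 */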

/*
 * The "new" scheduling primitives (new as of 0.97 or so) allow this to
 * be done without disabling interrupts (other than in the actual queue
 * updating things: only a couple of 386 instructions). This should be
 * much better for interrupt latency.
 */
static void __wait_on_inode(struct inode * inode)
{
        struct wait_queue wait = { current, NULL };

        add_wait_queue(&inode->i_wait, &wait);
repeat:
        current->state = TASK_UNINTERRUPTIBLE;
        if (inode->i_lock) {
                schedule();
                goto repeat;
        }
        remove_wait_queue(&inode->i_wait, &wait);
        current->state = TASK_RUNNING;
}
