OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

or1k/trunk/rc203soc/sw/uClinux/fs/ext2/truncate.c (rev 1777)


/*
 *  linux/fs/ext2/truncate.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/truncate.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

/*
 * Real random numbers for secure rm added 94/02/18
 * Idea from Pierre del Perugia <delperug@gla.ecoledoc.ibp.fr>
 */

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/ext2_fs.h>
#include <linux/fcntl.h>
#include <linux/sched.h>
#include <linux/stat.h>
#include <linux/locks.h>
#include <linux/string.h>

#if 0

/*
 * Secure deletion currently doesn't work. It interacts very badly
 * with buffers shared with memory mappings, and for that reason
 * can't be done in the truncate() routines. It should instead be
 * done separately in "release()" before calling the truncate routines
 * that will release the actual file blocks.
 *
 *              Linus
 */
static int ext2_secrm_seed = 152;       /* Random generator base */

#define RANDOM_INT (ext2_secrm_seed = ext2_secrm_seed * 69069l +1)
#endif

/*
 * Truncate has the most races in the whole filesystem: coding it is
 * a pain in the a**. Especially as I don't do any locking...
 *
 * The code may look a bit weird, but that's just because I've tried to
 * handle things like file-size changes in a somewhat graceful manner.
 * Anyway, truncating a file at the same time somebody else writes to it
 * is likely to result in pretty weird behaviour...
 *
 * The new code handles normal truncates (size = 0) as well as the more
 * general case (size = XXX). I hope.
 */
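
/*
 * trunc_direct() frees the EXT2_NDIR_BLOCKS direct blocks that lie
 * beyond the new file size.  DIRECT_BLOCK is the number of blocks the
 * file still needs, i.e. the index of the first block to free: with a
 * 1024-byte block size and i_size = 5000, DIRECT_BLOCK is
 * (5000 + 1023) / 1024 = 5, so direct blocks 5 and up are released.
 * Runs of consecutive block numbers are batched into a single
 * ext2_free_blocks() call, and a non-zero return value means a
 * potential race was detected and the pass must be repeated.
 */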
static int trunc_direct (struct inode * inode)
{
        u32 * p;
        int i, tmp;
        struct buffer_head * bh;
        unsigned long block_to_free = 0;
        unsigned long free_count = 0;
        int retry = 0;
        int blocks = inode->i_sb->s_blocksize / 512;
#define DIRECT_BLOCK ((inode->i_size + inode->i_sb->s_blocksize - 1) / \
                        inode->i_sb->s_blocksize)
        int direct_block = DIRECT_BLOCK;

repeat:
        for (i = direct_block ; i < EXT2_NDIR_BLOCKS ; i++) {
                p = inode->u.ext2_i.i_data + i;
                tmp = *p;
                if (!tmp)
                        continue;
                bh = get_hash_table (inode->i_dev, tmp,
                                     inode->i_sb->s_blocksize);
                if (i < direct_block) {
                        brelse (bh);
                        goto repeat;
                }
                if ((bh && bh->b_count != 1) || tmp != *p) {
                        retry = 1;
                        brelse (bh);
                        continue;
                }
                *p = 0;
                inode->i_blocks -= blocks;
                inode->i_dirt = 1;
                bforget(bh);
                if (free_count == 0) {
                        block_to_free = tmp;
                        free_count++;
                } else if (free_count > 0 && block_to_free == tmp - free_count)
                        free_count++;
                else {
                        ext2_free_blocks (inode, block_to_free, free_count);
                        block_to_free = tmp;
                        free_count = 1;
                }
/*              ext2_free_blocks (inode, tmp, 1); */
        }
        if (free_count > 0)
                ext2_free_blocks (inode, block_to_free, free_count);
        return retry;
}
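
/*
 * trunc_indirect() does the same job for one indirect block: "offset"
 * is the file block number mapped by the first entry of that block
 * (EXT2_IND_BLOCK for the singly-indirect tree), so INDIRECT_BLOCK is
 * the index of the first entry that has to go.  The entries inside the
 * block are kept in on-disk byte order and are therefore read and
 * cleared through swab32().  If every entry ends up free, the indirect
 * block itself is released and *p is cleared.
 */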
static int trunc_indirect (struct inode * inode, int offset, u32 * p)
{
        int i, tmp;
        struct buffer_head * bh;
        struct buffer_head * ind_bh;
        u32 * ind;
        unsigned long block_to_free = 0;
        unsigned long free_count = 0;
        int retry = 0;
        int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
        int blocks = inode->i_sb->s_blocksize / 512;
#define INDIRECT_BLOCK ((int)DIRECT_BLOCK - offset)
        int indirect_block = INDIRECT_BLOCK;

        tmp = *p;
        if (!tmp)
                return 0;
        ind_bh = bread (inode->i_dev, tmp, inode->i_sb->s_blocksize);
        if (tmp != *p) {
                brelse (ind_bh);
                return 1;
        }
        if (!ind_bh) {
                *p = 0;
                return 0;
        }
repeat:
        for (i = indirect_block ; i < addr_per_block ; i++) {
                if (i < 0)
                        i = 0;
                if (i < indirect_block)
                        goto repeat;
                ind = i + (u32 *) ind_bh->b_data;
                tmp = swab32(*ind);
                if (!tmp)
                        continue;
                bh = get_hash_table (inode->i_dev, tmp,
                                     inode->i_sb->s_blocksize);
                if (i < indirect_block) {
                        brelse (bh);
                        goto repeat;
                }
                if ((bh && bh->b_count != 1) || tmp != swab32(*ind)) {
                        retry = 1;
                        brelse (bh);
                        continue;
                }
                *ind = swab32(0);
                mark_buffer_dirty(ind_bh, 1);
                bforget(bh);
                if (free_count == 0) {
                        block_to_free = tmp;
                        free_count++;
                } else if (free_count > 0 && block_to_free == tmp - free_count)
                        free_count++;
                else {
                        ext2_free_blocks (inode, block_to_free, free_count);
                        block_to_free = tmp;
                        free_count = 1;
                }
/*              ext2_free_blocks (inode, tmp, 1); */
                inode->i_blocks -= blocks;
                inode->i_dirt = 1;
        }
        if (free_count > 0)
                ext2_free_blocks (inode, block_to_free, free_count);
        ind = (u32 *) ind_bh->b_data;
        for (i = 0; i < addr_per_block; i++)
                if (swab32(*(ind++)))
                        break;
        if (i >= addr_per_block)
                if (ind_bh->b_count != 1)
                        retry = 1;
                else {
                        tmp = *p;
                        *p = 0;
                        inode->i_blocks -= blocks;
                        inode->i_dirt = 1;
                        ext2_free_blocks (inode, tmp, 1);
                }
        if (IS_SYNC(inode) && buffer_dirty(ind_bh)) {
                ll_rw_block (WRITE, 1, &ind_bh);
                wait_on_buffer (ind_bh);
        }
        brelse (ind_bh);
        return retry;
}
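
/*
 * trunc_indirect_swab32() is identical to trunc_indirect() except that
 * the pointer *p it is handed lives inside another on-disk block (a
 * doubly- or triply-indirect block) rather than in the in-core inode,
 * so *p itself must also be read and written through swab32().
 */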
static int trunc_indirect_swab32 (struct inode * inode, int offset, u32 * p)
{
        int i, tmp;
        struct buffer_head * bh;
        struct buffer_head * ind_bh;
        u32 * ind;
        unsigned long block_to_free = 0;
        unsigned long free_count = 0;
        int retry = 0;
        int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
        int blocks = inode->i_sb->s_blocksize / 512;
        int indirect_block = INDIRECT_BLOCK;

        tmp = swab32(*p);
        if (!tmp)
                return 0;
        ind_bh = bread (inode->i_dev, tmp, inode->i_sb->s_blocksize);
        if (tmp != swab32(*p)) {
                brelse (ind_bh);
                return 1;
        }
        if (!ind_bh) {
                *p = swab32(0);
                return 0;
        }
repeat:
        for (i = indirect_block ; i < addr_per_block ; i++) {
                if (i < 0)
                        i = 0;
                if (i < indirect_block)
                        goto repeat;
                ind = i + (u32 *) ind_bh->b_data;
                tmp = swab32(*ind);
                if (!tmp)
                        continue;
                bh = get_hash_table (inode->i_dev, tmp,
                                     inode->i_sb->s_blocksize);
                if (i < indirect_block) {
                        brelse (bh);
                        goto repeat;
                }
                if ((bh && bh->b_count != 1) || tmp != swab32(*ind)) {
                        retry = 1;
                        brelse (bh);
                        continue;
                }
                *ind = swab32(0);
                mark_buffer_dirty(ind_bh, 1);
                bforget(bh);
                if (free_count == 0) {
                        block_to_free = tmp;
                        free_count++;
                } else if (free_count > 0 && block_to_free == tmp - free_count)
                        free_count++;
                else {
                        ext2_free_blocks (inode, block_to_free, free_count);
                        block_to_free = tmp;
                        free_count = 1;
                }
/*              ext2_free_blocks (inode, tmp, 1); */
                inode->i_blocks -= blocks;
                inode->i_dirt = 1;
        }
        if (free_count > 0)
                ext2_free_blocks (inode, block_to_free, free_count);
        ind = (u32 *) ind_bh->b_data;
        for (i = 0; i < addr_per_block; i++)
                if (swab32(*(ind++)))
                        break;
        if (i >= addr_per_block)
                if (ind_bh->b_count != 1)
                        retry = 1;
                else {
                        tmp = swab32(*p);
                        *p = swab32(0);
                        inode->i_blocks -= blocks;
                        inode->i_dirt = 1;
                        ext2_free_blocks (inode, tmp, 1);
                }
        if (IS_SYNC(inode) && buffer_dirty(ind_bh)) {
                ll_rw_block (WRITE, 1, &ind_bh);
                wait_on_buffer (ind_bh);
        }
        brelse (ind_bh);
        return retry;
}
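
/*
 * trunc_dindirect() walks a doubly-indirect block.  Each live entry
 * maps an indirect block covering addr_per_block data blocks, so
 * DINDIRECT_BLOCK is the first entry affected by the truncation; every
 * entry from there on is handed to trunc_indirect_swab32() together
 * with the file block number at which it starts.  An emptied
 * doubly-indirect block is freed and *p cleared, as in trunc_indirect().
 */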
static int trunc_dindirect (struct inode * inode, int offset,
                            u32 * p)
{
        int i, tmp;
        struct buffer_head * dind_bh;
        u32 * dind;
        int retry = 0;
        int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
        int blocks = inode->i_sb->s_blocksize / 512;
#define DINDIRECT_BLOCK (((int)DIRECT_BLOCK - offset) / addr_per_block)
        int dindirect_block = DINDIRECT_BLOCK;

        tmp = *p;
        if (!tmp)
                return 0;
        dind_bh = bread (inode->i_dev, tmp, inode->i_sb->s_blocksize);
        if (tmp != *p) {
                brelse (dind_bh);
                return 1;
        }
        if (!dind_bh) {
                *p = 0;
                return 0;
        }
repeat:
        for (i = dindirect_block ; i < addr_per_block ; i++) {
                if (i < 0)
                        i = 0;
                if (i < dindirect_block)
                        goto repeat;
                dind = i + (u32 *) dind_bh->b_data;
                tmp = swab32(*dind);
                if (!tmp)
                        continue;
                retry |= trunc_indirect_swab32 (inode, offset + (i * addr_per_block),
                                                dind);
                mark_buffer_dirty(dind_bh, 1);
        }
        dind = (u32 *) dind_bh->b_data;
        for (i = 0; i < addr_per_block; i++)
                if (swab32(*(dind++)))
                        break;
        if (i >= addr_per_block)
                if (dind_bh->b_count != 1)
                        retry = 1;
                else {
                        tmp = *p;
                        *p = 0;
                        inode->i_blocks -= blocks;
                        inode->i_dirt = 1;
                        ext2_free_blocks (inode, tmp, 1);
                }
        if (IS_SYNC(inode) && buffer_dirty(dind_bh)) {
                ll_rw_block (WRITE, 1, &dind_bh);
                wait_on_buffer (dind_bh);
        }
        brelse (dind_bh);
        return retry;
}
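
/*
 * trunc_dindirect_swab32() is the byte-swapped-pointer variant of
 * trunc_dindirect(), used when *p is an entry of the triply-indirect
 * block and is therefore stored in on-disk byte order.
 */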
static int trunc_dindirect_swab32 (struct inode * inode, int offset,
                                   u32 * p)
{
        int i, tmp;
        struct buffer_head * dind_bh;
        u32 * dind;
        int retry = 0;
        int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
        int blocks = inode->i_sb->s_blocksize / 512;
        int dindirect_block = DINDIRECT_BLOCK;

        tmp = swab32(*p);
        if (!tmp)
                return 0;
        dind_bh = bread (inode->i_dev, tmp, inode->i_sb->s_blocksize);
        if (tmp != swab32(*p)) {
                brelse (dind_bh);
                return 1;
        }
        if (!dind_bh) {
                *p = swab32(0);
                return 0;
        }
repeat:
        for (i = dindirect_block ; i < addr_per_block ; i++) {
                if (i < 0)
                        i = 0;
                if (i < dindirect_block)
                        goto repeat;
                dind = i + (u32 *) dind_bh->b_data;
                tmp = swab32(*dind);
                if (!tmp)
                        continue;
                retry |= trunc_indirect_swab32 (inode, offset + (i * addr_per_block),
                                                dind);
                mark_buffer_dirty(dind_bh, 1);
        }
        dind = (u32 *) dind_bh->b_data;
        for (i = 0; i < addr_per_block; i++)
                if (swab32(*(dind++)))
                        break;
        if (i >= addr_per_block)
                if (dind_bh->b_count != 1)
                        retry = 1;
                else {
                        tmp = swab32(*p);
                        *p = swab32(0);
                        inode->i_blocks -= blocks;
                        inode->i_dirt = 1;
                        ext2_free_blocks (inode, tmp, 1);
                }
        if (IS_SYNC(inode) && buffer_dirty(dind_bh)) {
                ll_rw_block (WRITE, 1, &dind_bh);
                wait_on_buffer (dind_bh);
        }
        brelse (dind_bh);
        return retry;
}
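
/*
 * trunc_tindirect() handles the triply-indirect tree rooted at
 * EXT2_TIND_BLOCK.  TINDIRECT_BLOCK converts the truncation point into
 * an index within the triply-indirect block, and each remaining entry,
 * a doubly-indirect block covering addr_per_block * addr_per_block
 * data blocks, is passed on to trunc_dindirect_swab32().
 */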
static int trunc_tindirect (struct inode * inode)
{
        int i, tmp;
        struct buffer_head * tind_bh;
        u32 * tind, * p;
        int retry = 0;
        int addr_per_block = EXT2_ADDR_PER_BLOCK(inode->i_sb);
        int blocks = inode->i_sb->s_blocksize / 512;
#define TINDIRECT_BLOCK (((int)DIRECT_BLOCK - (addr_per_block * addr_per_block + \
                          addr_per_block + EXT2_NDIR_BLOCKS)) / \
                          (addr_per_block * addr_per_block))
        int tindirect_block = TINDIRECT_BLOCK;

        p = inode->u.ext2_i.i_data + EXT2_TIND_BLOCK;
        if (!(tmp = *p))
                return 0;
        tind_bh = bread (inode->i_dev, tmp, inode->i_sb->s_blocksize);
        if (tmp != *p) {
                brelse (tind_bh);
                return 1;
        }
        if (!tind_bh) {
                *p = 0;
                return 0;
        }
repeat:
        for (i = tindirect_block ; i < addr_per_block ; i++) {
                if (i < 0)
                        i = 0;
                if (i < tindirect_block)
                        goto repeat;
                tind = i + (u32 *) tind_bh->b_data;
                retry |= trunc_dindirect_swab32(inode, EXT2_NDIR_BLOCKS +
                         addr_per_block + (i + 1) * addr_per_block * addr_per_block,
                         tind);
                mark_buffer_dirty(tind_bh, 1);
        }
        tind = (u32 *) tind_bh->b_data;
        for (i = 0; i < addr_per_block; i++)
                if (swab32(*(tind++)))
                        break;
        if (i >= addr_per_block)
                if (tind_bh->b_count != 1)
                        retry = 1;
                else {
                        tmp = *p;
                        *p = 0;
                        inode->i_blocks -= blocks;
                        inode->i_dirt = 1;
                        ext2_free_blocks (inode, tmp, 1);
                }
        if (IS_SYNC(inode) && buffer_dirty(tind_bh)) {
                ll_rw_block (WRITE, 1, &tind_bh);
                wait_on_buffer (tind_bh);
        }
        brelse (tind_bh);
        return retry;
}
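
/*
 * ext2_truncate() is the entry point.  It only acts on regular files,
 * directories and symlinks, and leaves append-only and immutable
 * inodes alone.  Because the trunc_* routines take no locks, each of
 * them reports whether it may have lost a race; the while loop repeats
 * the whole pass, yielding the CPU in between, until a pass completes
 * cleanly.  Finally the tail of the last partial block is zeroed so
 * that stale data cannot reappear if the file is later extended.
 */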
void ext2_truncate (struct inode * inode)
{
        int retry;
        struct buffer_head * bh;
        int err;
        int offset;

        if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
            S_ISLNK(inode->i_mode)))
                return;
        if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
                return;
        ext2_discard_prealloc(inode);
        while (1) {
                retry = trunc_direct(inode);
                retry |= trunc_indirect (inode, EXT2_IND_BLOCK,
                        (u32 *) &inode->u.ext2_i.i_data[EXT2_IND_BLOCK]);
                retry |= trunc_dindirect (inode, EXT2_IND_BLOCK +
                        EXT2_ADDR_PER_BLOCK(inode->i_sb),
                        (u32 *) &inode->u.ext2_i.i_data[EXT2_DIND_BLOCK]);
                retry |= trunc_tindirect (inode);
                if (!retry)
                        break;
                if (IS_SYNC(inode) && inode->i_dirt)
                        ext2_sync_inode (inode);
                current->counter = 0;
                schedule ();
        }
        /*
         * If the file is not being truncated to a block boundary, the
         * contents of the partial block following the end of the file must be
         * zeroed in case it ever becomes accessible again because of
         * subsequent file growth.
         */
        offset = inode->i_size & (inode->i_sb->s_blocksize - 1);
        if (offset) {
                bh = ext2_bread (inode,
                                 inode->i_size >> EXT2_BLOCK_SIZE_BITS(inode->i_sb),
                                 0, &err);
                if (bh) {
                        memset (bh->b_data + offset, 0,
                                inode->i_sb->s_blocksize - offset);
                        mark_buffer_dirty (bh, 0);
                        brelse (bh);
                }
        }
        inode->i_mtime = inode->i_ctime = CURRENT_TIME;
        inode->i_dirt = 1;
}
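
A note on the block-batching idiom shared by trunc_direct() and the trunc_indirect*() routines above: runs of consecutive block numbers are accumulated in block_to_free/free_count so that ext2_free_blocks() is called once per contiguous extent rather than once per block. The user-space sketch below is illustrative only and not part of truncate.c; free_run() is a hypothetical stand-in for ext2_free_blocks().

#include <stdio.h>

static void free_run (unsigned long start, unsigned long count)
{
        printf ("free %lu block(s) starting at block %lu\n", count, start);
}

int main (void)
{
        unsigned long blocks[] = { 100, 101, 102, 250, 251, 400 };
        unsigned long block_to_free = 0, free_count = 0;
        size_t i;

        for (i = 0; i < sizeof (blocks) / sizeof (blocks[0]); i++) {
                unsigned long tmp = blocks[i];
                if (free_count == 0) {
                        block_to_free = tmp;            /* start a new run */
                        free_count++;
                } else if (block_to_free == tmp - free_count)
                        free_count++;                   /* tmp extends the current run */
                else {
                        free_run (block_to_free, free_count);
                        block_to_free = tmp;            /* start over with this block */
                        free_count = 1;
                }
        }
        if (free_count > 0)
                free_run (block_to_free, free_count);   /* flush the final run */
        return 0;
}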
