/*
 *  linux/fs/ufs/truncate.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 *  from
 *
 *  linux/fs/ext2/truncate.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/truncate.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

/*
 * Real random numbers for secure rm added 94/02/18
 * Idea from Pierre del Perugia <delperug@gla.ecoledoc.ibp.fr>
 */

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/ufs_fs.h>
#include <linux/fcntl.h>
#include <linux/sched.h>
#include <linux/stat.h>
#include <linux/locks.h>
#include <linux/string.h>

#include "swab.h"
#include "util.h"

#undef UFS_TRUNCATE_DEBUG

#ifdef UFS_TRUNCATE_DEBUG
#define UFSD(x) printk("(%s, %d), %s: ", __FILE__, __LINE__, __FUNCTION__); printk x;
#else
#define UFSD(x)
#endif

/*
 * Secure deletion currently doesn't work. It interacts very badly
 * with buffers shared with memory mappings, and for that reason
 * can't be done in the truncate() routines. It should instead be
 * done separately in "release()" before calling the truncate routines
 * that will release the actual file blocks.
 *
 *              Linus
 */

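/*
 * DIRECT_BLOCK/DIRECT_FRAGMENT round i_size up to a block/fragment
 * boundary, giving the index of the first block/fragment that lies
 * entirely beyond the new file size and can therefore be freed.
 */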
#define DIRECT_BLOCK ((inode->i_size + uspi->s_bsize - 1) >> uspi->s_bshift)
#define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift)

#define DATA_BUFFER_USED(bh) \
        (atomic_read(&bh->b_count)>1 || buffer_locked(bh))

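/*
 * Free the direct blocks and fragments that lie beyond the new i_size.
 * Returns non-zero if a data buffer was still in use (or a block pointer
 * changed under us), in which case the caller must retry.
 */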
static int ufs_trunc_direct (struct inode * inode)
{
        struct super_block * sb;
        struct ufs_sb_private_info * uspi;
        struct buffer_head * bh;
        u32 * p;
        unsigned frag1, frag2, frag3, frag4, block1, block2;
        unsigned frag_to_free, free_count;
        unsigned i, j, tmp;
        int retry;

        UFSD(("ENTER\n"))

        sb = inode->i_sb;
        uspi = sb->u.ufs_sb.s_uspi;

        frag_to_free = 0;
        free_count = 0;
        retry = 0;

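        /*
         * The range to free is split into three parts:
         *   frag1..frag2   leading fragments up to the next block boundary
         *   block1..block2 whole blocks in between
         *   frag3..frag4   trailing fragments after the last block boundary
         * If the range does not span a full block it is freed as fragments only.
         */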
        frag1 = DIRECT_FRAGMENT;
        frag4 = min_t(u32, UFS_NDIR_FRAGMENT, inode->u.ufs_i.i_lastfrag);
        frag2 = ((frag1 & uspi->s_fpbmask) ? ((frag1 | uspi->s_fpbmask) + 1) : frag1);
        frag3 = frag4 & ~uspi->s_fpbmask;
        block1 = block2 = 0;
        if (frag2 > frag3) {
                frag2 = frag4;
                frag3 = frag4 = 0;
        }
        else if (frag2 < frag3) {
                block1 = ufs_fragstoblks (frag2);
                block2 = ufs_fragstoblks (frag3);
        }

        UFSD(("frag1 %u, frag2 %u, block1 %u, block2 %u, frag3 %u, frag4 %u\n", frag1, frag2, block1, block2, frag3, frag4))

        if (frag1 >= frag2)
                goto next1;

        /*
         * Free first free fragments
         */
        p = inode->u.ufs_i.i_u1.i_data + ufs_fragstoblks (frag1);
        tmp = fs32_to_cpu(sb, *p);
        if (!tmp )
                ufs_panic (sb, "ufs_trunc_direct", "internal error");
        frag1 = ufs_fragnum (frag1);
        frag2 = ufs_fragnum (frag2);
        for (j = frag1; j < frag2; j++) {
                bh = sb_get_hash_table (sb, tmp + j);
                if ((bh && DATA_BUFFER_USED(bh)) || tmp != fs32_to_cpu(sb, *p)) {
                        retry = 1;
                        brelse (bh);
                        goto next1;
                }
                bforget (bh);
        }
        inode->i_blocks -= (frag2-frag1) << uspi->s_nspfshift;
        mark_inode_dirty(inode);
        ufs_free_fragments (inode, tmp + frag1, frag2 - frag1);
        frag_to_free = tmp + frag1;

next1:
        /*
         * Free whole blocks
         */
        for (i = block1 ; i < block2; i++) {
                p = inode->u.ufs_i.i_u1.i_data + i;
                tmp = fs32_to_cpu(sb, *p);
                if (!tmp)
                        continue;
                for (j = 0; j < uspi->s_fpb; j++) {
                        bh = sb_get_hash_table(sb, tmp + j);
                        if ((bh && DATA_BUFFER_USED(bh)) || tmp != fs32_to_cpu(sb, *p)) {
                                retry = 1;
                                brelse (bh);
                                goto next2;
                        }
                        bforget (bh);
                }
                *p = 0;
                inode->i_blocks -= uspi->s_nspb;
                mark_inode_dirty(inode);
                if (free_count == 0) {
                        frag_to_free = tmp;
                        free_count = uspi->s_fpb;
                } else if (free_count > 0 && frag_to_free == tmp - free_count)
                        free_count += uspi->s_fpb;
                else {
                        ufs_free_blocks (inode, frag_to_free, free_count);
                        frag_to_free = tmp;
                        free_count = uspi->s_fpb;
                }
next2:;
        }

        if (free_count > 0)
                ufs_free_blocks (inode, frag_to_free, free_count);

        if (frag3 >= frag4)
                goto next3;

        /*
         * Free last free fragments
         */
        p = inode->u.ufs_i.i_u1.i_data + ufs_fragstoblks (frag3);
        tmp = fs32_to_cpu(sb, *p);
        if (!tmp )
                ufs_panic(sb, "ufs_truncate_direct", "internal error");
        frag4 = ufs_fragnum (frag4);
        for (j = 0; j < frag4; j++) {
                bh = sb_get_hash_table (sb, tmp + j);
                if ((bh && DATA_BUFFER_USED(bh)) || tmp != fs32_to_cpu(sb, *p)) {
                        retry = 1;
                        brelse (bh);
                        goto next1;
                }
                bforget (bh);
        }
        *p = 0;
        inode->i_blocks -= frag4 << uspi->s_nspfshift;
        mark_inode_dirty(inode);
        ufs_free_fragments (inode, tmp, frag4);
 next3:

        UFSD(("EXIT\n"))
        return retry;
}


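/*
 * Free all data blocks referenced from one indirect block, starting at the
 * entry that corresponds to the new i_size; release the indirect block
 * itself once it no longer holds any pointers.
 */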
static int ufs_trunc_indirect (struct inode * inode, unsigned offset, u32 * p)
{
        struct super_block * sb;
        struct ufs_sb_private_info * uspi;
        struct ufs_buffer_head * ind_ubh;
        struct buffer_head * bh;
        u32 * ind;
        unsigned indirect_block, i, j, tmp;
        unsigned frag_to_free, free_count;
        int retry;

        UFSD(("ENTER\n"))

        sb = inode->i_sb;
        uspi = sb->u.ufs_sb.s_uspi;

        frag_to_free = 0;
        free_count = 0;
        retry = 0;

        tmp = fs32_to_cpu(sb, *p);
        if (!tmp)
                return 0;
        ind_ubh = ubh_bread(sb, tmp, uspi->s_bsize);
        if (tmp != fs32_to_cpu(sb, *p)) {
                ubh_brelse (ind_ubh);
                return 1;
        }
        if (!ind_ubh) {
                *p = 0;
                return 0;
        }

        indirect_block = (DIRECT_BLOCK > offset) ? (DIRECT_BLOCK - offset) : 0;
        for (i = indirect_block; i < uspi->s_apb; i++) {
                ind = ubh_get_addr32 (ind_ubh, i);
                tmp = fs32_to_cpu(sb, *ind);
                if (!tmp)
                        continue;
                for (j = 0; j < uspi->s_fpb; j++) {
                        bh = sb_get_hash_table(sb, tmp + j);
                        if ((bh && DATA_BUFFER_USED(bh)) || tmp != fs32_to_cpu(sb, *ind)) {
                                retry = 1;
                                brelse (bh);
                                goto next;
                        }
                        bforget (bh);
                }
                *ind = 0;
                ubh_mark_buffer_dirty(ind_ubh);
                if (free_count == 0) {
                        frag_to_free = tmp;
                        free_count = uspi->s_fpb;
                } else if (free_count > 0 && frag_to_free == tmp - free_count)
                        free_count += uspi->s_fpb;
                else {
                        ufs_free_blocks (inode, frag_to_free, free_count);
                        frag_to_free = tmp;
                        free_count = uspi->s_fpb;
                }
                inode->i_blocks -= uspi->s_nspb;
                mark_inode_dirty(inode);
next:;
        }

        if (free_count > 0) {
                ufs_free_blocks (inode, frag_to_free, free_count);
        }
        for (i = 0; i < uspi->s_apb; i++)
                if (*ubh_get_addr32(ind_ubh,i))
                        break;
        if (i >= uspi->s_apb) {
                if (ubh_max_bcount(ind_ubh) != 1) {
                        retry = 1;
                }
                else {
                        tmp = fs32_to_cpu(sb, *p);
                        *p = 0;
                        inode->i_blocks -= uspi->s_nspb;
                        mark_inode_dirty(inode);
                        ufs_free_blocks (inode, tmp, uspi->s_fpb);
                        ubh_bforget(ind_ubh);
                        ind_ubh = NULL;
                }
        }
        if (IS_SYNC(inode) && ind_ubh && ubh_buffer_dirty(ind_ubh)) {
                ubh_ll_rw_block (WRITE, 1, &ind_ubh);
                ubh_wait_on_buffer (ind_ubh);
        }
        ubh_brelse (ind_ubh);

        UFSD(("EXIT\n"))

        return retry;
}

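/*
 * Walk one double indirect block and let ufs_trunc_indirect() free each
 * referenced indirect block; release the double indirect block itself when
 * it becomes empty.
 */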
static int ufs_trunc_dindirect (struct inode * inode, unsigned offset, u32 * p)
{
        struct super_block * sb;
        struct ufs_sb_private_info * uspi;
        struct ufs_buffer_head * dind_bh;
        unsigned i, tmp, dindirect_block;
        u32 * dind;
        int retry = 0;

        UFSD(("ENTER\n"))

        sb = inode->i_sb;
        uspi = sb->u.ufs_sb.s_uspi;

        dindirect_block = (DIRECT_BLOCK > offset)
                ? ((DIRECT_BLOCK - offset) >> uspi->s_apbshift) : 0;
        retry = 0;

        tmp = fs32_to_cpu(sb, *p);
        if (!tmp)
                return 0;
        dind_bh = ubh_bread(sb, tmp, uspi->s_bsize);
        if (tmp != fs32_to_cpu(sb, *p)) {
                ubh_brelse (dind_bh);
                return 1;
        }
        if (!dind_bh) {
                *p = 0;
                return 0;
        }

        for (i = dindirect_block ; i < uspi->s_apb ; i++) {
                dind = ubh_get_addr32 (dind_bh, i);
                tmp = fs32_to_cpu(sb, *dind);
                if (!tmp)
                        continue;
                retry |= ufs_trunc_indirect (inode, offset + (i << uspi->s_apbshift), dind);
                ubh_mark_buffer_dirty(dind_bh);
        }

        for (i = 0; i < uspi->s_apb; i++)
                if (*ubh_get_addr32 (dind_bh, i))
                        break;
        if (i >= uspi->s_apb) {
                if (ubh_max_bcount(dind_bh) != 1)
                        retry = 1;
                else {
                        tmp = fs32_to_cpu(sb, *p);
                        *p = 0;
                        inode->i_blocks -= uspi->s_nspb;
                        mark_inode_dirty(inode);
                        ufs_free_blocks (inode, tmp, uspi->s_fpb);
                        ubh_bforget(dind_bh);
                        dind_bh = NULL;
                }
        }
        if (IS_SYNC(inode) && dind_bh && ubh_buffer_dirty(dind_bh)) {
                ubh_ll_rw_block (WRITE, 1, &dind_bh);
                ubh_wait_on_buffer (dind_bh);
        }
        ubh_brelse (dind_bh);

        UFSD(("EXIT\n"))

        return retry;
}

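/*
 * Same as above, one level up: walk the triple indirect block and hand each
 * entry to ufs_trunc_dindirect().
 */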
static int ufs_trunc_tindirect (struct inode * inode)
{
        struct super_block * sb;
        struct ufs_sb_private_info * uspi;
        struct ufs_buffer_head * tind_bh;
        unsigned tindirect_block, tmp, i;
        u32 * tind, * p;
        int retry;

        UFSD(("ENTER\n"))

        sb = inode->i_sb;
        uspi = sb->u.ufs_sb.s_uspi;
        retry = 0;

        tindirect_block = (DIRECT_BLOCK > (UFS_NDADDR + uspi->s_apb + uspi->s_2apb))
                ? ((DIRECT_BLOCK - UFS_NDADDR - uspi->s_apb - uspi->s_2apb) >> uspi->s_2apbshift) : 0;
        p = inode->u.ufs_i.i_u1.i_data + UFS_TIND_BLOCK;
        if (!(tmp = fs32_to_cpu(sb, *p)))
                return 0;
        tind_bh = ubh_bread (sb, tmp, uspi->s_bsize);
        if (tmp != fs32_to_cpu(sb, *p)) {
                ubh_brelse (tind_bh);
                return 1;
        }
        if (!tind_bh) {
                *p = 0;
                return 0;
        }

        for (i = tindirect_block ; i < uspi->s_apb ; i++) {
                tind = ubh_get_addr32 (tind_bh, i);
                retry |= ufs_trunc_dindirect(inode, UFS_NDADDR +
                        uspi->s_apb + ((i + 1) << uspi->s_2apbshift), tind);
                ubh_mark_buffer_dirty(tind_bh);
        }
        for (i = 0; i < uspi->s_apb; i++)
                if (*ubh_get_addr32 (tind_bh, i))
                        break;
        if (i >= uspi->s_apb) {
                if (ubh_max_bcount(tind_bh) != 1)
                        retry = 1;
                else {
                        tmp = fs32_to_cpu(sb, *p);
                        *p = 0;
                        inode->i_blocks -= uspi->s_nspb;
                        mark_inode_dirty(inode);
                        ufs_free_blocks (inode, tmp, uspi->s_fpb);
                        ubh_bforget(tind_bh);
                        tind_bh = NULL;
                }
        }
        if (IS_SYNC(inode) && tind_bh && ubh_buffer_dirty(tind_bh)) {
                ubh_ll_rw_block (WRITE, 1, &tind_bh);
                ubh_wait_on_buffer (tind_bh);
        }
        ubh_brelse (tind_bh);

        UFSD(("EXIT\n"))
        return retry;
}

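/*
 * Truncate the inode to its current i_size: release direct, indirect,
 * double and triple indirect blocks, then clear the tail of the last
 * fragment and update the timestamps.
 */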
void ufs_truncate (struct inode * inode)
{
        struct super_block * sb;
        struct ufs_sb_private_info * uspi;
        struct buffer_head * bh;
        unsigned offset;
        int err, retry;

        UFSD(("ENTER\n"))
        sb = inode->i_sb;
        uspi = sb->u.ufs_sb.s_uspi;

        if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)))
                return;
        if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
                return;
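        /*
         * Keep retrying until none of the truncation routines reports a
         * busy buffer; between passes, sync the inode if needed, kick the
         * disk queue and yield the CPU.
         */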
        while (1) {
                retry = ufs_trunc_direct(inode);
                retry |= ufs_trunc_indirect (inode, UFS_IND_BLOCK,
                        (u32 *) &inode->u.ufs_i.i_u1.i_data[UFS_IND_BLOCK]);
                retry |= ufs_trunc_dindirect (inode, UFS_IND_BLOCK + uspi->s_apb,
                        (u32 *) &inode->u.ufs_i.i_u1.i_data[UFS_DIND_BLOCK]);
                retry |= ufs_trunc_tindirect (inode);
                if (!retry)
                        break;
                if (IS_SYNC(inode) && (inode->i_state & I_DIRTY))
                        ufs_sync_inode (inode);
                run_task_queue(&tq_disk);
                yield();
        }
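        /*
         * Zero the tail of the fragment containing the new end of file.
         */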
        offset = inode->i_size & uspi->s_fshift;
        if (offset) {
                bh = ufs_bread (inode, inode->i_size >> uspi->s_fshift, 0, &err);
                if (bh) {
                        memset (bh->b_data + offset, 0, uspi->s_fsize - offset);
                        mark_buffer_dirty (bh);
                        brelse (bh);
                }
        }
        inode->i_mtime = inode->i_ctime = CURRENT_TIME;
        inode->u.ufs_i.i_lastfrag = DIRECT_FRAGMENT;
        mark_inode_dirty(inode);
        UFSD(("EXIT\n"))
}
