/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright (C) 2001-2003 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 * $Id: build.c,v 1.75 2005/07/22 10:32:07 dedekind Exp $
 *
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mtd/mtd.h>
#include "nodelist.h"

static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *, struct jffs2_inode_cache *, struct jffs2_full_dirent **);

static inline struct jffs2_inode_cache *
first_inode_chain(int *i, struct jffs2_sb_info *c)
{
        for (; *i < INOCACHE_HASHSIZE; (*i)++) {
                if (c->inocache_list[*i])
                        return c->inocache_list[*i];
        }
        return NULL;
}

static inline struct jffs2_inode_cache *
next_inode(int *i, struct jffs2_inode_cache *ic, struct jffs2_sb_info *c)
{
        /* More in this chain? */
        if (ic->next)
                return ic->next;
        (*i)++;
        return first_inode_chain(i, c);
}

#define for_each_inode(i, c, ic)                        \
        for (i = 0, ic = first_inode_chain(&i, (c));    \
             ic;                                        \
             ic = next_inode(&i, ic, (c)))
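
/*
 * Usage sketch (illustrative, not part of the original source):
 * for_each_inode() visits every jffs2_inode_cache in the
 * INOCACHE_HASHSIZE-bucket hash table, walking each bucket's ->next
 * chain and then advancing to the next non-empty bucket.  The index i
 * is passed to the helpers by address so the current bucket survives
 * across iterations:
 *
 *     int i;
 *     struct jffs2_inode_cache *ic;
 *
 *     for_each_inode(i, c, ic) {
 *             ... inspect ic->ino, ic->nlink, ic->nodes ...
 *     }
 */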

static inline void jffs2_build_inode_pass1(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
{
        struct jffs2_full_dirent *fd;

        D1(printk(KERN_DEBUG "jffs2_build_inode building directory inode #%u\n", ic->ino));

        /* For each child, increase nlink */
        for(fd = ic->scan_dents; fd; fd = fd->next) {
                struct jffs2_inode_cache *child_ic;
                if (!fd->ino)
                        continue;

                /* XXX: Can get high latency here with huge directories */

                child_ic = jffs2_get_ino_cache(c, fd->ino);
                if (!child_ic) {
                        printk(KERN_NOTICE "Eep. Child \"%s\" (ino #%u) of dir ino #%u doesn't exist!\n",
                                  fd->name, fd->ino, ic->ino);
                        jffs2_mark_node_obsolete(c, fd->raw);
                        continue;
                }

                if (child_ic->nlink++ && fd->type == DT_DIR) {
                        printk(KERN_NOTICE "Child dir \"%s\" (ino #%u) of dir ino #%u appears to be a hard link\n", fd->name, fd->ino, ic->ino);
                        if (fd->ino == 1 && ic->ino == 1) {
                                printk(KERN_NOTICE "This is mostly harmless, and probably caused by creating a JFFS2 image\n");
                                printk(KERN_NOTICE "using a buggy version of mkfs.jffs2. Use at least v1.17.\n");
                        }
                        /* What do we do about it? */
                }
                D1(printk(KERN_DEBUG "Increased nlink for child \"%s\" (ino #%u)\n", fd->name, fd->ino));
                /* Can't free them. We might need them in pass 2 */
        }
}
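
/*
 * Worked example (hypothetical numbers, not from the original source):
 * if directory ino #5 has scan_dents {"a" -> ino #7, "b" -> ino #0},
 * pass 1 bumps nlink on ino #7 only; the ino #0 entry is a deletion
 * dirent and is skipped by the !fd->ino test above.  Nothing is freed
 * here, since pass 2 may still need the dirents.
 */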

/* Scan plan:
 - Scan physical nodes. Build map of inodes/dirents. Allocate inocaches as we go
 - Scan directory tree from top down, setting nlink in inocaches
 - Scan inocaches for inodes with nlink==0
*/
static int jffs2_build_filesystem(struct jffs2_sb_info *c)
{
        int ret;
        int i;
        struct jffs2_inode_cache *ic;
        struct jffs2_full_dirent *fd;
        struct jffs2_full_dirent *dead_fds = NULL;

        /* First, scan the medium and build all the inode caches with
           lists of physical nodes */

        c->flags |= JFFS2_SB_FLAG_SCANNING;
        ret = jffs2_scan_medium(c);
        c->flags &= ~JFFS2_SB_FLAG_SCANNING;
        if (ret)
                goto exit;

        D1(printk(KERN_DEBUG "Scanned flash completely\n"));
        jffs2_dbg_dump_block_lists_nolock(c);

        c->flags |= JFFS2_SB_FLAG_BUILDING;
        /* Now scan the directory tree, increasing nlink according to every dirent found. */
        for_each_inode(i, c, ic) {
                D1(printk(KERN_DEBUG "Pass 1: ino #%u\n", ic->ino));

                D1(BUG_ON(ic->ino > c->highest_ino));

                if (ic->scan_dents) {
                        jffs2_build_inode_pass1(c, ic);
                        cond_resched();
                }
        }

        D1(printk(KERN_DEBUG "Pass 1 complete\n"));

        /* Next, scan for inodes with nlink == 0 and remove them. If
           they were directories, then decrement the nlink of their
           children too, and repeat the scan. As that's going to be
           a fairly uncommon occurrence, it's not so evil to do it this
           way. Recursion bad. */
        D1(printk(KERN_DEBUG "Pass 2 starting\n"));

        for_each_inode(i, c, ic) {
                D1(printk(KERN_DEBUG "Pass 2: ino #%u, nlink %d, ic %p, nodes %p\n", ic->ino, ic->nlink, ic, ic->nodes));
                if (ic->nlink)
                        continue;

                jffs2_build_remove_unlinked_inode(c, ic, &dead_fds);
                cond_resched();
        }

        D1(printk(KERN_DEBUG "Pass 2a starting\n"));

        while (dead_fds) {
                fd = dead_fds;
                dead_fds = fd->next;

                ic = jffs2_get_ino_cache(c, fd->ino);
                D1(printk(KERN_DEBUG "Removing dead_fd ino #%u (\"%s\"), ic at %p\n", fd->ino, fd->name, ic));

                if (ic)
                        jffs2_build_remove_unlinked_inode(c, ic, &dead_fds);
                jffs2_free_full_dirent(fd);
        }

        D1(printk(KERN_DEBUG "Pass 2 complete\n"));

        /* Finally, we can scan again and free the dirent structs */
        for_each_inode(i, c, ic) {
                D1(printk(KERN_DEBUG "Pass 3: ino #%u, ic %p, nodes %p\n", ic->ino, ic, ic->nodes));

                while(ic->scan_dents) {
                        fd = ic->scan_dents;
                        ic->scan_dents = fd->next;
                        jffs2_free_full_dirent(fd);
                }
                ic->scan_dents = NULL;
                cond_resched();
        }
        c->flags &= ~JFFS2_SB_FLAG_BUILDING;

        D1(printk(KERN_DEBUG "Pass 3 complete\n"));
        jffs2_dbg_dump_block_lists_nolock(c);

        /* Rotate the lists by some number to ensure wear levelling */
        jffs2_rotate_lists(c);

        ret = 0;

exit:
        if (ret) {
                for_each_inode(i, c, ic) {
                        while(ic->scan_dents) {
                                fd = ic->scan_dents;
                                ic->scan_dents = fd->next;
                                jffs2_free_full_dirent(fd);
                        }
                }
        }

        return ret;
}
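
/*
 * Added note (not in the original source): the dead_fds list turns what
 * would otherwise be recursive removal of unlinked directory trees into
 * an iterative worklist.  Pass 2 pushes a dirent for each child whose
 * nlink drops to zero; pass 2a pops entries one at a time, and a popped
 * child that is itself a directory pushes its own children in turn.
 * Stack depth therefore stays constant no matter how deep the dead tree
 * is, which is what the "Recursion bad" comment above is getting at.
 */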

static void jffs2_build_remove_unlinked_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, struct jffs2_full_dirent **dead_fds)
{
        struct jffs2_raw_node_ref *raw;
        struct jffs2_full_dirent *fd;

        D1(printk(KERN_DEBUG "JFFS2: Removing ino #%u with nlink == zero.\n", ic->ino));
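
        /*
         * Added note (not in the original source): ic->nodes is a
         * circular list; the last raw node ref's next_in_ino points back
         * at the inocache itself, which is why the loop below terminates
         * on raw != (void *)ic rather than on NULL.
         */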
        raw = ic->nodes;
        while (raw != (void *)ic) {
                struct jffs2_raw_node_ref *next = raw->next_in_ino;
                D1(printk(KERN_DEBUG "obsoleting node at 0x%08x\n", ref_offset(raw)));
                jffs2_mark_node_obsolete(c, raw);
                raw = next;
        }

        if (ic->scan_dents) {
                int whinged = 0;
                D1(printk(KERN_DEBUG "Inode #%u was a directory which may have children...\n", ic->ino));

                while(ic->scan_dents) {
                        struct jffs2_inode_cache *child_ic;

                        fd = ic->scan_dents;
                        ic->scan_dents = fd->next;

                        if (!fd->ino) {
                                /* It's a deletion dirent. Ignore it */
                                D1(printk(KERN_DEBUG "Child \"%s\" is a deletion dirent, skipping...\n", fd->name));
                                jffs2_free_full_dirent(fd);
                                continue;
                        }
                        if (!whinged) {
                                whinged = 1;
                                printk(KERN_NOTICE "Inode #%u was a directory with children - removing those too...\n", ic->ino);
                        }

                        D1(printk(KERN_DEBUG "Removing child \"%s\", ino #%u\n",
                                  fd->name, fd->ino));

                        child_ic = jffs2_get_ino_cache(c, fd->ino);
                        if (!child_ic) {
                                printk(KERN_NOTICE "Cannot remove child \"%s\", ino #%u, because it doesn't exist\n", fd->name, fd->ino);
                                jffs2_free_full_dirent(fd);
                                continue;
                        }

                        /* Reduce nlink of the child. If it's now zero, stick it on the
                           dead_fds list to be cleaned up later. Else just free the fd */

                        child_ic->nlink--;

                        if (!child_ic->nlink) {
                                D1(printk(KERN_DEBUG "Inode #%u (\"%s\") has now got zero nlink. Adding to dead_fds list.\n",
                                          fd->ino, fd->name));
                                fd->next = *dead_fds;
                                *dead_fds = fd;
                        } else {
                                D1(printk(KERN_DEBUG "Inode #%u (\"%s\") has now got nlink %d. Ignoring.\n",
                                          fd->ino, fd->name, child_ic->nlink));
                                jffs2_free_full_dirent(fd);
                        }
                }
        }

        /*
           We don't delete the inocache from the hash list and free it yet.
           The erase code will do that, when all the nodes are completely gone.
        */
}

static void jffs2_calc_trigger_levels(struct jffs2_sb_info *c)
{
        uint32_t size;

        /* Deletion should almost _always_ be allowed. We're fairly
           buggered once we stop allowing people to delete stuff
           because there's not enough free space... */
        c->resv_blocks_deletion = 2;

        /* Be conservative about how much space we need before we allow writes.
           On top of that which is required for deletia, require an extra 2%
           of the medium to be available, for overhead caused by nodes being
           split across blocks, etc. */

        size = c->flash_size / 50; /* 2% of flash size */
        size += c->nr_blocks * 100; /* And 100 bytes per eraseblock */
        size += c->sector_size - 1; /* ... and round up */

        c->resv_blocks_write = c->resv_blocks_deletion + (size / c->sector_size);

        /* When do we let the GC thread run in the background */

        c->resv_blocks_gctrigger = c->resv_blocks_write + 1;

        /* When do we allow garbage collection to merge nodes to make
           long-term progress at the expense of short-term space exhaustion? */
        c->resv_blocks_gcmerge = c->resv_blocks_deletion + 1;

        /* When do we allow garbage collection to eat from bad blocks rather
           than actually making progress? */
        c->resv_blocks_gcbad = 0; /* c->resv_blocks_deletion + 2; */

        /* If there's less than this amount of dirty space, don't bother
           trying to GC to make more space. It'll be a fruitless task */
        c->nospc_dirty_size = c->sector_size + (c->flash_size / 100);

        D1(printk(KERN_DEBUG "JFFS2 trigger levels (size %d KiB, block size %d KiB, %d blocks)\n",
                  c->flash_size / 1024, c->sector_size / 1024, c->nr_blocks));
        D1(printk(KERN_DEBUG "Blocks required to allow deletion:    %d (%d KiB)\n",
                  c->resv_blocks_deletion, c->resv_blocks_deletion*c->sector_size/1024));
        D1(printk(KERN_DEBUG "Blocks required to allow writes:      %d (%d KiB)\n",
                  c->resv_blocks_write, c->resv_blocks_write*c->sector_size/1024));
        D1(printk(KERN_DEBUG "Blocks required to quiesce GC thread: %d (%d KiB)\n",
                  c->resv_blocks_gctrigger, c->resv_blocks_gctrigger*c->sector_size/1024));
        D1(printk(KERN_DEBUG "Blocks required to allow GC merges:   %d (%d KiB)\n",
                  c->resv_blocks_gcmerge, c->resv_blocks_gcmerge*c->sector_size/1024));
        D1(printk(KERN_DEBUG "Blocks required to GC bad blocks:     %d (%d KiB)\n",
                  c->resv_blocks_gcbad, c->resv_blocks_gcbad*c->sector_size/1024));
        D1(printk(KERN_DEBUG "Amount of dirty space required to GC: %d bytes\n",
                  c->nospc_dirty_size));
}
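
/*
 * Worked example (hypothetical device, not from the original source):
 * a 16 MiB flash with 64 KiB eraseblocks gives nr_blocks = 256, so
 *
 *     size = 16777216/50 + 256*100 + 65535 = 426679 bytes -> 6 blocks
 *
 *     resv_blocks_deletion  = 2
 *     resv_blocks_write     = 2 + 6 = 8
 *     resv_blocks_gctrigger = 8 + 1 = 9
 *     resv_blocks_gcmerge   = 2 + 1 = 3
 *     nospc_dirty_size      = 65536 + 167772 = 233308 bytes
 */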

int jffs2_do_mount_fs(struct jffs2_sb_info *c)
{
        int i;

        c->free_size = c->flash_size;
        c->nr_blocks = c->flash_size / c->sector_size;
#ifndef __ECOS
        if (c->mtd->flags & MTD_NO_VIRTBLOCKS)
                c->blocks = vmalloc(sizeof(struct jffs2_eraseblock) * c->nr_blocks);
        else
#endif
                c->blocks = kmalloc(sizeof(struct jffs2_eraseblock) * c->nr_blocks, GFP_KERNEL);
        if (!c->blocks)
                return -ENOMEM;
        for (i=0; i<c->nr_blocks; i++) {
                INIT_LIST_HEAD(&c->blocks[i].list);
                c->blocks[i].offset = i * c->sector_size;
                c->blocks[i].free_size = c->sector_size;
                c->blocks[i].dirty_size = 0;
                c->blocks[i].wasted_size = 0;
                c->blocks[i].unchecked_size = 0;
                c->blocks[i].used_size = 0;
                c->blocks[i].first_node = NULL;
                c->blocks[i].last_node = NULL;
                c->blocks[i].bad_count = 0;
        }

        INIT_LIST_HEAD(&c->clean_list);
        INIT_LIST_HEAD(&c->very_dirty_list);
        INIT_LIST_HEAD(&c->dirty_list);
        INIT_LIST_HEAD(&c->erasable_list);
        INIT_LIST_HEAD(&c->erasing_list);
        INIT_LIST_HEAD(&c->erase_pending_list);
        INIT_LIST_HEAD(&c->erasable_pending_wbuf_list);
        INIT_LIST_HEAD(&c->erase_complete_list);
        INIT_LIST_HEAD(&c->free_list);
        INIT_LIST_HEAD(&c->bad_list);
        INIT_LIST_HEAD(&c->bad_used_list);
        c->highest_ino = 1;

        if (jffs2_build_filesystem(c)) {
                D1(printk(KERN_DEBUG "build_fs failed\n"));
                jffs2_free_ino_caches(c);
                jffs2_free_raw_node_refs(c);
#ifndef __ECOS
                if (c->mtd->flags & MTD_NO_VIRTBLOCKS)
                        vfree(c->blocks);
                else
#endif
                        kfree(c->blocks);

                return -EIO;
        }

        jffs2_calc_trigger_levels(c);

        return 0;
}
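
/*
 * Caller's contract (summary; the sketch below is illustrative, not from
 * the original source): jffs2_do_mount_fs() reads c->flash_size and
 * c->sector_size (and c->mtd outside eCos) before initializing anything,
 * so the port-specific mount code must fill those in first, e.g.:
 *
 *     int ret;
 *     c->flash_size  = 16 * 1024 * 1024;   (hypothetical 16 MiB device)
 *     c->sector_size = 64 * 1024;          (hypothetical 64 KiB blocks)
 *     ret = jffs2_do_mount_fs(c);
 *     if (ret)
 *             return ret;
 *
 * On success the eraseblock array, block lists and GC trigger levels are
 * set up, and the caller can go on to look up the root inode (ino #1).
 */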
