OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

[/] [or1k/] [trunk/] [ecos-2.0/] [packages/] [fs/] [jffs2/] [v2_0/] [src/] [nodemgmt.c] - Blame information for rev 1765

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 1254 phoenix
/*
2
 * JFFS2 -- Journalling Flash File System, Version 2.
3
 *
4
 * Copyright (C) 2001, 2002 Red Hat, Inc.
5
 *
6
 * Created by David Woodhouse <dwmw2@cambridge.redhat.com>
7
 *
8
 * For licensing information, see the file 'LICENCE' in this directory.
9
 *
10
 * $Id: nodemgmt.c,v 1.1.1.1 2004-02-14 13:29:19 phoenix Exp $
11
 *
12
 */
13
 
14
#include <linux/kernel.h>
15
#include <linux/slab.h>
16
#include <linux/mtd/mtd.h>
17
#include <linux/compiler.h>
18
#include <linux/sched.h> /* For cond_resched() */
19
#include "nodelist.h"
20
 
21
/**
22
 *      jffs2_reserve_space - request physical space to write nodes to flash
23
 *      @c: superblock info
24
 *      @minsize: Minimum acceptable size of allocation
25
 *      @ofs: Returned value of node offset
26
 *      @len: Returned value of allocation length
27
 *      @prio: Allocation type - ALLOC_{NORMAL,DELETION}
28
 *
29
 *      Requests a block of physical space on the flash. Returns zero for success
30
 *      and puts 'ofs' and 'len' into the appropriate place, or returns -ENOSPC
31
 *      or other error if appropriate.
32
 *
33
 *      If it returns zero, jffs2_reserve_space() also downs the per-filesystem
34
 *      allocation semaphore, to prevent more than one allocation from being
35
 *      active at any time. The semaphore is later released by jffs2_commit_allocation()
36
 *
37
 *      jffs2_reserve_space() may trigger garbage collection in order to make room
38
 *      for the requested allocation.
39
 */
40
 
41
static int jffs2_do_reserve_space(struct jffs2_sb_info *c,  uint32_t minsize, uint32_t *ofs, uint32_t *len);
42
 
43
int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len, int prio)
{
	int ret = -EAGAIN;
	int blocksneeded = JFFS2_RESERVED_BLOCKS_WRITE;
	/* align it */
	minsize = PAD(minsize);

	/* Deletion allocations are allowed to dip deeper into the reserved
	   pool (JFFS2_RESERVED_BLOCKS_DELETION), so deletes can proceed
	   when a normal write would already fail. */
	if (prio == ALLOC_DELETION)
		blocksneeded = JFFS2_RESERVED_BLOCKS_DELETION;

	D1(printk(KERN_DEBUG "jffs2_reserve_space(): Requested 0x%x bytes\n", minsize));
	down(&c->alloc_sem);

	D1(printk(KERN_DEBUG "jffs2_reserve_space(): alloc sem got\n"));

	spin_lock(&c->erase_completion_lock);

	/* this needs a little more thought */
	while(ret == -EAGAIN) {
		/* Not enough free+erasing blocks: run garbage collection
		   passes until the pool is replenished, or we have to
		   give up. */
		while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
			int ret; /* NOTE: deliberately shadows the outer 'ret';
				    this one only holds the GC pass result */

			/* Drop alloc_sem before GC -- the GC pass takes it
			   itself. Re-taken at the bottom of this loop. */
			up(&c->alloc_sem);

			/* If dirty + unchecked space together cannot even
			   yield one full sector, GC can never reclaim a
			   whole block: genuinely out of space. */
			if (c->dirty_size + c->unchecked_size < c->sector_size) {
				D1(printk(KERN_DEBUG "dirty size 0x%08x + unchecked_size 0x%08x < sector size 0x%08x, returning -ENOSPC\n",
					  c->dirty_size, c->unchecked_size, c->sector_size));
				spin_unlock(&c->erase_completion_lock);
				return -ENOSPC;
			}

			D1(printk(KERN_DEBUG "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
				  c->nr_free_blocks, c->nr_erasing_blocks, c->free_size, c->dirty_size, c->wasted_size, c->used_size, c->erasing_size, c->bad_size,
				  c->free_size + c->dirty_size + c->wasted_size + c->used_size + c->erasing_size + c->bad_size, c->flash_size));
			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_garbage_collect_pass(c);
			if (ret)
				return ret;

			cond_resched();

			if (signal_pending(current))
				return -EINTR;

			down(&c->alloc_sem);
			spin_lock(&c->erase_completion_lock);
		}

		/* Enough blocks available; try the actual allocation.
		   -EAGAIN from the helper loops us back around. */
		ret = jffs2_do_reserve_space(c, minsize, ofs, len);
		if (ret) {
			D1(printk(KERN_DEBUG "jffs2_reserve_space: ret is %d\n", ret));
		}
	}
	spin_unlock(&c->erase_completion_lock);
	/* On success we return with alloc_sem still held for the caller
	   (released later by jffs2_complete_reservation); only drop it
	   on failure. */
	if (ret)
		up(&c->alloc_sem);
	return ret;
}
102
 
103
/*
 * jffs2_reserve_space_gc: reserve flash space on behalf of the garbage
 * collector. The caller already holds the allocation semaphore, so we
 * only take the erase_completion_lock and keep retrying the low-level
 * allocator for as long as it reports -EAGAIN.
 */
int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize, uint32_t *ofs, uint32_t *len)
{
	int ret;

	/* Round the request up to the flash alignment granularity. */
	minsize = PAD(minsize);

	D1(printk(KERN_DEBUG "jffs2_reserve_space_gc(): Requested 0x%x bytes\n", minsize));

	spin_lock(&c->erase_completion_lock);
	do {
		ret = jffs2_do_reserve_space(c, minsize, ofs, len);
		if (ret) {
			D1(printk(KERN_DEBUG "jffs2_reserve_space_gc: looping, ret is %d\n", ret));
		}
	} while (ret == -EAGAIN);
	spin_unlock(&c->erase_completion_lock);

	return ret;
}
120
 
121
/* Called with alloc sem _and_ erase_completion_lock */
/*
 * Low-level allocator: hand out (*ofs, *len) from c->nextblock, pulling
 * a fresh block off the free_list when needed. May temporarily drop
 * erase_completion_lock around write-buffer flushes and node
 * obsoletion. Returns 0 on success, -EAGAIN when the caller should
 * retry (state may have changed while the lock was dropped), -ENOSPC
 * or -EINTR otherwise.
 */
static int jffs2_do_reserve_space(struct jffs2_sb_info *c,  uint32_t minsize, uint32_t *ofs, uint32_t *len)
{
	struct jffs2_eraseblock *jeb = c->nextblock;

 restart:
	if (jeb && minsize > jeb->free_size) {
		/* Skip the end of this block and file it as having some dirty space */
		/* If there's a pending write to it, flush now */
		if (c->wbuf_len) {
			spin_unlock(&c->erase_completion_lock);
			D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
			jffs2_flush_wbuf(c, 1);
			spin_lock(&c->erase_completion_lock);
			/* We know nobody's going to have changed nextblock. Just continue */
		}
		/* Account the unusable tail of the block as wasted space,
		   both per-block and filesystem-wide. */
		c->wasted_size += jeb->free_size;
		c->free_size -= jeb->free_size;
		jeb->wasted_size += jeb->free_size;
		jeb->free_size = 0;

		/* Check, if we have a dirty block now, or if it was dirty already */
		if (ISDIRTY (jeb->wasted_size + jeb->dirty_size)) {
			/* Reclassify the wasted space as dirty so GC will
			   consider this block. */
			c->dirty_size += jeb->wasted_size;
			c->wasted_size -= jeb->wasted_size;
			jeb->dirty_size += jeb->wasted_size;
			jeb->wasted_size = 0;
			if (VERYDIRTY(c, jeb->dirty_size)) {
				D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
				list_add_tail(&jeb->list, &c->very_dirty_list);
			} else {
				D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
				list_add_tail(&jeb->list, &c->dirty_list);
			}
		} else {
			D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
			list_add_tail(&jeb->list, &c->clean_list);
		}
		c->nextblock = jeb = NULL;
	}

	if (!jeb) {
		struct list_head *next;
		/* Take the next block off the 'free' list */

		if (list_empty(&c->free_list)) {

			DECLARE_WAITQUEUE(wait, current);

			/* No blocks erasing yet, but some are erasable:
			   queue one for erase to replenish the free list. */
			if (!c->nr_erasing_blocks &&
			    !list_empty(&c->erasable_list)) {
				struct jffs2_eraseblock *ejeb;

				ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
				list_del(&ejeb->list);
				list_add_tail(&ejeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
				jffs2_erase_pending_trigger(c);
				D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Triggering erase of erasable block at 0x%08x\n",
					  ejeb->offset));
			}

			/* Blocks waiting only on a wbuf flush before they
			   become erasable: flush and tell the caller to
			   retry from scratch. */
			if (!c->nr_erasing_blocks &&
			    !list_empty(&c->erasable_pending_wbuf_list)) {
				D1(printk(KERN_DEBUG "jffs2_do_reserve_space: Flushing write buffer\n"));
				/* c->nextblock is NULL, no update to c->nextblock allowed */
				spin_unlock(&c->erase_completion_lock);
				jffs2_flush_wbuf(c, 1);
				spin_lock(&c->erase_completion_lock);
				/* Have another go. It'll be on the erasable_list now */
				return -EAGAIN;
			}

			if (!c->nr_erasing_blocks) {
				/* Ouch. We're in GC, or we wouldn't have got here.
				   And there's no space left. At all. */
				printk(KERN_CRIT "Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
				       c->nr_erasing_blocks, c->nr_free_blocks, list_empty(&c->erasable_list)?"yes":"no",
				       list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no");
				return -ENOSPC;
			}
			/* Make sure this can't deadlock. Someone has to start the erases
			   of erase_pending blocks */
#ifdef __ECOS
			/* In eCos, we don't have a handy kernel thread doing the erases for
			   us. We do them ourselves right now. */
			jffs2_erase_pending_blocks(c);
#else
			/* Linux: sleep on erase_wait until the erase thread
			   makes progress, then retry. */
			set_current_state(TASK_INTERRUPTIBLE);
			add_wait_queue(&c->erase_wait, &wait);
			D1(printk(KERN_DEBUG "Waiting for erases to complete. erasing_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
				  c->nr_erasing_blocks, list_empty(&c->erasable_list)?"yes":"no",
				  list_empty(&c->erasing_list)?"yes":"no", list_empty(&c->erase_pending_list)?"yes":"no"));
			if (!list_empty(&c->erase_pending_list)) {
				D1(printk(KERN_DEBUG "Triggering pending erases\n"));
				jffs2_erase_pending_trigger(c);
			}
			spin_unlock(&c->erase_completion_lock);
			schedule();
			remove_wait_queue(&c->erase_wait, &wait);
			spin_lock(&c->erase_completion_lock);
			if (signal_pending(current)) {
				return -EINTR;
			}
#endif
			/* An erase may have failed, decreasing the
			   amount of free space available. So we must
			   restart from the beginning */
			return -EAGAIN;
		}

		next = c->free_list.next;
		list_del(next);
		c->nextblock = jeb = list_entry(next, struct jffs2_eraseblock, list);
		c->nr_free_blocks--;

		/* Sanity check: a block fresh off the free list should have
		   a full sector free (minus the cleanmarker). If not, warn
		   and retire it via the 'restart' path above. */
		if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
			printk(KERN_WARNING "Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n", jeb->offset, jeb->free_size);
			goto restart;
		}
	}
	/* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
	   enough space */
	*ofs = jeb->offset + (c->sector_size - jeb->free_size);
	*len = jeb->free_size;

	if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
	    !jeb->first_node->next_in_ino) {
		/* Only node in it beforehand was a CLEANMARKER node (we think).
		   So mark it obsolete now that there's going to be another node
		   in the block. This will reduce used_size to zero but We've
		   already set c->nextblock so that jffs2_mark_node_obsolete()
		   won't try to refile it to the dirty_list.
		*/
		spin_unlock(&c->erase_completion_lock);
		jffs2_mark_node_obsolete(c, jeb->first_node);
		spin_lock(&c->erase_completion_lock);
	}

	D1(printk(KERN_DEBUG "jffs2_do_reserve_space(): Giving 0x%x bytes at 0x%x\n", *len, *ofs));
	return 0;
}
266
 
267
/**
268
 *      jffs2_add_physical_node_ref - add a physical node reference to the list
269
 *      @c: superblock info
270
 *      @new: new node reference to add
271
 *      (The node's length and dirty state are taken from @new itself:
 *      @new->totlen and ref_obsolete(@new); this version takes no
 *      separate 'len' or 'dirty' parameters.)
273
 *
274
 *      Should only be used to report nodes for which space has been allocated
275
 *      by jffs2_reserve_space.
276
 *
277
 *      Must be called with the alloc_sem held.
278
 */
279
 
280
int jffs2_add_physical_node_ref(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *new)
{
	struct jffs2_eraseblock *jeb;
	uint32_t len = new->totlen;

	/* Locate the eraseblock this node's flash offset falls in. */
	jeb = &c->blocks[new->flash_offset / c->sector_size];
	D1(printk(KERN_DEBUG "jffs2_add_physical_node_ref(): Node at 0x%x(%d), size 0x%x\n", ref_offset(new), ref_flags(new), len));
#if 1
	/* Paranoia: the node must land exactly at the write point of
	   c->nextblock, i.e. where jffs2_do_reserve_space() handed out
	   space. Anything else means the caller broke the protocol. */
	if (jeb != c->nextblock || (ref_offset(new)) != jeb->offset + (c->sector_size - jeb->free_size)) {
		printk(KERN_WARNING "argh. node added in wrong place\n");
		jffs2_free_raw_node_ref(new);
		return -EINVAL;
	}
#endif
	spin_lock(&c->erase_completion_lock);

	/* Link the new ref onto the block's physical node chain. */
	if (!jeb->first_node)
		jeb->first_node = new;
	if (jeb->last_node)
		jeb->last_node->next_phys = new;
	jeb->last_node = new;

	/* Move 'len' bytes from free to either dirty (node already
	   obsolete) or used accounting, per-block and filesystem-wide. */
	jeb->free_size -= len;
	c->free_size -= len;
	if (ref_obsolete(new)) {
		jeb->dirty_size += len;
		c->dirty_size += len;
	} else {
		jeb->used_size += len;
		c->used_size += len;
	}

	/* Block completely filled and entirely clean: retire it to the
	   clean_list and force a new nextblock next time. */
	if (!jeb->free_size && !jeb->dirty_size) {
		/* If it lives on the dirty_list, jffs2_reserve_space will put it there */
		D1(printk(KERN_DEBUG "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
		if (c->wbuf_len) {
			/* Flush the last write in the block if it's outstanding */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf(c, 1);
			spin_lock(&c->erase_completion_lock);
		}

		list_add_tail(&jeb->list, &c->clean_list);
		c->nextblock = NULL;
	}
	ACCT_SANITY_CHECK(c,jeb);
	D1(ACCT_PARANOIA_CHECK(jeb));

	spin_unlock(&c->erase_completion_lock);

	return 0;
}
333
 
334
 
335
/*
 * jffs2_complete_reservation: counterpart to jffs2_reserve_space().
 * Pokes the garbage collector, then releases the per-filesystem
 * allocation semaphore that jffs2_reserve_space() left held on success.
 */
void jffs2_complete_reservation(struct jffs2_sb_info *c)
{
	D1(printk(KERN_DEBUG "jffs2_complete_reservation()\n"));
	jffs2_garbage_collect_trigger(c);
	up(&c->alloc_sem);
}
341
 
342
/*
 * jffs2_mark_node_obsolete: mark an on-flash node as obsolete.
 * Updates the used/unchecked vs dirty/wasted accounting for the node's
 * eraseblock, refiles the block onto the appropriate list
 * (erase_pending/erasable/dirty/very_dirty), and -- where the flash
 * supports it and the fs is writable -- clears the node's ACCURATE bit
 * on the medium so a later scan sees it as obsolete.
 */
void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
{
	struct jffs2_eraseblock *jeb;
	int blocknr;
	struct jffs2_unknown_node n;	/* header read back from flash */
	int ret;
	size_t retlen;

	if(!ref) {
		printk(KERN_NOTICE "EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
		return;
	}
	/* Marking twice would corrupt the accounting; just ignore. */
	if (ref_obsolete(ref)) {
		D1(printk(KERN_DEBUG "jffs2_mark_node_obsolete called with already obsolete node at 0x%08x\n", ref_offset(ref)));
		return;
	}
	blocknr = ref->flash_offset / c->sector_size;
	if (blocknr >= c->nr_blocks) {
		printk(KERN_NOTICE "raw node at 0x%08x is off the end of device!\n", ref->flash_offset);
		BUG();
	}
	jeb = &c->blocks[blocknr];

	spin_lock(&c->erase_completion_lock);

	/* Move the node's size out of unchecked (node never CRC-checked
	   since mount) or used accounting, as appropriate. */
	if (ref_flags(ref) == REF_UNCHECKED) {
		D1(if (unlikely(jeb->unchecked_size < ref->totlen)) {
			printk(KERN_NOTICE "raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
			       ref->totlen, blocknr, ref->flash_offset, jeb->used_size);
			BUG();
		})
		D1(printk(KERN_DEBUG "Obsoleting previously unchecked node at 0x%08x of len %x: ", ref_offset(ref), ref->totlen));
		jeb->unchecked_size -= ref->totlen;
		c->unchecked_size -= ref->totlen;
	} else {
		D1(if (unlikely(jeb->used_size < ref->totlen)) {
			printk(KERN_NOTICE "raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
			       ref->totlen, blocknr, ref->flash_offset, jeb->used_size);
			BUG();
		})
		D1(printk(KERN_DEBUG "Obsoleting node at 0x%08x of len %x: ", ref_offset(ref), ref->totlen));
		jeb->used_size -= ref->totlen;
		c->used_size -= ref->totlen;
	}

	/* Decide whether the freed space counts as dirty (GC-reclaimable)
	   or merely wasted. Never dirty the current nextblock. */
	if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + ref->totlen)) && jeb != c->nextblock) {
		D1(printk("Dirtying\n"));
		jeb->dirty_size += ref->totlen + jeb->wasted_size;
		c->dirty_size += ref->totlen + jeb->wasted_size;
		c->wasted_size -= jeb->wasted_size;
		jeb->wasted_size = 0;
	} else {
		D1(printk("Wasting\n"));
		jeb->wasted_size += ref->totlen;
		c->wasted_size += ref->totlen;
	}
	/* Flag the in-core ref as obsolete (low bits of flash_offset). */
	ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;

	ACCT_SANITY_CHECK(c, jeb);

	D1(ACCT_PARANOIA_CHECK(jeb));

	if (c->flags & JFFS2_SB_FLAG_MOUNTING) {
		/* Mount in progress. Don't muck about with the block
		   lists because they're not ready yet, and don't actually
		   obliterate nodes that look obsolete. If they weren't
		   marked obsolete on the flash at the time they _became_
		   obsolete, there was probably a reason for that. */
		spin_unlock(&c->erase_completion_lock);
		return;
	}

	/* Refile the block onto the right list, depending on how dirty
	   it has now become. */
	if (jeb == c->nextblock) {
		D2(printk(KERN_DEBUG "Not moving nextblock 0x%08x to dirty/erase_pending list\n", jeb->offset));
	} else if (!jeb->used_size && !jeb->unchecked_size) {
		/* Block now contains no live data at all: it can be erased. */
		if (jeb == c->gcblock) {
			D1(printk(KERN_DEBUG "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n", jeb->offset));
			c->gcblock = NULL;
		} else {
			D1(printk(KERN_DEBUG "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", jeb->offset));
			list_del(&jeb->list);
		}
		if (c->wbuf_len) {
			D1(printk(KERN_DEBUG "...and adding to erasable_pending_wbuf_list\n"));
			list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
#if 0 /* This check was added to allow us to find places where we added nodes to the lists
	 after dropping the alloc_sem, and it did that just fine. But it also caused us to
	 lock the alloc_sem in other places, like clear_inode(), when we wouldn't otherwise
	 have needed to. So I suspect it's outlived its usefulness. Thomas? */

			/* We've changed the rules slightly. After
			   writing a node you now mustn't drop the
			   alloc_sem before you've finished all the
			   list management - this is so that when we
			   get here, we know that no other nodes have
			   been written, and the above check on wbuf
			   is valid - wbuf_len is nonzero IFF the node
			   which obsoletes this node is still in the
			   wbuf.

			   So we BUG() if that new rule is broken, to
			   make sure we catch it and fix it.
			*/
			if (!down_trylock(&c->alloc_sem)) {
				up(&c->alloc_sem);
				printk(KERN_CRIT "jffs2_mark_node_obsolete() called with wbuf active but alloc_sem not locked!\n");
				BUG();
			}
#endif
		} else {
			/* jiffies & 127: cheap pseudo-random split so most
			   blocks erase immediately but some linger on the
			   erasable_list to spread wear/load. */
			if (jiffies & 127) {
				/* Most of the time, we just erase it immediately. Otherwise we
				   spend ages scanning it on mount, etc. */
				D1(printk(KERN_DEBUG "...and adding to erase_pending_list\n"));
				list_add_tail(&jeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
				jffs2_erase_pending_trigger(c);
			} else {
				/* Sometimes, however, we leave it elsewhere so it doesn't get
				   immediately reused, and we spread the load a bit. */
				D1(printk(KERN_DEBUG "...and adding to erasable_list\n"));
				list_add_tail(&jeb->list, &c->erasable_list);
			}
		}
		D1(printk(KERN_DEBUG "Done OK\n"));
	} else if (jeb == c->gcblock) {
		D2(printk(KERN_DEBUG "Not moving gcblock 0x%08x to dirty_list\n", jeb->offset));
	} else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - ref->totlen)) {
		/* This node just pushed the block over the 'dirty'
		   threshold: move clean_list -> dirty_list. */
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n", jeb->offset));
		list_del(&jeb->list);
		D1(printk(KERN_DEBUG "...and adding to dirty_list\n"));
		list_add_tail(&jeb->list, &c->dirty_list);
	} else if (VERYDIRTY(c, jeb->dirty_size) &&
		   !VERYDIRTY(c, jeb->dirty_size - ref->totlen)) {
		/* ...or over the 'very dirty' threshold:
		   dirty_list -> very_dirty_list. */
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n", jeb->offset));
		list_del(&jeb->list);
		D1(printk(KERN_DEBUG "...and adding to very_dirty_list\n"));
		list_add_tail(&jeb->list, &c->very_dirty_list);
	} else {
		D1(printk(KERN_DEBUG "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
	}

	spin_unlock(&c->erase_completion_lock);

	/* The rest writes to the medium; skip if the flash type can't
	   overwrite in place, or the fs is read-only. */
	if (!jffs2_can_mark_obsolete(c))
		return;
	if (jffs2_is_readonly(c))
		return;

	D1(printk(KERN_DEBUG "obliterating obsoleted node at 0x%08x\n", ref_offset(ref)));
	/* Read the node header back and sanity-check it before clearing
	   its ACCURATE bit on flash. All failures here are non-fatal:
	   the in-core state is already updated. */
	ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		printk(KERN_WARNING "Read error reading from obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
		return;
	}
	if (retlen != sizeof(n)) {
		printk(KERN_WARNING "Short read from obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
		return;
	}
	if (PAD(je32_to_cpu(n.totlen)) != PAD(ref->totlen)) {
		printk(KERN_WARNING "Node totlen on flash (0x%08x) != totlen in node ref (0x%08x)\n", je32_to_cpu(n.totlen), ref->totlen);
		return;
	}
	if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
		D1(printk(KERN_DEBUG "Node at 0x%08x was already marked obsolete (nodetype 0x%04x\n", ref_offset(ref), je16_to_cpu(n.nodetype)));
		return;
	}
	/* XXX FIXME: This is ugly now */
	n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
	ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		printk(KERN_WARNING "Write error in obliterating obsoleted node at 0x%08x: %d\n", ref_offset(ref), ret);
		return;
	}
	if (retlen != sizeof(n)) {
		printk(KERN_WARNING "Short write in obliterating obsoleted node at 0x%08x: %zd\n", ref_offset(ref), retlen);
		return;
	}
}
522
 
523
#if CONFIG_JFFS2_FS_DEBUG > 0
524
void jffs2_dump_block_lists(struct jffs2_sb_info *c)
525
{
526
 
527
 
528
        printk(KERN_DEBUG "jffs2_dump_block_lists:\n");
529
        printk(KERN_DEBUG "flash_size: %08x\n", c->flash_size);
530
        printk(KERN_DEBUG "used_size: %08x\n", c->used_size);
531
        printk(KERN_DEBUG "dirty_size: %08x\n", c->dirty_size);
532
        printk(KERN_DEBUG "wasted_size: %08x\n", c->wasted_size);
533
        printk(KERN_DEBUG "unchecked_size: %08x\n", c->unchecked_size);
534
        printk(KERN_DEBUG "free_size: %08x\n", c->free_size);
535
        printk(KERN_DEBUG "erasing_size: %08x\n", c->erasing_size);
536
        printk(KERN_DEBUG "bad_size: %08x\n", c->bad_size);
537
        printk(KERN_DEBUG "sector_size: %08x\n", c->sector_size);
538
        printk(KERN_DEBUG "jffs2_reserved_blocks size: %08x\n",c->sector_size * JFFS2_RESERVED_BLOCKS_WRITE);
539
 
540
        if (c->nextblock) {
541
                printk(KERN_DEBUG "nextblock: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
542
                       c->nextblock->offset, c->nextblock->used_size, c->nextblock->dirty_size, c->nextblock->wasted_size, c->nextblock->unchecked_size, c->nextblock->free_size);
543
        } else {
544
                printk(KERN_DEBUG "nextblock: NULL\n");
545
        }
546
        if (c->gcblock) {
547
                printk(KERN_DEBUG "gcblock: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
548
                       c->gcblock->offset, c->gcblock->used_size, c->gcblock->dirty_size, c->gcblock->wasted_size, c->gcblock->unchecked_size, c->gcblock->free_size);
549
        } else {
550
                printk(KERN_DEBUG "gcblock: NULL\n");
551
        }
552
        if (list_empty(&c->clean_list)) {
553
                printk(KERN_DEBUG "clean_list: empty\n");
554
        } else {
555
                struct list_head *this;
556
                int     numblocks = 0;
557
                uint32_t dirty = 0;
558
 
559
                list_for_each(this, &c->clean_list) {
560
                        struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
561
                        numblocks ++;
562
                        dirty += jeb->wasted_size;
563
                        printk(KERN_DEBUG "clean_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n", jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
564
                }
565
                printk (KERN_DEBUG "Contains %d blocks with total wasted size %u, average wasted size: %u\n", numblocks, dirty, dirty / numblocks);
566
        }
567
        if (list_empty(&c->very_dirty_list)) {
568
                printk(KERN_DEBUG "very_dirty_list: empty\n");
569
        } else {
570
                struct list_head *this;
571
                int     numblocks = 0;
572
                uint32_t dirty = 0;
573
 
574
                list_for_each(this, &c->very_dirty_list) {
575
                        struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
576
                        numblocks ++;
577
                        dirty += jeb->dirty_size;
578
                        printk(KERN_DEBUG "very_dirty_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
579
                               jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
580
                }
581
                printk (KERN_DEBUG "Contains %d blocks with total dirty size %u, average dirty size: %u\n",
582
                        numblocks, dirty, dirty / numblocks);
583
        }
584
        if (list_empty(&c->dirty_list)) {
585
                printk(KERN_DEBUG "dirty_list: empty\n");
586
        } else {
587
                struct list_head *this;
588
                int     numblocks = 0;
589
                uint32_t dirty = 0;
590
 
591
                list_for_each(this, &c->dirty_list) {
592
                        struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
593
                        numblocks ++;
594
                        dirty += jeb->dirty_size;
595
                        printk(KERN_DEBUG "dirty_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
596
                               jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
597
                }
598
                printk (KERN_DEBUG "Contains %d blocks with total dirty size %u, average dirty size: %u\n",
599
                        numblocks, dirty, dirty / numblocks);
600
        }
601
        if (list_empty(&c->erasable_list)) {
602
                printk(KERN_DEBUG "erasable_list: empty\n");
603
        } else {
604
                struct list_head *this;
605
 
606
                list_for_each(this, &c->erasable_list) {
607
                        struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
608
                        printk(KERN_DEBUG "erasable_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
609
                               jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
610
                }
611
        }
612
        if (list_empty(&c->erasing_list)) {
613
                printk(KERN_DEBUG "erasing_list: empty\n");
614
        } else {
615
                struct list_head *this;
616
 
617
                list_for_each(this, &c->erasing_list) {
618
                        struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
619
                        printk(KERN_DEBUG "erasing_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
620
                               jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
621
                }
622
        }
623
        if (list_empty(&c->erase_pending_list)) {
624
                printk(KERN_DEBUG "erase_pending_list: empty\n");
625
        } else {
626
                struct list_head *this;
627
 
628
                list_for_each(this, &c->erase_pending_list) {
629
                        struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
630
                        printk(KERN_DEBUG "erase_pending_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
631
                               jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
632
                }
633
        }
634
        if (list_empty(&c->erasable_pending_wbuf_list)) {
635
                printk(KERN_DEBUG "erasable_pending_wbuf_list: empty\n");
636
        } else {
637
                struct list_head *this;
638
 
639
                list_for_each(this, &c->erasable_pending_wbuf_list) {
640
                        struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
641
                        printk(KERN_DEBUG "erase_pending_wbuf_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
642
                               jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
643
                }
644
        }
645
        if (list_empty(&c->free_list)) {
646
                printk(KERN_DEBUG "free_list: empty\n");
647
        } else {
648
                struct list_head *this;
649
 
650
                list_for_each(this, &c->free_list) {
651
                        struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
652
                        printk(KERN_DEBUG "free_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
653
                               jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
654
                }
655
        }
656
        if (list_empty(&c->bad_list)) {
657
                printk(KERN_DEBUG "bad_list: empty\n");
658
        } else {
659
                struct list_head *this;
660
 
661
                list_for_each(this, &c->bad_list) {
662
                        struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
663
                        printk(KERN_DEBUG "bad_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
664
                               jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
665
                }
666
        }
667
        if (list_empty(&c->bad_used_list)) {
668
                printk(KERN_DEBUG "bad_used_list: empty\n");
669
        } else {
670
                struct list_head *this;
671
 
672
                list_for_each(this, &c->bad_used_list) {
673
                        struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list);
674
                        printk(KERN_DEBUG "bad_used_list: %08x (used %08x, dirty %08x, wasted %08x, unchecked %08x, free %08x)\n",
675
                               jeb->offset, jeb->used_size, jeb->dirty_size, jeb->wasted_size, jeb->unchecked_size, jeb->free_size);
676
                }
677
        }
678
}
679
#endif /* CONFIG_JFFS2_FS_DEBUG */

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.