/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright (C) 2001, 2002 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@cambridge.redhat.com>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 * $Id: scan.c,v 1.1.1.1 2004-02-14 13:29:20 phoenix Exp $
 *
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/pagemap.h>
#include <linux/crc32.h>
#include <linux/compiler.h>
#include "nodelist.h"

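/* How much 0xFF we check at the start of an eraseblock before deciding
   that the block is (probably) empty. */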
#define EMPTY_SCAN_SIZE 1024

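/* Space-accounting helpers. Each one moves '_x' bytes out of the free pool
   and into the dirty/used/unchecked pool, updating both the filesystem-wide
   counters in 'c' and the per-eraseblock counters in 'jeb'; both variables
   must be in scope at the call site. */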
#define DIRTY_SPACE(x) do { typeof(x) _x = (x); \
		c->free_size -= _x; c->dirty_size += _x; \
		jeb->free_size -= _x; jeb->dirty_size += _x; \
		} while(0)
#define USED_SPACE(x) do { typeof(x) _x = (x); \
		c->free_size -= _x; c->used_size += _x; \
		jeb->free_size -= _x; jeb->used_size += _x; \
		} while(0)
#define UNCHECKED_SPACE(x) do { typeof(x) _x = (x); \
		c->free_size -= _x; c->unchecked_size += _x; \
		jeb->free_size -= _x; jeb->unchecked_size += _x; \
		} while(0)

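/* Rate-limited warning: prints while *noise is non-zero and decrements it,
   so a badly corrupted eraseblock can only generate a bounded number of
   messages during the scan. */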
#define noisy_printk(noise, args...) do { \
	if (*(noise)) { \
		printk(KERN_NOTICE args); \
		(*(noise))--; \
		if (!(*(noise))) { \
			printk(KERN_NOTICE "Further such events for this erase block will not be printed\n"); \
		} \
	} \
} while(0)

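/* Accumulates the version numbers of the nodes seen during the scan.
   jffs2_rotate_lists() uses it as a cheap source of randomness below. */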
static uint32_t pseudo_random;

static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				  unsigned char *buf, uint32_t buf_size);

/* These helper functions _must_ increase ofs and also do the dirty/used space accounting.
 * Returning an error will abort the mount - bad checksums etc. should just mark the space
 * as dirty.
 */
static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				 struct jffs2_raw_inode *ri, uint32_t ofs);
static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				  struct jffs2_raw_dirent *rd, uint32_t ofs);

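/* Summary states returned by jffs2_scan_eraseblock(); jffs2_scan_medium()
   uses them to decide which list each eraseblock belongs on. */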
#define BLK_STATE_ALLFF		0
#define BLK_STATE_CLEAN		1
#define BLK_STATE_PARTDIRTY	2
#define BLK_STATE_CLEANMARKER	3
#define BLK_STATE_ALLDIRTY	4
#define BLK_STATE_BADBLOCK	5

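/* Scan the whole medium, one eraseblock at a time, and file each block onto
   the appropriate list (free, clean, dirty, very dirty, erase pending or bad)
   according to the state the per-block scan reports. */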
int jffs2_scan_medium(struct jffs2_sb_info *c)
{
	int i, ret;
	uint32_t empty_blocks = 0, bad_blocks = 0;
	unsigned char *flashbuf = NULL;
	uint32_t buf_size = 0;
#ifndef __ECOS
	size_t pointlen;

	if (c->mtd->point) {
		ret = c->mtd->point (c->mtd, 0, c->mtd->size, &pointlen, &flashbuf);
		if (!ret && pointlen < c->mtd->size) {
			/* Don't muck about if it won't let us point to the whole flash */
			D1(printk(KERN_DEBUG "MTD point returned len too short: 0x%zx\n", pointlen));
			c->mtd->unpoint(c->mtd, flashbuf, 0, c->mtd->size);
			flashbuf = NULL;
		}
		if (ret)
			D1(printk(KERN_DEBUG "MTD point failed %d\n", ret));
	}
#endif
	if (!flashbuf) {
		/* For NAND it's quicker to read a whole eraseblock at a time,
		   apparently */
		if (jffs2_cleanmarker_oob(c))
			buf_size = c->sector_size;
		else
			buf_size = PAGE_SIZE;

		D1(printk(KERN_DEBUG "Allocating readbuf of %d bytes\n", buf_size));
		flashbuf = kmalloc(buf_size, GFP_KERNEL);
		if (!flashbuf)
			return -ENOMEM;
	}

	for (i=0; i<c->nr_blocks; i++) {
		struct jffs2_eraseblock *jeb = &c->blocks[i];

		ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset), buf_size);

		if (ret < 0)
			return ret;

		ACCT_PARANOIA_CHECK(jeb);

		/* Now decide which list to put it on */
		switch(ret) {
		case BLK_STATE_ALLFF:
			/*
			 * Empty block. Since we can't be sure it
			 * was entirely erased, we just queue it for erase
			 * again. It will be marked as such when the erase
			 * is complete. Meanwhile we still count it as empty
			 * for later checks.
			 */
			empty_blocks++;
			list_add(&jeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			break;

		case BLK_STATE_CLEANMARKER:
			/* Only a CLEANMARKER node is valid */
			if (!jeb->dirty_size) {
				/* It's actually free */
				list_add(&jeb->list, &c->free_list);
				c->nr_free_blocks++;
			} else {
				/* Dirt */
				D1(printk(KERN_DEBUG "Adding all-dirty block at 0x%08x to erase_pending_list\n", jeb->offset));
				list_add(&jeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
			}
			break;

		case BLK_STATE_CLEAN:
			/* Full (or almost full) of clean data. Clean list */
			list_add(&jeb->list, &c->clean_list);
			break;

		case BLK_STATE_PARTDIRTY:
			/* Some data, but not full. Dirty list. */
			/* Except that we want to remember the block with most free space,
			   and stick it in the 'nextblock' position to start writing to it.
			   Later when we do snapshots, this must be the most recent block,
			   not the one with most free space.
			*/
			if (jeb->free_size > 2*sizeof(struct jffs2_raw_inode) &&
			    (jffs2_can_mark_obsolete(c) || jeb->free_size > c->wbuf_pagesize) &&
			    (!c->nextblock || c->nextblock->free_size < jeb->free_size)) {
				/* Better candidate for the next writes to go to */
				if (c->nextblock) {
					c->nextblock->dirty_size += c->nextblock->free_size + c->nextblock->wasted_size;
					c->dirty_size += c->nextblock->free_size + c->nextblock->wasted_size;
					c->free_size -= c->nextblock->free_size;
					c->wasted_size -= c->nextblock->wasted_size;
					c->nextblock->free_size = c->nextblock->wasted_size = 0;
					if (VERYDIRTY(c, c->nextblock->dirty_size)) {
						list_add(&c->nextblock->list, &c->very_dirty_list);
					} else {
						list_add(&c->nextblock->list, &c->dirty_list);
					}
				}
				c->nextblock = jeb;
			} else {
				jeb->dirty_size += jeb->free_size + jeb->wasted_size;
				c->dirty_size += jeb->free_size + jeb->wasted_size;
				c->free_size -= jeb->free_size;
				c->wasted_size -= jeb->wasted_size;
				jeb->free_size = jeb->wasted_size = 0;
				if (VERYDIRTY(c, jeb->dirty_size)) {
					list_add(&jeb->list, &c->very_dirty_list);
				} else {
					list_add(&jeb->list, &c->dirty_list);
				}
			}
			break;

		case BLK_STATE_ALLDIRTY:
			/* Nothing valid - not even a clean marker. Needs erasing. */
			/* For now we just put it on the erasing list. We'll start the erases later */
			D1(printk(KERN_NOTICE "JFFS2: Erase block at 0x%08x is not formatted. It will be erased\n", jeb->offset));
			list_add(&jeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			break;

		case BLK_STATE_BADBLOCK:
			D1(printk(KERN_NOTICE "JFFS2: Block at 0x%08x is bad\n", jeb->offset));
			list_add(&jeb->list, &c->bad_list);
			c->bad_size += c->sector_size;
			c->free_size -= c->sector_size;
			bad_blocks++;
			break;
		default:
			printk(KERN_WARNING "jffs2_scan_medium(): unknown block state\n");
			BUG();
		}
	}

	/* Nextblock dirty is always seen as wasted, because we cannot recycle it now */
	if (c->nextblock && (c->nextblock->dirty_size)) {
		c->nextblock->wasted_size += c->nextblock->dirty_size;
		c->wasted_size += c->nextblock->dirty_size;
		c->dirty_size -= c->nextblock->dirty_size;
		c->nextblock->dirty_size = 0;
	}

	if (!jffs2_can_mark_obsolete(c) && c->nextblock && (c->nextblock->free_size & (c->wbuf_pagesize-1))) {
		/* If we're going to start writing into a block which already
		   contains data, and the end of the data isn't page-aligned,
		   skip a little and align it. */

		uint32_t skip = c->nextblock->free_size & (c->wbuf_pagesize-1);

		D1(printk(KERN_DEBUG "jffs2_scan_medium(): Skipping %d bytes in nextblock to ensure page alignment\n",
			  skip));
		c->nextblock->wasted_size += skip;
		c->wasted_size += skip;

		c->nextblock->free_size -= skip;
		c->free_size -= skip;
	}
	if (c->nr_erasing_blocks) {
		if (!c->used_size && ((empty_blocks+bad_blocks) != c->nr_blocks || bad_blocks == c->nr_blocks)) {
			printk(KERN_NOTICE "Cowardly refusing to erase blocks on filesystem with no valid JFFS2 nodes\n");
			printk(KERN_NOTICE "empty_blocks %d, bad_blocks %d, c->nr_blocks %d\n", empty_blocks, bad_blocks, c->nr_blocks);
			return -EIO;
		}
		jffs2_erase_pending_trigger(c);
	}
	if (buf_size)
		kfree(flashbuf);
#ifndef __ECOS
	else
		c->mtd->unpoint(c->mtd, flashbuf, 0, c->mtd->size);
#endif
	return 0;
}

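/* Read 'len' bytes at offset 'ofs' into 'buf'. A short read is treated as
   an I/O error, since the scan code relies on the whole buffer being valid. */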
static int jffs2_fill_scan_buf (struct jffs2_sb_info *c, unsigned char *buf,
				uint32_t ofs, uint32_t len)
{
	int ret;
	size_t retlen;

	ret = jffs2_flash_read(c, ofs, len, &retlen, buf);
	if (ret) {
		D1(printk(KERN_WARNING "mtd->read(0x%x bytes from 0x%x) returned %d\n", len, ofs, ret));
		return ret;
	}
	if (retlen < len) {
		D1(printk(KERN_WARNING "Read at 0x%x gave only 0x%zx bytes\n", ofs, retlen));
		return -EIO;
	}
	D2(printk(KERN_DEBUG "Read 0x%x bytes from 0x%08x into buf\n", len, ofs));
	D2(printk(KERN_DEBUG "000: %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
		  buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7],
		  buf[8], buf[9], buf[10], buf[11], buf[12], buf[13], buf[14], buf[15]));
	return 0;
}

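/* Scan a single eraseblock node by node, doing the free/dirty/used space
   accounting as it goes, and return one of the BLK_STATE_* values that
   summarises what the block contains. */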
static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				  unsigned char *buf, uint32_t buf_size)
{
	struct jffs2_unknown_node *node;
	struct jffs2_unknown_node crcnode;
	uint32_t ofs, prevofs;
	uint32_t hdr_crc, buf_ofs, buf_len;
	int err;
	int noise = 0;
	int wasempty = 0;
	uint32_t empty_start = 0;
#ifdef CONFIG_JFFS2_FS_NAND
	int cleanmarkerfound = 0;
#endif

	ofs = jeb->offset;
	prevofs = jeb->offset - 1;

	D1(printk(KERN_DEBUG "jffs2_scan_eraseblock(): Scanning block at 0x%x\n", ofs));

#ifdef CONFIG_JFFS2_FS_NAND
	if (jffs2_cleanmarker_oob(c)) {
		int ret = jffs2_check_nand_cleanmarker(c, jeb);
		D2(printk(KERN_NOTICE "jffs2_check_nand_cleanmarker returned %d\n", ret));
		/* Even if it's not found, we still scan to see
		   if the block is empty. We use this information
		   to decide whether to erase it or not. */
		switch (ret) {
		case 0:		cleanmarkerfound = 1; break;
		case 1:		break;
		case 2:		return BLK_STATE_BADBLOCK;
		case 3:		return BLK_STATE_ALLDIRTY; /* Block has failed to erase min. once */
		default:	return ret;
		}
	}
#endif
	buf_ofs = jeb->offset;

	if (!buf_size) {
		buf_len = c->sector_size;
	} else {
		buf_len = EMPTY_SCAN_SIZE;
		err = jffs2_fill_scan_buf(c, buf, buf_ofs, buf_len);
		if (err)
			return err;
	}

	/* We temporarily use 'ofs' as a pointer into the buffer/jeb */
	ofs = 0;

	/* Scan only the first EMPTY_SCAN_SIZE bytes of 0xFF before declaring it's empty */
	while(ofs < EMPTY_SCAN_SIZE && *(uint32_t *)(&buf[ofs]) == 0xFFFFFFFF)
		ofs += 4;

	if (ofs == EMPTY_SCAN_SIZE) {
#ifdef CONFIG_JFFS2_FS_NAND
		if (jffs2_cleanmarker_oob(c)) {
			/* scan oob, take care of cleanmarker */
			int ret = jffs2_check_oob_empty(c, jeb, cleanmarkerfound);
			D2(printk(KERN_NOTICE "jffs2_check_oob_empty returned %d\n", ret));
			switch (ret) {
			case 0:		return cleanmarkerfound ? BLK_STATE_CLEANMARKER : BLK_STATE_ALLFF;
			case 1:		return BLK_STATE_ALLDIRTY;
			case 2:		return BLK_STATE_BADBLOCK; /* case 2/3 are paranoia checks */
			case 3:		return BLK_STATE_ALLDIRTY; /* Block has failed to erase min. once */
			default:	return ret;
			}
		}
#endif
		D1(printk(KERN_DEBUG "Block at 0x%08x is empty (erased)\n", jeb->offset));
		return BLK_STATE_ALLFF;	/* OK to erase if all blocks are like this */
	}
	if (ofs) {
		D1(printk(KERN_DEBUG "Free space at %08x ends at %08x\n", jeb->offset,
			  jeb->offset + ofs));
		DIRTY_SPACE(ofs);
	}

	/* Now ofs is a complete physical flash offset as it always was... */
	ofs += jeb->offset;

	noise = 10;

	while(ofs < jeb->offset + c->sector_size) {

		D1(ACCT_PARANOIA_CHECK(jeb));

		cond_resched();

		if (ofs & 3) {
			printk(KERN_WARNING "Eep. ofs 0x%08x not word-aligned!\n", ofs);
			ofs = (ofs+3)&~3;
			continue;
		}
		if (ofs == prevofs) {
			printk(KERN_WARNING "ofs 0x%08x has already been seen. Skipping\n", ofs);
			DIRTY_SPACE(4);
			ofs += 4;
			continue;
		}
		prevofs = ofs;

		if (jeb->offset + c->sector_size < ofs + sizeof(*node)) {
			D1(printk(KERN_DEBUG "Fewer than %zd bytes left to end of block. (%x+%x<%x+%zx) Not reading\n", sizeof(struct jffs2_unknown_node),
				  jeb->offset, c->sector_size, ofs, sizeof(*node)));
			DIRTY_SPACE((jeb->offset + c->sector_size)-ofs);
			break;
		}

		if (buf_ofs + buf_len < ofs + sizeof(*node)) {
			buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
			D1(printk(KERN_DEBUG "Fewer than %zd bytes (node header) left to end of buf. Reading 0x%x at 0x%08x\n",
				  sizeof(struct jffs2_unknown_node), buf_len, ofs));
			err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
			if (err)
				return err;
			buf_ofs = ofs;
		}

		node = (struct jffs2_unknown_node *)&buf[ofs-buf_ofs];

		if (*(uint32_t *)(&buf[ofs-buf_ofs]) == 0xffffffff) {
			uint32_t inbuf_ofs = ofs - buf_ofs + 4;
			uint32_t scanend;

			empty_start = ofs;
			ofs += 4;

			/* If scanning empty space after only a cleanmarker, don't
			   bother scanning the whole block */
			if (unlikely(empty_start == jeb->offset + c->cleanmarker_size &&
				     jeb->offset + EMPTY_SCAN_SIZE < buf_ofs + buf_len))
				scanend = jeb->offset + EMPTY_SCAN_SIZE - buf_ofs;
			else
				scanend = buf_len;

			D1(printk(KERN_DEBUG "Found empty flash at 0x%08x\n", ofs));
			while (inbuf_ofs < scanend) {
				if (*(uint32_t *)(&buf[inbuf_ofs]) != 0xffffffff)
					goto emptyends;

				inbuf_ofs+=4;
				ofs += 4;
			}
			/* Ran off end. */
			D1(printk(KERN_DEBUG "Empty flash ends normally at 0x%08x\n", ofs));

			if (buf_ofs == jeb->offset && jeb->used_size == PAD(c->cleanmarker_size) &&
			    !jeb->first_node->next_in_ino && !jeb->dirty_size)
				return BLK_STATE_CLEANMARKER;
			wasempty = 1;
			continue;
		} else if (wasempty) {
		emptyends:
			printk(KERN_WARNING "Empty flash at 0x%08x ends at 0x%08x\n", empty_start, ofs);
			DIRTY_SPACE(ofs-empty_start);
			wasempty = 0;
			continue;
		}

		if (ofs == jeb->offset && je16_to_cpu(node->magic) == KSAMTIB_CIGAM_2SFFJ) {
			printk(KERN_WARNING "Magic bitmask is backwards at offset 0x%08x. Wrong endian filesystem?\n", ofs);
			DIRTY_SPACE(4);
			ofs += 4;
			continue;
		}
		if (je16_to_cpu(node->magic) == JFFS2_DIRTY_BITMASK) {
			D1(printk(KERN_DEBUG "Empty bitmask at 0x%08x\n", ofs));
			DIRTY_SPACE(4);
			ofs += 4;
			continue;
		}
		if (je16_to_cpu(node->magic) == JFFS2_OLD_MAGIC_BITMASK) {
			printk(KERN_WARNING "Old JFFS2 bitmask found at 0x%08x\n", ofs);
			printk(KERN_WARNING "You cannot use older JFFS2 filesystems with newer kernels\n");
			DIRTY_SPACE(4);
			ofs += 4;
			continue;
		}
		if (je16_to_cpu(node->magic) != JFFS2_MAGIC_BITMASK) {
			/* OK. We're out of possibilities. Whinge and move on */
			noisy_printk(&noise, "jffs2_scan_eraseblock(): Magic bitmask 0x%04x not found at 0x%08x: 0x%04x instead\n",
				     JFFS2_MAGIC_BITMASK, ofs,
				     je16_to_cpu(node->magic));
			DIRTY_SPACE(4);
			ofs += 4;
			continue;
		}
		/* We seem to have a node of sorts. Check the CRC */
		crcnode.magic = node->magic;
		crcnode.nodetype = cpu_to_je16( je16_to_cpu(node->nodetype) | JFFS2_NODE_ACCURATE);
		crcnode.totlen = node->totlen;
		hdr_crc = crc32(0, &crcnode, sizeof(crcnode)-4);

		if (hdr_crc != je32_to_cpu(node->hdr_crc)) {
			noisy_printk(&noise, "jffs2_scan_eraseblock(): Node at 0x%08x {0x%04x, 0x%04x, 0x%08x} has invalid CRC 0x%08x (calculated 0x%08x)\n",
				     ofs, je16_to_cpu(node->magic),
				     je16_to_cpu(node->nodetype),
				     je32_to_cpu(node->totlen),
				     je32_to_cpu(node->hdr_crc),
				     hdr_crc);
			DIRTY_SPACE(4);
			ofs += 4;
			continue;
		}

		if (ofs + je32_to_cpu(node->totlen) >
		    jeb->offset + c->sector_size) {
			/* Eep. Node goes over the end of the erase block. */
			printk(KERN_WARNING "Node at 0x%08x with length 0x%08x would run over the end of the erase block\n",
			       ofs, je32_to_cpu(node->totlen));
			printk(KERN_WARNING "Perhaps the file system was created with the wrong erase size?\n");
			DIRTY_SPACE(4);
			ofs += 4;
			continue;
		}

		if (!(je16_to_cpu(node->nodetype) & JFFS2_NODE_ACCURATE)) {
			/* Wheee. This is an obsoleted node */
			D2(printk(KERN_DEBUG "Node at 0x%08x is obsolete. Skipping\n", ofs));
			DIRTY_SPACE(PAD(je32_to_cpu(node->totlen)));
			ofs += PAD(je32_to_cpu(node->totlen));
			continue;
		}

		switch(je16_to_cpu(node->nodetype)) {
		case JFFS2_NODETYPE_INODE:
			if (buf_ofs + buf_len < ofs + sizeof(struct jffs2_raw_inode)) {
				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
				D1(printk(KERN_DEBUG "Fewer than %zd bytes (inode node) left to end of buf. Reading 0x%x at 0x%08x\n",
					  sizeof(struct jffs2_raw_inode), buf_len, ofs));
				err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
				if (err)
					return err;
				buf_ofs = ofs;
				node = (void *)buf;
			}
			err = jffs2_scan_inode_node(c, jeb, (void *)node, ofs);
			if (err) return err;
			ofs += PAD(je32_to_cpu(node->totlen));
			break;

		case JFFS2_NODETYPE_DIRENT:
			if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
				D1(printk(KERN_DEBUG "Fewer than %d bytes (dirent node) left to end of buf. Reading 0x%x at 0x%08x\n",
					  je32_to_cpu(node->totlen), buf_len, ofs));
				err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
				if (err)
					return err;
				buf_ofs = ofs;
				node = (void *)buf;
			}
			err = jffs2_scan_dirent_node(c, jeb, (void *)node, ofs);
			if (err) return err;
			ofs += PAD(je32_to_cpu(node->totlen));
			break;

		case JFFS2_NODETYPE_CLEANMARKER:
			D1(printk(KERN_DEBUG "CLEANMARKER node found at 0x%08x\n", ofs));
			if (je32_to_cpu(node->totlen) != c->cleanmarker_size) {
				printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x has totlen 0x%x != normal 0x%x\n",
				       ofs, je32_to_cpu(node->totlen), c->cleanmarker_size);
				DIRTY_SPACE(PAD(sizeof(struct jffs2_unknown_node)));
				ofs += PAD(sizeof(struct jffs2_unknown_node));
			} else if (jeb->first_node) {
				printk(KERN_NOTICE "CLEANMARKER node found at 0x%08x, not first node in block (0x%08x)\n", ofs, jeb->offset);
				DIRTY_SPACE(PAD(sizeof(struct jffs2_unknown_node)));
				ofs += PAD(sizeof(struct jffs2_unknown_node));
			} else {
				struct jffs2_raw_node_ref *marker_ref = jffs2_alloc_raw_node_ref();
				if (!marker_ref) {
					printk(KERN_NOTICE "Failed to allocate node ref for clean marker\n");
					return -ENOMEM;
				}
				marker_ref->next_in_ino = NULL;
				marker_ref->next_phys = NULL;
				marker_ref->flash_offset = ofs | REF_NORMAL;
				marker_ref->totlen = c->cleanmarker_size;
				jeb->first_node = jeb->last_node = marker_ref;

				USED_SPACE(PAD(c->cleanmarker_size));
				ofs += PAD(c->cleanmarker_size);
			}
			break;

		case JFFS2_NODETYPE_PADDING:
			DIRTY_SPACE(PAD(je32_to_cpu(node->totlen)));
			ofs += PAD(je32_to_cpu(node->totlen));
			break;

		default:
			switch (je16_to_cpu(node->nodetype) & JFFS2_COMPAT_MASK) {
			case JFFS2_FEATURE_ROCOMPAT:
				printk(KERN_NOTICE "Read-only compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs);
				c->flags |= JFFS2_SB_FLAG_RO;
				if (!(jffs2_is_readonly(c)))
					return -EROFS;
				DIRTY_SPACE(PAD(je32_to_cpu(node->totlen)));
				ofs += PAD(je32_to_cpu(node->totlen));
				break;

			case JFFS2_FEATURE_INCOMPAT:
				printk(KERN_NOTICE "Incompatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs);
				return -EINVAL;

			case JFFS2_FEATURE_RWCOMPAT_DELETE:
				D1(printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs));
				DIRTY_SPACE(PAD(je32_to_cpu(node->totlen)));
				ofs += PAD(je32_to_cpu(node->totlen));
				break;

			case JFFS2_FEATURE_RWCOMPAT_COPY:
				D1(printk(KERN_NOTICE "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n", je16_to_cpu(node->nodetype), ofs));
				USED_SPACE(PAD(je32_to_cpu(node->totlen)));
				ofs += PAD(je32_to_cpu(node->totlen));
				break;
			}
		}
	}

	D1(printk(KERN_DEBUG "Block at 0x%08x: free 0x%08x, dirty 0x%08x, unchecked 0x%08x, used 0x%08x\n", jeb->offset,
		  jeb->free_size, jeb->dirty_size, jeb->unchecked_size, jeb->used_size));

	/* mark_node_obsolete can add to wasted !! */
	if (jeb->wasted_size) {
		jeb->dirty_size += jeb->wasted_size;
		c->dirty_size += jeb->wasted_size;
		c->wasted_size -= jeb->wasted_size;
		jeb->wasted_size = 0;
	}

	if ((jeb->used_size + jeb->unchecked_size) == PAD(c->cleanmarker_size) && !jeb->dirty_size
	    && (!jeb->first_node || jeb->first_node->next_in_ino) )
		return BLK_STATE_CLEANMARKER;

	/* move blocks with max 4 byte dirty space to cleanlist */
	else if (!ISDIRTY(c->sector_size - (jeb->used_size + jeb->unchecked_size))) {
		c->dirty_size -= jeb->dirty_size;
		c->wasted_size += jeb->dirty_size;
		jeb->wasted_size += jeb->dirty_size;
		jeb->dirty_size = 0;
		return BLK_STATE_CLEAN;
	} else if (jeb->used_size || jeb->unchecked_size)
		return BLK_STATE_PARTDIRTY;
	else
		return BLK_STATE_ALLDIRTY;
}

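/* Look up the inode cache entry for 'ino', allocating and registering a new
   one if this is the first node we have seen for that inode. Returns NULL
   only on allocation failure. */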
static struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info *c, uint32_t ino)
{
	struct jffs2_inode_cache *ic;

	ic = jffs2_get_ino_cache(c, ino);
	if (ic)
		return ic;

	ic = jffs2_alloc_inode_cache();
	if (!ic) {
		printk(KERN_NOTICE "jffs2_scan_make_ino_cache(): allocation of inode cache failed\n");
		return NULL;
	}
	memset(ic, 0, sizeof(*ic));

	ic->ino = ino;
	ic->nodes = (void *)ic;
	jffs2_add_ino_cache(c, ic);
	if (ino == 1)
		ic->nlink = 1;
	return ic;
}

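/* Handle a JFFS2_NODETYPE_INODE node found by the scan: attach a raw node
   reference to the right inode cache and account the space as UNCHECKED;
   the full CRC check happens later, as the comment below explains. */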
static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				 struct jffs2_raw_inode *ri, uint32_t ofs)
{
	struct jffs2_raw_node_ref *raw;
	struct jffs2_inode_cache *ic;
	uint32_t ino = je32_to_cpu(ri->ino);

	D1(printk(KERN_DEBUG "jffs2_scan_inode_node(): Node at 0x%08x\n", ofs));

	/* We do very little here now. Just check the ino# to which we should attribute
	   this node; we can do all the CRC checking etc. later. There's a tradeoff here --
	   we used to scan the flash once only, reading everything we want from it into
	   memory, then building all our in-core data structures and freeing the extra
	   information. Now we allow the first part of the mount to complete a lot quicker,
	   but we have to go _back_ to the flash in order to finish the CRC checking, etc.
	   Which means that the _full_ amount of time to get to proper write mode with GC
	   operational may actually be _longer_ than before. Sucks to be me. */

	raw = jffs2_alloc_raw_node_ref();
	if (!raw) {
		printk(KERN_NOTICE "jffs2_scan_inode_node(): allocation of node reference failed\n");
		return -ENOMEM;
	}

	ic = jffs2_get_ino_cache(c, ino);
	if (!ic) {
		/* Inocache get failed. Either we read a bogus ino# or it's just genuinely the
		   first node we found for this inode. Do a CRC check to protect against the former
		   case */
		uint32_t crc = crc32(0, ri, sizeof(*ri)-8);

		if (crc != je32_to_cpu(ri->node_crc)) {
			printk(KERN_NOTICE "jffs2_scan_inode_node(): CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
			       ofs, je32_to_cpu(ri->node_crc), crc);
			/* We believe totlen because the CRC on the node _header_ was OK, just the node itself failed. */
			DIRTY_SPACE(PAD(je32_to_cpu(ri->totlen)));
			return 0;
		}
		ic = jffs2_scan_make_ino_cache(c, ino);
		if (!ic) {
			jffs2_free_raw_node_ref(raw);
			return -ENOMEM;
		}
	}

	/* Wheee. It worked */

	raw->flash_offset = ofs | REF_UNCHECKED;
	raw->totlen = PAD(je32_to_cpu(ri->totlen));
	raw->next_phys = NULL;
	raw->next_in_ino = ic->nodes;

	ic->nodes = raw;
	if (!jeb->first_node)
		jeb->first_node = raw;
	if (jeb->last_node)
		jeb->last_node->next_phys = raw;
	jeb->last_node = raw;

	D1(printk(KERN_DEBUG "Node is ino #%u, version %d. Range 0x%x-0x%x\n",
		  je32_to_cpu(ri->ino), je32_to_cpu(ri->version),
		  je32_to_cpu(ri->offset),
		  je32_to_cpu(ri->offset)+je32_to_cpu(ri->dsize)));

	pseudo_random += je32_to_cpu(ri->version);

	UNCHECKED_SPACE(PAD(je32_to_cpu(ri->totlen)));
	return 0;
}

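/* Handle a JFFS2_NODETYPE_DIRENT node found by the scan: verify the node and
   name CRCs now, then build a jffs2_full_dirent and hang it off the parent
   inode's scan_dents list. */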
static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				  struct jffs2_raw_dirent *rd, uint32_t ofs)
{
	struct jffs2_raw_node_ref *raw;
	struct jffs2_full_dirent *fd;
	struct jffs2_inode_cache *ic;
	uint32_t crc;

	D1(printk(KERN_DEBUG "jffs2_scan_dirent_node(): Node at 0x%08x\n", ofs));

	/* We don't get here unless the node is still valid, so we don't have to
	   mask in the ACCURATE bit any more. */
	crc = crc32(0, rd, sizeof(*rd)-8);

	if (crc != je32_to_cpu(rd->node_crc)) {
		printk(KERN_NOTICE "jffs2_scan_dirent_node(): Node CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
		       ofs, je32_to_cpu(rd->node_crc), crc);
		/* We believe totlen because the CRC on the node _header_ was OK, just the node itself failed. */
		DIRTY_SPACE(PAD(je32_to_cpu(rd->totlen)));
		return 0;
	}

	pseudo_random += je32_to_cpu(rd->version);

	fd = jffs2_alloc_full_dirent(rd->nsize+1);
	if (!fd) {
		return -ENOMEM;
	}
	memcpy(&fd->name, rd->name, rd->nsize);
	fd->name[rd->nsize] = 0;

	crc = crc32(0, fd->name, rd->nsize);
	if (crc != je32_to_cpu(rd->name_crc)) {
		printk(KERN_NOTICE "jffs2_scan_dirent_node(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
		       ofs, je32_to_cpu(rd->name_crc), crc);
		D1(printk(KERN_NOTICE "Name for which CRC failed is (now) '%s', ino #%d\n", fd->name, je32_to_cpu(rd->ino)));
		jffs2_free_full_dirent(fd);
		/* FIXME: Why do we believe totlen? */
		/* We believe totlen because the CRC on the node _header_ was OK, just the name failed. */
		DIRTY_SPACE(PAD(je32_to_cpu(rd->totlen)));
		return 0;
	}
	raw = jffs2_alloc_raw_node_ref();
	if (!raw) {
		jffs2_free_full_dirent(fd);
		printk(KERN_NOTICE "jffs2_scan_dirent_node(): allocation of node reference failed\n");
		return -ENOMEM;
	}
	ic = jffs2_scan_make_ino_cache(c, je32_to_cpu(rd->pino));
	if (!ic) {
		jffs2_free_full_dirent(fd);
		jffs2_free_raw_node_ref(raw);
		return -ENOMEM;
	}

	raw->totlen = PAD(je32_to_cpu(rd->totlen));
	raw->flash_offset = ofs | REF_PRISTINE;
	raw->next_phys = NULL;
	raw->next_in_ino = ic->nodes;
	ic->nodes = raw;
	if (!jeb->first_node)
		jeb->first_node = raw;
	if (jeb->last_node)
		jeb->last_node->next_phys = raw;
	jeb->last_node = raw;

	fd->raw = raw;
	fd->next = NULL;
	fd->version = je32_to_cpu(rd->version);
	fd->ino = je32_to_cpu(rd->ino);
	fd->nhash = full_name_hash(fd->name, rd->nsize);
	fd->type = rd->type;
	USED_SPACE(PAD(je32_to_cpu(rd->totlen)));
	jffs2_add_fd_to_list(c, fd, &ic->scan_dents);

	return 0;
}

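/* List helpers used by jffs2_rotate_lists() below. */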
static int count_list(struct list_head *l)
{
	uint32_t count = 0;
	struct list_head *tmp;

	list_for_each(tmp, l) {
		count++;
	}
	return count;
}

/* Note: This breaks if list_empty(head). I don't care. You
   might, if you copy this code and use it elsewhere :) */
static void rotate_list(struct list_head *head, uint32_t count)
{
	struct list_head *n = head->next;

	list_del(head);
	while(count--) {
		n = n->next;
	}
	list_add(head, n);
}

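/* Rotate each of the block lists by a pseudo-random amount (derived from the
   node versions seen during the scan), so that successive mounts don't always
   start allocation and garbage collection from the same eraseblock. */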
void jffs2_rotate_lists(struct jffs2_sb_info *c)
{
	uint32_t x;
	uint32_t rotateby;

	x = count_list(&c->clean_list);
	if (x) {
		rotateby = pseudo_random % x;
		D1(printk(KERN_DEBUG "Rotating clean_list by %d\n", rotateby));

		rotate_list((&c->clean_list), rotateby);

		D1(printk(KERN_DEBUG "Erase block at front of clean_list is at %08x\n",
			  list_entry(c->clean_list.next, struct jffs2_eraseblock, list)->offset));
	} else {
		D1(printk(KERN_DEBUG "Not rotating empty clean_list\n"));
	}

	x = count_list(&c->very_dirty_list);
	if (x) {
		rotateby = pseudo_random % x;
		D1(printk(KERN_DEBUG "Rotating very_dirty_list by %d\n", rotateby));

		rotate_list((&c->very_dirty_list), rotateby);

		D1(printk(KERN_DEBUG "Erase block at front of very_dirty_list is at %08x\n",
			  list_entry(c->very_dirty_list.next, struct jffs2_eraseblock, list)->offset));
	} else {
		D1(printk(KERN_DEBUG "Not rotating empty very_dirty_list\n"));
	}

	x = count_list(&c->dirty_list);
	if (x) {
		rotateby = pseudo_random % x;
		D1(printk(KERN_DEBUG "Rotating dirty_list by %d\n", rotateby));

		rotate_list((&c->dirty_list), rotateby);

		D1(printk(KERN_DEBUG "Erase block at front of dirty_list is at %08x\n",
			  list_entry(c->dirty_list.next, struct jffs2_eraseblock, list)->offset));
	} else {
		D1(printk(KERN_DEBUG "Not rotating empty dirty_list\n"));
	}

	x = count_list(&c->erasable_list);
	if (x) {
		rotateby = pseudo_random % x;
		D1(printk(KERN_DEBUG "Rotating erasable_list by %d\n", rotateby));

		rotate_list((&c->erasable_list), rotateby);

		D1(printk(KERN_DEBUG "Erase block at front of erasable_list is at %08x\n",
			  list_entry(c->erasable_list.next, struct jffs2_eraseblock, list)->offset));
	} else {
		D1(printk(KERN_DEBUG "Not rotating empty erasable_list\n"));
	}

	if (c->nr_erasing_blocks) {
		rotateby = pseudo_random % c->nr_erasing_blocks;
		D1(printk(KERN_DEBUG "Rotating erase_pending_list by %d\n", rotateby));

		rotate_list((&c->erase_pending_list), rotateby);

		D1(printk(KERN_DEBUG "Erase block at front of erase_pending_list is at %08x\n",
			  list_entry(c->erase_pending_list.next, struct jffs2_eraseblock, list)->offset));
	} else {
		D1(printk(KERN_DEBUG "Not rotating empty erase_pending_list\n"));
	}

	if (c->nr_free_blocks) {
		rotateby = pseudo_random % c->nr_free_blocks;
		D1(printk(KERN_DEBUG "Rotating free_list by %d\n", rotateby));

		rotate_list((&c->free_list), rotateby);

		D1(printk(KERN_DEBUG "Erase block at front of free_list is at %08x\n",
			  list_entry(c->free_list.next, struct jffs2_eraseblock, list)->offset));
	} else {
		D1(printk(KERN_DEBUG "Not rotating empty free_list\n"));
	}
}