/*      $NetBSD: ffs_alloc.c,v 1.18 2011/03/06 17:08:42 bouyer Exp $    */
/* From: NetBSD: ffs_alloc.c,v 1.50 2001/09/06 02:16:01 lukem Exp */

/*
 * Copyright (c) 2002 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Marshall
 * Kirk McKusick and Network Associates Laboratories, the Security
 * Research Division of Network Associates, Inc. under DARPA/SPAWAR
 * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
 * research program
 *
 * Copyright (c) 1982, 1986, 1989, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)ffs_alloc.c 8.19 (Berkeley) 7/13/95
 */

#if HAVE_NBTOOL_CONFIG_H
#include "nbtool_config.h"
#endif

#include <sys/cdefs.h>
#if defined(__RCSID) && !defined(__lint)
__RCSID("$NetBSD: ffs_alloc.c,v 1.18 2011/03/06 17:08:42 bouyer Exp $");
#endif  /* !__lint */

#include <sys/param.h>
#include <sys/time.h>

#include <errno.h>

#include "common.h"
#include "makefs.h"

#include "dinode.h"
#include "ufs_bswap.h"
#include "fs.h"

#include "buf.h"
#include "ufs_inode.h"
#include "ffs_extern.h"

static int scanc(u_int, const u_char *, const u_char *, int);

static daddr_t ffs_alloccg(struct inode *, int, daddr_t, int);
static daddr_t ffs_alloccgblk(struct inode *, struct buf *, daddr_t);
static daddr_t ffs_hashalloc(struct inode *, int, daddr_t, int,
                     daddr_t (*)(struct inode *, int, daddr_t, int));
static int32_t ffs_mapsearch(struct fs *, struct cg *, daddr_t, int);

/* bitmap lookup tables used by ffs_mapsearch(); defined in ffs_tables.c */
extern const int inside[], around[];
extern const u_char * const fragtbl[];

/*
 * Allocate a block in the file system.
 *
 * The size of the requested block is given, which must be some
 * multiple of fs_fsize and <= fs_bsize.
 * A preference may be optionally specified. If a preference is given
 * the following hierarchy is used to allocate a block:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate a block in the same cylinder group.
 *   4) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 * If no block preference is given the following hierarchy is used
 * to allocate a block:
 *   1) allocate a block in the cylinder group that contains the
 *      inode for the file.
 *   2) quadratically rehash into other cylinder groups, until an
 *      available block is located.
 */
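/*
 * For example, assuming a common 16 KiB block / 2 KiB fragment geometry
 * (fs_frag = 8), the legal values of "size" are 2048, 4096, ..., 16384,
 * i.e. one to fs_frag fragments.
 */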
int
ffs_alloc(struct inode *ip, daddr_t lbn, daddr_t bpref, int size,
    daddr_t *bnp)
{
        struct fs *fs = ip->i_fs;
        daddr_t bno;
        int cg;

        *bnp = 0;
        if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0) {
                errx(1, "ffs_alloc: bad size: bsize %d size %d",
                    fs->fs_bsize, size);
        }
        if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
                goto nospace;
        if (bpref >= fs->fs_size)
                bpref = 0;
        if (bpref == 0)
                cg = ino_to_cg(fs, ip->i_number);
        else
                cg = dtog(fs, bpref);
        bno = ffs_hashalloc(ip, cg, bpref, size, ffs_alloccg);
        if (bno > 0) {
                /* i_blocks is maintained in DEV_BSIZE (512-byte) units */
                DIP_ADD(ip, blocks, size / DEV_BSIZE);
                *bnp = bno;
                return (0);
        }
nospace:
        return (ENOSPC);
}

/*
 * Select the desired position for the next block in a file.  The file is
 * logically divided into sections. The first section is composed of the
 * direct blocks. Each additional section contains fs_maxbpg blocks.
 *
 * If no blocks have been allocated in the first section, the policy is to
 * request a block in the same cylinder group as the inode that describes
 * the file. If no blocks have been allocated in any other section, the
 * policy is to place the section in a cylinder group with a greater than
 * average number of free blocks.  An appropriate cylinder group is found
 * by using a rotor that sweeps the cylinder groups. When a new group of
 * blocks is needed, the sweep begins in the cylinder group following the
 * cylinder group from which the previous allocation was made. The sweep
 * continues until a cylinder group with greater than the average number
 * of free blocks is found. If the allocation is for the first block in an
 * indirect block, the information on the previous allocation is unavailable;
 * here a best guess is made based upon the logical block number being
 * allocated.
 *
 * If a section is already partially allocated, the policy is to
 * contiguously allocate fs_maxcontig blocks.  The end of one of these
 * contiguous blocks and the beginning of the next is physically separated
 * so that the disk head will be in transit between them for at least
 * fs_rotdelay milliseconds.  This is to allow time for the processor to
 * schedule another I/O transfer.
 */
/* XXX ondisk32 */
daddr_t
ffs_blkpref_ufs1(struct inode *ip, daddr_t lbn, int indx, int32_t *bap)
{
        struct fs *fs;
        int cg;
        int avgbfree, startcg;

        fs = ip->i_fs;
        if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
                if (lbn < NDADDR + NINDIR(fs)) {
                        cg = ino_to_cg(fs, ip->i_number);
                        return (fs->fs_fpg * cg + fs->fs_frag);
                }
                /*
                 * Find a cylinder with greater than average number of
                 * unused data blocks.
                 */
                if (indx == 0 || bap[indx - 1] == 0)
                        startcg =
                            ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
                else
                        startcg = dtog(fs,
                                ufs_rw32(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + 1);
                startcg %= fs->fs_ncg;
                avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
                for (cg = startcg; cg < fs->fs_ncg; cg++)
                        if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree)
                                return (fs->fs_fpg * cg + fs->fs_frag);
                for (cg = 0; cg <= startcg; cg++)
                        if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree)
                                return (fs->fs_fpg * cg + fs->fs_frag);
                return (0);
        }
        /*
         * We just always try to lay things out contiguously.
         */
        return ufs_rw32(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + fs->fs_frag;
}
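
/*
 * UFS2 counterpart of ffs_blkpref_ufs1(): same placement policy, but the
 * block pointer array bap[] holds 64-bit (int64_t) on-disk addresses,
 * hence the ufs_rw64() accessors.
 */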
daddr_t
ffs_blkpref_ufs2(struct inode *ip, daddr_t lbn, int indx, int64_t *bap)
{
        struct fs *fs;
        int cg;
        int avgbfree, startcg;

        fs = ip->i_fs;
        if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
                if (lbn < NDADDR + NINDIR(fs)) {
                        cg = ino_to_cg(fs, ip->i_number);
                        return (fs->fs_fpg * cg + fs->fs_frag);
                }
                /*
                 * Find a cylinder with greater than average number of
                 * unused data blocks.
                 */
                if (indx == 0 || bap[indx - 1] == 0)
                        startcg =
                            ino_to_cg(fs, ip->i_number) + lbn / fs->fs_maxbpg;
                else
                        startcg = dtog(fs,
                                ufs_rw64(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + 1);
                startcg %= fs->fs_ncg;
                avgbfree = fs->fs_cstotal.cs_nbfree / fs->fs_ncg;
                for (cg = startcg; cg < fs->fs_ncg; cg++)
                        if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
                                return (fs->fs_fpg * cg + fs->fs_frag);
                        }
                for (cg = 0; cg < startcg; cg++)
                        if (fs->fs_cs(fs, cg).cs_nbfree >= avgbfree) {
                                return (fs->fs_fpg * cg + fs->fs_frag);
                        }
                return (0);
        }
        /*
         * We just always try to lay things out contiguously.
         */
        return ufs_rw64(bap[indx - 1], UFS_FSNEEDSWAP(fs)) + fs->fs_frag;
}

/*
 * Implement the cylinder overflow algorithm.
 *
 * The policy implemented by this algorithm is:
 *   1) allocate the block in its requested cylinder group.
 *   2) quadratically rehash on the cylinder group number.
 *   3) brute force search for a free block.
 *
 * `size':      size for data blocks, mode for inodes
 */
/*VARARGS5*/
static daddr_t
ffs_hashalloc(struct inode *ip, int cg, daddr_t pref, int size,
    daddr_t (*allocator)(struct inode *, int, daddr_t, int))
{
        struct fs *fs;
        daddr_t result;
        int i, icg = cg;

        fs = ip->i_fs;
        /*
         * 1: preferred cylinder group
         */
        result = (*allocator)(ip, cg, pref, size);
        if (result)
                return (result);
        /*
         * 2: quadratic rehash
         */
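        /* the groups probed are icg+1, icg+3, icg+7, ... (mod fs_ncg) */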
        for (i = 1; i < fs->fs_ncg; i *= 2) {
                cg += i;
                if (cg >= fs->fs_ncg)
                        cg -= fs->fs_ncg;
                result = (*allocator)(ip, cg, 0, size);
                if (result)
                        return (result);
        }
        /*
         * 3: brute force search
         * Note that we start at i == 2, since 0 was checked initially,
         * and 1 is always checked in the quadratic rehash.
         */
        cg = (icg + 2) % fs->fs_ncg;
        for (i = 2; i < fs->fs_ncg; i++) {
                result = (*allocator)(ip, cg, 0, size);
                if (result)
                        return (result);
                cg++;
                if (cg == fs->fs_ncg)
                        cg = 0;
        }
        return (0);
}

/*
 * Determine whether a block can be allocated.
 *
 * Check to see if a block of the appropriate size is available,
 * and if it is, allocate it.
 */
static daddr_t
ffs_alloccg(struct inode *ip, int cg, daddr_t bpref, int size)
{
        struct cg *cgp;
        struct buf *bp;
        daddr_t bno, blkno;
        int error, frags, allocsiz, i;
        struct fs *fs = ip->i_fs;
        const int needswap = UFS_FSNEEDSWAP(fs);

        if (fs->fs_cs(fs, cg).cs_nbfree == 0 && size == fs->fs_bsize)
                return (0);
        error = bread(ip->i_fd, ip->i_fs, fsbtodb(fs, cgtod(fs, cg)),
                (int)fs->fs_cgsize, &bp);
        if (error) {
                brelse(bp);
                return (0);
        }
        cgp = (struct cg *)bp->b_data;
        if (!cg_chkmagic(cgp, needswap) ||
            (cgp->cg_cs.cs_nbfree == 0 && size == fs->fs_bsize)) {
                brelse(bp);
                return (0);
        }
        if (size == fs->fs_bsize) {
                bno = ffs_alloccgblk(ip, bp, bpref);
                bdwrite(bp);
                return (bno);
        }
        /*
         * check to see if any fragments are already available
         * allocsiz is the size which will be allocated, hacking
         * it down to a smaller size if necessary
         */
        frags = numfrags(fs, size);
        for (allocsiz = frags; allocsiz < fs->fs_frag; allocsiz++)
                if (cgp->cg_frsum[allocsiz] != 0)
                        break;
        if (allocsiz == fs->fs_frag) {
                /*
                 * no fragments were available, so a block will be
                 * allocated, and hacked up
                 */
                if (cgp->cg_cs.cs_nbfree == 0) {
                        brelse(bp);
                        return (0);
                }
                bno = ffs_alloccgblk(ip, bp, bpref);
                bpref = dtogd(fs, bno);
                for (i = frags; i < fs->fs_frag; i++)
                        setbit(cg_blksfree(cgp, needswap), bpref + i);
                i = fs->fs_frag - frags;
                ufs_add32(cgp->cg_cs.cs_nffree, i, needswap);
                fs->fs_cstotal.cs_nffree += i;
                fs->fs_cs(fs, cg).cs_nffree += i;
                fs->fs_fmod = 1;
                ufs_add32(cgp->cg_frsum[i], 1, needswap);
                bdwrite(bp);
                return (bno);
        }
        bno = ffs_mapsearch(fs, cgp, bpref, allocsiz);
        for (i = 0; i < frags; i++)
                clrbit(cg_blksfree(cgp, needswap), bno + i);
        ufs_add32(cgp->cg_cs.cs_nffree, -frags, needswap);
        fs->fs_cstotal.cs_nffree -= frags;
        fs->fs_cs(fs, cg).cs_nffree -= frags;
        fs->fs_fmod = 1;
        ufs_add32(cgp->cg_frsum[allocsiz], -1, needswap);
        if (frags != allocsiz)
                ufs_add32(cgp->cg_frsum[allocsiz - frags], 1, needswap);
        blkno = cg * fs->fs_fpg + bno;
        bdwrite(bp);
        return blkno;
}

/*
 * Allocate a block in a cylinder group.
 *
 * This algorithm implements the following policy:
 *   1) allocate the requested block.
 *   2) allocate a rotationally optimal block in the same cylinder.
 *   3) allocate the next available block on the block rotor for the
 *      specified cylinder group.
 * Note that this routine only allocates fs_bsize blocks; these
 * blocks may be fragmented by the routine that allocates them.
 */
static daddr_t
ffs_alloccgblk(struct inode *ip, struct buf *bp, daddr_t bpref)
{
        struct cg *cgp;
        daddr_t blkno;
        int32_t bno;
        struct fs *fs = ip->i_fs;
        const int needswap = UFS_FSNEEDSWAP(fs);
        u_int8_t *blksfree;

        cgp = (struct cg *)bp->b_data;
        blksfree = cg_blksfree(cgp, needswap);
        if (bpref == 0 || dtog(fs, bpref) != ufs_rw32(cgp->cg_cgx, needswap)) {
                bpref = ufs_rw32(cgp->cg_rotor, needswap);
        } else {
                bpref = blknum(fs, bpref);
                bno = dtogd(fs, bpref);
                /*
                 * if the requested block is available, use it
                 */
                if (ffs_isblock(fs, blksfree, fragstoblks(fs, bno)))
                        goto gotit;
        }
        /*
         * Take the next available one in this cylinder group.
         */
        bno = ffs_mapsearch(fs, cgp, bpref, (int)fs->fs_frag);
        if (bno < 0)
                return (0);
        cgp->cg_rotor = ufs_rw32(bno, needswap);
gotit:
        blkno = fragstoblks(fs, bno);
        ffs_clrblock(fs, blksfree, (long)blkno);
        ffs_clusteracct(fs, cgp, blkno, -1);
        ufs_add32(cgp->cg_cs.cs_nbfree, -1, needswap);
        fs->fs_cstotal.cs_nbfree--;
        fs->fs_cs(fs, ufs_rw32(cgp->cg_cgx, needswap)).cs_nbfree--;
        fs->fs_fmod = 1;
        blkno = ufs_rw32(cgp->cg_cgx, needswap) * fs->fs_fpg + bno;
        return (blkno);
}

/*
 * Free a block or fragment.
 *
 * The specified block or fragment is placed back in the
 * free map. If a fragment is deallocated, a possible
 * block reassembly is checked.
 */
void
ffs_blkfree(struct inode *ip, daddr_t bno, long size)
{
        struct cg *cgp;
        struct buf *bp;
        int32_t fragno, cgbno;
        int i, error, cg, blk, frags, bbase;
        struct fs *fs = ip->i_fs;
        const int needswap = UFS_FSNEEDSWAP(fs);

        if ((u_int)size > fs->fs_bsize || fragoff(fs, size) != 0 ||
            fragnum(fs, bno) + numfrags(fs, size) > fs->fs_frag) {
                errx(1, "blkfree: bad size: bno %lld bsize %d size %ld",
                    (long long)bno, fs->fs_bsize, size);
        }
        cg = dtog(fs, bno);
        if (bno >= fs->fs_size) {
                warnx("bad block %lld, ino %llu", (long long)bno,
                    (unsigned long long)ip->i_number);
                return;
        }
        error = bread(ip->i_fd, ip->i_fs, fsbtodb(fs, cgtod(fs, cg)),
                (int)fs->fs_cgsize, &bp);
        if (error) {
                brelse(bp);
                return;
        }
        cgp = (struct cg *)bp->b_data;
        if (!cg_chkmagic(cgp, needswap)) {
                brelse(bp);
                return;
        }
        cgbno = dtogd(fs, bno);
        if (size == fs->fs_bsize) {
                fragno = fragstoblks(fs, cgbno);
                if (!ffs_isfreeblock(fs, cg_blksfree(cgp, needswap), fragno)) {
                        errx(1, "blkfree: freeing free block %lld",
                            (long long)bno);
                }
                ffs_setblock(fs, cg_blksfree(cgp, needswap), fragno);
                ffs_clusteracct(fs, cgp, fragno, 1);
                ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
                fs->fs_cstotal.cs_nbfree++;
                fs->fs_cs(fs, cg).cs_nbfree++;
        } else {
                bbase = cgbno - fragnum(fs, cgbno);
                /*
                 * decrement the counts associated with the old frags
                 */
                blk = blkmap(fs, cg_blksfree(cgp, needswap), bbase);
                ffs_fragacct(fs, blk, cgp->cg_frsum, -1, needswap);
                /*
                 * deallocate the fragment
                 */
                frags = numfrags(fs, size);
                for (i = 0; i < frags; i++) {
                        if (isset(cg_blksfree(cgp, needswap), cgbno + i)) {
                                errx(1, "blkfree: freeing free frag: block %lld",
                                    (long long)(cgbno + i));
                        }
                        setbit(cg_blksfree(cgp, needswap), cgbno + i);
                }
                ufs_add32(cgp->cg_cs.cs_nffree, i, needswap);
                fs->fs_cstotal.cs_nffree += i;
                fs->fs_cs(fs, cg).cs_nffree += i;
                /*
                 * add back in counts associated with the new frags
                 */
                blk = blkmap(fs, cg_blksfree(cgp, needswap), bbase);
                ffs_fragacct(fs, blk, cgp->cg_frsum, 1, needswap);
                /*
                 * if a complete block has been reassembled, account for it
                 */
                fragno = fragstoblks(fs, bbase);
                if (ffs_isblock(fs, cg_blksfree(cgp, needswap), fragno)) {
                        ufs_add32(cgp->cg_cs.cs_nffree, -fs->fs_frag, needswap);
                        fs->fs_cstotal.cs_nffree -= fs->fs_frag;
                        fs->fs_cs(fs, cg).cs_nffree -= fs->fs_frag;
                        ffs_clusteracct(fs, cgp, fragno, 1);
                        ufs_add32(cgp->cg_cs.cs_nbfree, 1, needswap);
                        fs->fs_cstotal.cs_nbfree++;
                        fs->fs_cs(fs, cg).cs_nbfree++;
                }
        }
        fs->fs_fmod = 1;
        bdwrite(bp);
}
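
/*
 * Local stand-in for the kernel's scanc(): scan at most "size" bytes of
 * cp[], stopping at the first byte whose table[] entry has a bit of
 * "mask" set.  Returns the number of bytes remaining from that byte to
 * the end of the range, or 0 if no byte matched.
 */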
static int
scanc(u_int size, const u_char *cp, const u_char table[], int mask)
{
        const u_char *end = &cp[size];

        while (cp < end && (table[*cp] & mask) == 0)
                cp++;
        return (end - cp);
}

/*
 * Find a block of the specified size in the specified cylinder group.
 *
 * It is a panic if a request is made to find a block when none are
 * available.
 */
static int32_t
ffs_mapsearch(struct fs *fs, struct cg *cgp, daddr_t bpref, int allocsiz)
{
        int32_t bno;
        int start, len, loc, i;
        int blk, field, subfield, pos;
        int ostart, olen;
        const int needswap = UFS_FSNEEDSWAP(fs);

        /*
         * find the fragment by searching through the free block
         * map for an appropriate bit pattern
         */
        if (bpref)
                start = dtogd(fs, bpref) / NBBY;
        else
                start = ufs_rw32(cgp->cg_frotor, needswap) / NBBY;
        len = howmany(fs->fs_fpg, NBBY) - start;
        ostart = start;
        olen = len;
        loc = scanc((u_int)len,
                (const u_char *)&cg_blksfree(cgp, needswap)[start],
                (const u_char *)fragtbl[fs->fs_frag],
                (1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
        if (loc == 0) {
                len = start + 1;
                start = 0;
                loc = scanc((u_int)len,
                        (const u_char *)&cg_blksfree(cgp, needswap)[0],
                        (const u_char *)fragtbl[fs->fs_frag],
                        (1 << (allocsiz - 1 + (fs->fs_frag % NBBY))));
                if (loc == 0) {
                        errx(1,
    "ffs_alloccg: map corrupted: start %d len %d offset %d %ld",
                                ostart, olen,
                                ufs_rw32(cgp->cg_freeoff, needswap),
                                (long)cg_blksfree(cgp, needswap) - (long)cgp);
                        /* NOTREACHED */
                }
        }
        bno = (start + len - loc) * NBBY;
        cgp->cg_frotor = ufs_rw32(bno, needswap);
        /*
         * found the byte in the map
         * sift through the bits to find the selected frag
         */
        for (i = bno + NBBY; bno < i; bno += fs->fs_frag) {
                blk = blkmap(fs, cg_blksfree(cgp, needswap), bno);
                blk <<= 1;
                field = around[allocsiz];
                subfield = inside[allocsiz];
                for (pos = 0; pos <= fs->fs_frag - allocsiz; pos++) {
                        if ((blk & field) == subfield)
                                return (bno + pos);
                        field <<= 1;
                        subfield <<= 1;
                }
        }
        errx(1, "ffs_alloccg: block not in map: bno %lld", (long long)bno);
        return (-1);
}
