1 |
1275 |
phoenix |
/*
|
2 |
|
|
* linux/fs/hfs/bnode.c
|
3 |
|
|
*
|
4 |
|
|
* Copyright (C) 1995-1997 Paul H. Hargrove
|
5 |
|
|
* This file may be distributed under the terms of the GNU General Public License.
|
6 |
|
|
*
|
7 |
|
|
* This file contains the code to access nodes in the B-tree structure.
|
8 |
|
|
*
|
9 |
|
|
* "XXX" in a comment is a note to myself to consider changing something.
|
10 |
|
|
*
|
11 |
|
|
* In function preconditions the term "valid" applied to a pointer to
|
12 |
|
|
* a structure means that the pointer is non-NULL and the structure it
|
13 |
|
|
* points to has all fields initialized to consistent values.
|
14 |
|
|
*
|
15 |
|
|
* The code in this file initializes some structures which contain
|
16 |
|
|
* pointers by calling memset(&foo, 0, sizeof(foo)).
|
17 |
|
|
* This produces the desired behavior only due to the non-ANSI
|
18 |
|
|
* assumption that the machine representation of NULL is all zeros.
|
19 |
|
|
*/
|
20 |
|
|
|
21 |
|
|
#include "hfs_btree.h"
|
22 |
|
|
|
23 |
|
|
/*================ File-local variables ================*/
|
24 |
|
|
|
25 |
|
|
/* debugging statistics */
|
26 |
|
|
#if defined(DEBUG_BNODES) || defined(DEBUG_ALL)
|
27 |
|
|
int bnode_count = 0;
|
28 |
|
|
#endif
|
29 |
|
|
|
30 |
|
|
/*================ Global functions ================*/
|
31 |
|
|
|
32 |
|
|
/*
|
33 |
|
|
* hfs_bnode_delete()
|
34 |
|
|
*
|
35 |
|
|
* Description:
|
36 |
|
|
* This function is called to remove a bnode from the cache and
|
37 |
|
|
* release its resources.
|
38 |
|
|
* Input Variable(s):
|
39 |
|
|
* struct hfs_bnode *bn: Pointer to the (struct hfs_bnode) to be
|
40 |
|
|
* removed from the cache.
|
41 |
|
|
* Output Variable(s):
|
42 |
|
|
* NONE
|
43 |
|
|
* Returns:
|
44 |
|
|
* void
|
45 |
|
|
* Preconditions:
|
46 |
|
|
* 'bn' points to a "valid" (struct hfs_bnode).
|
47 |
|
|
* Postconditions:
|
48 |
|
|
* The node 'bn' is removed from the cache, its memory freed and its
|
49 |
|
|
* buffer (if any) released.
|
50 |
|
|
*/
|
51 |
|
|
void hfs_bnode_delete(struct hfs_bnode *bn)
|
52 |
|
|
{
|
53 |
|
|
#if defined(DEBUG_BNODES) || defined(DEBUG_ALL)
|
54 |
|
|
--bnode_count;
|
55 |
|
|
#endif
|
56 |
|
|
/* join neighbors */
|
57 |
|
|
if (bn->next) {
|
58 |
|
|
bn->next->prev = bn->prev;
|
59 |
|
|
}
|
60 |
|
|
if (bn->prev) {
|
61 |
|
|
bn->prev->next = bn->next;
|
62 |
|
|
}
|
63 |
|
|
/* fix cache slot if necessary */
|
64 |
|
|
if (bhash(bn->tree, bn->node) == bn) {
|
65 |
|
|
bhash(bn->tree, bn->node) = bn->next;
|
66 |
|
|
}
|
67 |
|
|
/* release resources */
|
68 |
|
|
hfs_buffer_put(bn->buf); /* safe: checks for NULL argument */
|
69 |
|
|
HFS_DELETE(bn);
|
70 |
|
|
}
|
71 |
|
|
|
72 |
|
|
|
73 |
|
|
/*
|
74 |
|
|
* hfs_bnode_read()
|
75 |
|
|
*
|
76 |
|
|
* Description:
|
77 |
|
|
* This function creates a (struct hfs_bnode) and, if appropriate,
|
78 |
|
|
* inserts it in the cache.
|
79 |
|
|
* Input Variable(s):
|
80 |
|
|
* struct hfs_bnode *bnode: pointer to the new bnode.
|
81 |
|
|
* struct hfs_btree *tree: pointer to the (struct hfs_btree)
|
82 |
|
|
* containing the desired node
|
83 |
|
|
* hfs_u32 node: the number of the desired node.
|
84 |
|
|
* int sticky: the value to assign to the 'sticky' field.
|
85 |
|
|
* Output Variable(s):
|
86 |
|
|
* NONE
|
87 |
|
|
* Returns:
|
88 |
|
|
 *   void
|
89 |
|
|
* Preconditions:
|
90 |
|
|
* 'bnode' points to a "valid" (struct hfs_bnode).
|
91 |
|
|
* 'tree' points to a "valid" (struct hfs_btree).
|
92 |
|
|
* 'node' is an existing node number in the B-tree.
|
93 |
|
|
* Postconditions:
|
94 |
|
|
* The following are true of 'bnode' upon return:
|
95 |
|
|
* The 'magic' field is set to indicate a valid (struct hfs_bnode).
|
96 |
|
|
* The 'sticky', 'tree' and 'node' fields are initialized to the
|
97 |
|
|
 *    values of the corresponding arguments.
|
98 |
|
|
* If the 'sticky' argument is zero then the fields 'prev' and
|
99 |
|
|
* 'next' are initialized by inserting the (struct hfs_bnode) in the
|
100 |
|
|
* linked list of the appropriate cache slot; otherwise they are
|
101 |
|
|
* initialized to NULL.
|
102 |
|
|
* The data is read from disk (or buffer cache) and the 'buf' field
|
103 |
|
|
* points to the buffer for that data.
|
104 |
|
|
* If no other processes tried to access this node while this
|
105 |
|
|
* process was waiting on disk I/O (if necessary) then the
|
106 |
|
|
* remaining fields are zero ('count', 'resrv', 'lock') or NULL
|
107 |
|
|
* ('wqueue', 'rqueue') corresponding to no accesses.
|
108 |
|
|
* If there were access attempts during I/O then they were blocked
|
109 |
|
|
* until the I/O was complete, and the fields 'count', 'resrv',
|
110 |
|
|
* 'lock', 'wqueue' and 'rqueue' reflect the results of unblocking
|
111 |
|
|
* those processes when the I/O was completed.
|
112 |
|
|
*/
|
113 |
|
|
void hfs_bnode_read(struct hfs_bnode *bnode, struct hfs_btree *tree,
		    hfs_u32 node, int sticky)
{
	struct NodeDescriptor *nd;
	int block, lcv;
	hfs_u16 curr, prev, limit;

	/* Initialize the structure; relies on NULL being all-zero bits
	   (see the note at the top of this file) */
	memset(bnode, 0, sizeof(*bnode));
	bnode->magic = HFS_BNODE_MAGIC;
	bnode->tree = tree;
	bnode->node = node;
	bnode->sticky = sticky;
	hfs_init_waitqueue(&bnode->rqueue);
	hfs_init_waitqueue(&bnode->wqueue);

	if (sticky == HFS_NOT_STICKY) {
		/* Insert it in the cache if appropriate: push onto the
		   front of the hash chain for this (tree, node) slot */
		if ((bnode->next = bhash(tree, node))) {
			bnode->next->prev = bnode;
		}
		bhash(tree, node) = bnode;
	}

	/* Make the bnode look like it is being
	   modified so other processes will wait for
	   the I/O to complete */
	bnode->count = bnode->resrv = bnode->lock = 1;

	/* Read in the node, possibly causing a schedule()
	   call.  If the I/O fails then emit a warning.  Each
	   process that was waiting on the bnode (including
	   the current one) will notice the failure and
	   hfs_bnode_relse() the node.  The last hfs_bnode_relse()
	   will call hfs_bnode_delete() and discard the bnode.  */

	/* map the node number to an on-disk block within the tree file */
	block = hfs_extent_map(&tree->entry.u.file.data_fork, node, 0);
	if (!block) {
		hfs_warn("hfs_bnode_read: bad node number 0x%08x\n", node);
	} else if (hfs_buffer_ok(bnode->buf =
				 hfs_buffer_get(tree->sys_mdb, block, 1))) {
		/* read in the NodeDescriptor (on-disk header of the node),
		   converting the multi-byte fields from big-endian */
		nd = (struct NodeDescriptor *)hfs_buffer_data(bnode->buf);
		bnode->ndFLink = hfs_get_hl(nd->ndFLink);
		bnode->ndBLink = hfs_get_hl(nd->ndBLink);
		bnode->ndType = nd->ndType;
		bnode->ndNHeight = nd->ndNHeight;
		bnode->ndNRecs = hfs_get_hs(nd->ndNRecs);

		/* verify the integrity of the node: record offsets must be
		   non-decreasing, starting past the NodeDescriptor and
		   leaving room for the offset table at the end of the
		   node.  On corruption, drop the buffer so callers see
		   the failure via hfs_buffer_ok(bnode->buf). */
		prev = sizeof(struct NodeDescriptor);
		limit = HFS_SECTOR_SIZE - sizeof(hfs_u16)*(bnode->ndNRecs + 1);
		for (lcv=1; lcv <= (bnode->ndNRecs + 1); ++lcv) {
			curr = hfs_get_hs(RECTBL(bnode, lcv));
			if ((curr < prev) || (curr > limit)) {
				hfs_warn("hfs_bnode_read: corrupt node "
					 "number 0x%08x\n", node);
				hfs_buffer_put(bnode->buf);
				bnode->buf = NULL;
				break;
			}
			prev = curr;
		}
	}

	/* Undo our fakery with the lock state and
	   hfs_wake_up() anyone who we managed to trick */
	--bnode->count;
	bnode->resrv = bnode->lock = 0;
	hfs_wake_up(&bnode->rqueue);
}
|
184 |
|
|
|
185 |
|
|
/*
|
186 |
|
|
* hfs_bnode_lock()
|
187 |
|
|
*
|
188 |
|
|
* Description:
|
189 |
|
|
* This function does the locking of a bnode.
|
190 |
|
|
* Input Variable(s):
|
191 |
|
|
 *   struct hfs_bnode_ref *bnr: pointer to the (struct hfs_bnode_ref) for the node to lock
|
192 |
|
|
* int lock_type: the type of lock desired
|
193 |
|
|
* Output Variable(s):
|
194 |
|
|
* NONE
|
195 |
|
|
* Returns:
|
196 |
|
|
* void
|
197 |
|
|
* Preconditions:
|
198 |
|
|
* 'bn' points to a "valid" (struct hfs_bnode).
|
199 |
|
|
* 'lock_type' is a valid hfs_lock_t
|
200 |
|
|
* Postconditions:
|
201 |
|
|
* The 'count' field of 'bn' is incremented by one. If 'lock_type'
|
202 |
|
|
* is HFS_LOCK_RESRV the 'resrv' field is also incremented.
|
203 |
|
|
*/
|
204 |
|
|
void hfs_bnode_lock(struct hfs_bnode_ref *bnr, int lock_type)
{
	struct hfs_bnode *bn = bnr->bn;

	/* nothing to do if the lock state is unchanged or there is no node */
	if ((lock_type == bnr->lock_type) || !bn) {
		return;
	}

	/* leaving a write lock: flush any modifications to the buffer
	   before relinquishing exclusive access */
	if (bnr->lock_type == HFS_LOCK_WRITE) {
		hfs_bnode_commit(bnr->bn);
	}

	/* outer switch: the lock state we are transitioning TO;
	   inner switches: the lock state we currently hold */
	switch (lock_type) {
	default:
		goto bail;
		break;

	case HFS_LOCK_READ:
		/* We may not obtain read access if any process is
		   currently modifying or waiting to modify this node.
		   If we can't obtain access we wait on the rqueue
		   wait queue to be woken up by the modifying process
		   when it relinquishes its lock. */
		switch (bnr->lock_type) {
		default:
			goto bail;
			break;

		case HFS_LOCK_NONE:
			while (bn->lock || waitqueue_active(&bn->wqueue)) {
				hfs_sleep_on(&bn->rqueue);
			}
			++bn->count;
			break;
		}
		break;

	case HFS_LOCK_RESRV:
		/* We may not obtain a reservation (read access with
		   an option to write later), if any process currently
		   holds a reservation on this node.  That includes
		   any process which is currently modifying this node.
		   If we can't obtain access, then we wait on the
		   rqueue wait queue to be woken up by the
		   reservation-holder when it calls hfs_bnode_relse. */
		switch (bnr->lock_type) {
		default:
			goto bail;
			break;

		case HFS_LOCK_NONE:
			while (bn->resrv) {
				hfs_sleep_on(&bn->rqueue);
			}
			bn->resrv = 1;
			++bn->count;
			break;

		case HFS_LOCK_WRITE:
			/* downgrade WRITE -> RESRV: drop the exclusive
			   lock but keep the reservation, then wake any
			   readers blocked on the rqueue */
			bn->lock = 0;
			hfs_wake_up(&bn->rqueue);
			break;
		}
		break;

	case HFS_LOCK_WRITE:
		switch (bnr->lock_type) {
		default:
			goto bail;
			break;

		case HFS_LOCK_NONE:
			/* first acquire a reservation, exactly as in the
			   HFS_LOCK_RESRV case above ... */
			while (bn->resrv) {
				hfs_sleep_on(&bn->rqueue);
			}
			bn->resrv = 1;
			++bn->count;
			/* fall through: with the reservation held we can
			   now wait for exclusive access */
		case HFS_LOCK_RESRV:
			/* ... then wait until we are the only user before
			   taking the exclusive (write) lock */
			while (bn->count > 1) {
				hfs_sleep_on(&bn->wqueue);
			}
			bn->lock = 1;
			break;
		}
		break;

	case HFS_LOCK_NONE:
		switch (bnr->lock_type) {
		default:
			goto bail;
			break;

		case HFS_LOCK_READ:
			/* This process was reading this node.  If
			   there is now exactly one other process using
			   the node then hfs_wake_up() a (potentially
			   nonexistent) waiting process.  Note that I
			   refer to "a" process since the reservation
			   system ensures that only one process can
			   get itself on the wait queue. */
			if (bn->count == 2) {
				hfs_wake_up(&bn->wqueue);
			}
			break;

		case HFS_LOCK_WRITE:
			/* This process was modifying this node.
			   Unlock the node and fall-through to the
			   HFS_LOCK_RESRV case, since a 'reservation'
			   is a prerequisite for HFS_LOCK_WRITE. */
			bn->lock = 0;
			/* fall through */
		case HFS_LOCK_RESRV:
			/* This process had placed a 'reservation' on
			   this node, indicating an intention to
			   possibly modify the node.  We can get to
			   this spot directly (if the 'reservation'
			   not converted to a HFS_LOCK_WRITE), or by
			   falling through from the above case if the
			   reservation was converted.
			   Since HFS_LOCK_RESRV and HFS_LOCK_WRITE
			   both block processes that want access
			   (HFS_LOCK_RESRV blocks other processes that
			   want reservations but allow HFS_LOCK_READ
			   accesses, while HFS_LOCK_WRITE must have
			   exclusive access and thus blocks both
			   types) we hfs_wake_up() any processes that
			   might be waiting for access.  If multiple
			   processes are waiting for a reservation
			   then the magic of process scheduling will
			   settle the dispute. */
			bn->resrv = 0;
			hfs_wake_up(&bn->rqueue);
			break;
		}
		/* in every NONE transition this process drops its use */
		--bn->count;
		break;
	}
	bnr->lock_type = lock_type;
	return;

bail:
	hfs_warn("hfs_bnode_lock: invalid lock change: %d->%d.\n",
		 bnr->lock_type, lock_type);
	return;
}
|
349 |
|
|
|
350 |
|
|
/*
|
351 |
|
|
* hfs_bnode_relse()
|
352 |
|
|
*
|
353 |
|
|
* Description:
|
354 |
|
|
* This function is called when a process is done using a bnode. If
|
355 |
|
|
* the proper conditions are met then we call hfs_bnode_delete() to remove
|
356 |
|
|
* it from the cache. If it is not deleted then we update its state
|
357 |
|
|
* to reflect one less process using it.
|
358 |
|
|
* Input Variable(s):
|
359 |
|
|
 *   struct hfs_bnode_ref *bnr: pointer to the (struct hfs_bnode_ref) for the node
 *    to release; its 'lock_type' field gives the type of lock held by this process.
|
361 |
|
|
* Output Variable(s):
|
362 |
|
|
* NONE
|
363 |
|
|
* Returns:
|
364 |
|
|
* void
|
365 |
|
|
* Preconditions:
|
366 |
|
|
* 'bn' is NULL or points to a "valid" (struct hfs_bnode).
|
367 |
|
|
* Postconditions:
|
368 |
|
|
* If 'bn' meets the appropriate conditions (see below) then it is
|
369 |
|
|
* kept in the cache and all fields are set to consistent values
|
370 |
|
|
* which reflect one less process using the node than upon entry.
|
371 |
|
|
* If 'bn' does not meet the conditions then it is deleted (see
|
372 |
|
|
* hfs_bnode_delete() for postconditions).
|
373 |
|
|
* In either case, if 'lock_type' is HFS_LOCK_WRITE
|
374 |
|
|
* then the corresponding buffer is dirtied.
|
375 |
|
|
*/
|
376 |
|
|
void hfs_bnode_relse(struct hfs_bnode_ref *bnr)
|
377 |
|
|
{
|
378 |
|
|
struct hfs_bnode *bn;
|
379 |
|
|
|
380 |
|
|
if (!bnr || !(bn = bnr->bn)) {
|
381 |
|
|
return;
|
382 |
|
|
}
|
383 |
|
|
|
384 |
|
|
/* We update the lock state of the node if it is still in use
|
385 |
|
|
or if it is "sticky" (such as the B-tree head and root).
|
386 |
|
|
Otherwise we just delete it. */
|
387 |
|
|
if ((bn->count > 1) || (waitqueue_active(&bn->rqueue)) || (bn->sticky != HFS_NOT_STICKY)) {
|
388 |
|
|
hfs_bnode_lock(bnr, HFS_LOCK_NONE);
|
389 |
|
|
} else {
|
390 |
|
|
/* dirty buffer if we (might) have modified it */
|
391 |
|
|
if (bnr->lock_type == HFS_LOCK_WRITE) {
|
392 |
|
|
hfs_bnode_commit(bn);
|
393 |
|
|
}
|
394 |
|
|
hfs_bnode_delete(bn);
|
395 |
|
|
bnr->lock_type = HFS_LOCK_NONE;
|
396 |
|
|
}
|
397 |
|
|
bnr->bn = NULL;
|
398 |
|
|
}
|
399 |
|
|
|
400 |
|
|
/*
|
401 |
|
|
* hfs_bnode_find()
|
402 |
|
|
*
|
403 |
|
|
* Description:
|
404 |
|
|
* This function is called to obtain a bnode. The cache is
|
405 |
|
|
* searched for the node. If it not found there it is added to
|
406 |
|
|
* the cache by hfs_bnode_read(). There are two special cases node=0
|
407 |
|
|
* (the header node) and node='tree'->bthRoot (the root node), in
|
408 |
|
|
* which the nodes are obtained from fields of 'tree' without
|
409 |
|
|
* consulting or modifying the cache.
|
410 |
|
|
* Input Variable(s):
|
411 |
|
|
* struct hfs_tree *tree: pointer to the (struct hfs_btree) from
|
412 |
|
|
* which to get a node.
|
413 |
|
|
* int node: the node number to get from 'tree'.
|
414 |
|
|
* int lock_type: The kind of access (HFS_LOCK_READ, or
|
415 |
|
|
* HFS_LOCK_RESRV) to obtain to the node
|
416 |
|
|
* Output Variable(s):
|
417 |
|
|
* NONE
|
418 |
|
|
* Returns:
|
419 |
|
|
* (struct hfs_bnode_ref) Reference to the requested node.
|
420 |
|
|
* Preconditions:
|
421 |
|
|
* 'tree' points to a "valid" (struct hfs_btree).
|
422 |
|
|
* Postconditions:
|
423 |
|
|
* If 'node' refers to a valid node in 'tree' and 'lock_type' has
|
424 |
|
|
* one of the values listed above and no I/O errors occur then the
|
425 |
|
|
* value returned refers to a valid (struct hfs_bnode) corresponding
|
426 |
|
|
* to the requested node with the requested access type. The node
|
427 |
|
|
* is also added to the cache if not previously present and not the
|
428 |
|
|
* root or header.
|
429 |
|
|
* If the conditions given above are not met, the bnode in the
|
430 |
|
|
* returned reference is NULL.
|
431 |
|
|
*/
|
432 |
|
|
struct hfs_bnode_ref hfs_bnode_find(struct hfs_btree *tree,
				    hfs_u32 node, int lock_type)
{
	struct hfs_bnode *bn;
	struct hfs_bnode *empty = NULL;
	struct hfs_bnode_ref bnr;

	/* default return value: a NULL reference, for the error paths */
	bnr.lock_type = HFS_LOCK_NONE;
	bnr.bn = NULL;

#if defined(DEBUG_BNODES) || defined(DEBUG_ALL)
	hfs_warn("hfs_bnode_find: %c %d:%d\n",
		 lock_type==HFS_LOCK_READ?'R':
		 (lock_type==HFS_LOCK_RESRV?'V':'W'),
		 (int)ntohl(tree->entry.cnid), node);
#endif

	/* check special cases: the header node (0) and the root node
	   live in 'tree' itself and bypass the cache entirely */
	if (!node) {
		bn = &tree->head;
		goto return_it;
	} else if (node == tree->bthRoot) {
		bn = tree->root;
		goto return_it;
	}

restart:
	/* look for the node in the cache.  The walk stops early at any
	   entry with a bad magic number (treated as end-of-chain). */
	bn = bhash(tree, node);
	while (bn && (bn->magic == HFS_BNODE_MAGIC)) {
		if (bn->node == node) {
			goto found_it;
		}
		bn = bn->next;
	}

	if (!empty) {
#if defined(DEBUG_BNODES) || defined(DEBUG_ALL)
		++bnode_count;
#endif
		/* Allocate a fresh bnode.  HFS_NEW may sleep
		   (presumably — TODO confirm), so on success re-search
		   the cache in case another process read this node in
		   while we were allocating. */
		if (HFS_NEW(empty)) {
			goto restart;
		}
		/* allocation failed: return the NULL reference */
		return bnr;
	}
	/* second pass and still not cached: use our new bnode and
	   read it in (this also inserts it into the cache) */
	bn = empty;
	hfs_bnode_read(bn, tree, node, HFS_NOT_STICKY);
	goto return_it;

found_it:
	/* check validity */
	if (bn->magic != HFS_BNODE_MAGIC) {
		/* If we find a corrupt bnode then we return
		   NULL.  However, we don't try to remove it
		   from the cache or release its resources
		   since we have no idea what kind of trouble
		   we could get into that way. */
		hfs_warn("hfs_bnode_find: bnode cache is corrupt.\n");
		return bnr;
	}
	/* the node appeared in the cache during our allocation;
	   discard the now-unneeded spare */
	if (empty) {
#if defined(DEBUG_BNODES) || defined(DEBUG_ALL)
		--bnode_count;
#endif
		HFS_DELETE(empty);
	}

return_it:
	/* Wait our turn: blocks until the requested lock is granted */
	bnr.bn = bn;
	hfs_bnode_lock(&bnr, lock_type);

	/* Check for failure to read the node from disk; relse clears
	   bnr.bn so callers see a NULL reference */
	if (!hfs_buffer_ok(bn->buf)) {
		hfs_bnode_relse(&bnr);
	}

#if defined(DEBUG_BNODES) || defined(DEBUG_ALL)
	if (!bnr.bn) {
		hfs_warn("hfs_bnode_find: failed\n");
	} else {
		hfs_warn("hfs_bnode_find: use %d(%d) lvl %d [%d]\n", bn->count,
			 bn->buf->b_count, bn->ndNHeight, bnode_count);
		hfs_warn("hfs_bnode_find: blnk %u flnk %u recs %u\n",
			 bn->ndBLink, bn->ndFLink, bn->ndNRecs);
	}
#endif

	return bnr;
}
|
522 |
|
|
|
523 |
|
|
/*
|
524 |
|
|
* hfs_bnode_commit()
|
525 |
|
|
*
|
526 |
|
|
* Called to write a possibly dirty bnode back to disk.
|
527 |
|
|
*/
|
528 |
|
|
void hfs_bnode_commit(struct hfs_bnode *bn)
|
529 |
|
|
{
|
530 |
|
|
if (hfs_buffer_ok(bn->buf)) {
|
531 |
|
|
struct NodeDescriptor *nd;
|
532 |
|
|
nd = (struct NodeDescriptor *)hfs_buffer_data(bn->buf);
|
533 |
|
|
|
534 |
|
|
hfs_put_hl(bn->ndFLink, nd->ndFLink);
|
535 |
|
|
hfs_put_hl(bn->ndBLink, nd->ndBLink);
|
536 |
|
|
nd->ndType = bn->ndType;
|
537 |
|
|
nd->ndNHeight = bn->ndNHeight;
|
538 |
|
|
hfs_put_hs(bn->ndNRecs, nd->ndNRecs);
|
539 |
|
|
hfs_buffer_dirty(bn->buf);
|
540 |
|
|
|
541 |
|
|
/* increment write count */
|
542 |
|
|
hfs_mdb_dirty(bn->tree->sys_mdb);
|
543 |
|
|
}
|
544 |
|
|
}
|