/*
 * Copyright 2000-2002 by Hans Reiser, licensing governed by reiserfs/README
 */

/*
 * Written by Anatoly P. Pinchuk pap@namesys.botik.ru
 * Programm System Institute
 * Pereslavl-Zalessky Russia
 */

/*
 * This file contains functions dealing with S+tree
 *
 * B_IS_IN_TREE
 * copy_short_key
 * copy_item_head
 * comp_short_keys
 * comp_keys
 * comp_cpu_keys
 * comp_short_le_keys
 * comp_short_cpu_keys
 * cpu_key2cpu_key
 * le_key2cpu_key
 * comp_le_keys
 * bin_search
 * get_lkey
 * get_rkey
 * key_in_buffer
 * decrement_bcount
 * decrement_counters_in_path
 * reiserfs_check_path
 * pathrelse_and_restore
 * pathrelse
 * search_by_key_reada
 * search_by_key
 * search_for_position_by_key
 * comp_items
 * prepare_for_direct_item
 * prepare_for_direntry_item
 * prepare_for_delete_or_cut
 * calc_deleted_bytes_number
 * init_tb_struct
 * padd_item
 * reiserfs_delete_item
 * reiserfs_delete_solid_item
 * reiserfs_delete_object
 * maybe_indirect_to_direct
 * indirect_to_direct_roll_back
 * reiserfs_cut_from_item
 * truncate_directory
 * reiserfs_do_truncate
 * reiserfs_paste_into_item
 * reiserfs_insert_item
 */

#include <linux/config.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/locks.h>
#include <linux/pagemap.h>
#include <linux/reiserfs_fs.h>
#include <linux/smp_lock.h>

/* Does the buffer contain a disk block which is in the tree. */
inline int B_IS_IN_TREE (const struct buffer_head * p_s_bh)
{
    RFALSE( B_LEVEL (p_s_bh) > MAX_HEIGHT,
            "PAP-1010: block (%b) has too big level (%z)", p_s_bh, p_s_bh);

    return ( B_LEVEL (p_s_bh) != FREE_LEVEL );
}

inline void copy_short_key (void * to, const void * from)
{
    memcpy (to, from, SHORT_KEY_SIZE);
}

//
// to gets item head in le form
//
inline void copy_item_head(struct item_head * p_v_to,
                           const struct item_head * p_v_from)
{
    memcpy (p_v_to, p_v_from, IH_SIZE);
}

/* k1 is pointer to on-disk structure which is stored in little-endian
   form. k2 is pointer to cpu variable. For keys of items of the same
   object this returns 0.
   Returns: -1 if key1 < key2
             0 if key1 == key2
             1 if key1 > key2 */
inline int comp_short_keys (const struct key * le_key,
                            const struct cpu_key * cpu_key)
{
    __u32 * p_s_le_u32, * p_s_cpu_u32;
    int n_key_length = REISERFS_SHORT_KEY_LEN;

    p_s_le_u32 = (__u32 *)le_key;
    p_s_cpu_u32 = (__u32 *)cpu_key;
    for( ; n_key_length--; ++p_s_le_u32, ++p_s_cpu_u32 ) {
        if ( le32_to_cpu (*p_s_le_u32) < *p_s_cpu_u32 )
            return -1;
        if ( le32_to_cpu (*p_s_le_u32) > *p_s_cpu_u32 )
            return 1;
    }

    return 0;
}

/* k1 is pointer to on-disk structure which is stored in little-endian
   form. k2 is pointer to cpu variable.
   Compare keys using all 4 key fields.
   Returns: -1 if key1 < key2
             0 if key1 == key2
             1 if key1 > key2 */
inline int comp_keys (const struct key * le_key, const struct cpu_key * cpu_key)
{
    int retval;

    retval = comp_short_keys (le_key, cpu_key);
    if (retval)
        return retval;
    if (le_key_k_offset (le_key_version(le_key), le_key) < cpu_key_k_offset (cpu_key))
        return -1;
    if (le_key_k_offset (le_key_version(le_key), le_key) > cpu_key_k_offset (cpu_key))
        return 1;

    if (cpu_key->key_length == 3)
        return 0;

    /* this part is needed only when tail conversion is in progress */
    if (le_key_k_type (le_key_version(le_key), le_key) < cpu_key_k_type (cpu_key))
        return -1;

    if (le_key_k_type (le_key_version(le_key), le_key) > cpu_key_k_type (cpu_key))
        return 1;

    return 0;
}

//
// FIXME: not used yet
//
inline int comp_cpu_keys (const struct cpu_key * key1,
                          const struct cpu_key * key2)
{
    if (key1->on_disk_key.k_dir_id < key2->on_disk_key.k_dir_id)
        return -1;
    if (key1->on_disk_key.k_dir_id > key2->on_disk_key.k_dir_id)
        return 1;

    if (key1->on_disk_key.k_objectid < key2->on_disk_key.k_objectid)
        return -1;
    if (key1->on_disk_key.k_objectid > key2->on_disk_key.k_objectid)
        return 1;

    if (cpu_key_k_offset (key1) < cpu_key_k_offset (key2))
        return -1;
    if (cpu_key_k_offset (key1) > cpu_key_k_offset (key2))
        return 1;

    reiserfs_warning (NULL, "comp_cpu_keys: type are compared for %K and %K\n",
                      key1, key2);

    if (cpu_key_k_type (key1) < cpu_key_k_type (key2))
        return -1;
    if (cpu_key_k_type (key1) > cpu_key_k_type (key2))
        return 1;
    return 0;
}

inline int comp_short_le_keys (const struct key * key1, const struct key * key2)
{
    __u32 * p_s_1_u32, * p_s_2_u32;
    int n_key_length = REISERFS_SHORT_KEY_LEN;

    p_s_1_u32 = (__u32 *)key1;
    p_s_2_u32 = (__u32 *)key2;
    for( ; n_key_length--; ++p_s_1_u32, ++p_s_2_u32 ) {
        if ( le32_to_cpu (*p_s_1_u32) < le32_to_cpu (*p_s_2_u32) )
            return -1;
        if ( le32_to_cpu (*p_s_1_u32) > le32_to_cpu (*p_s_2_u32) )
            return 1;
    }
    return 0;
}

inline int comp_short_cpu_keys (const struct cpu_key * key1,
                                const struct cpu_key * key2)
{
    __u32 * p_s_1_u32, * p_s_2_u32;
    int n_key_length = REISERFS_SHORT_KEY_LEN;

    p_s_1_u32 = (__u32 *)key1;
    p_s_2_u32 = (__u32 *)key2;

    for( ; n_key_length--; ++p_s_1_u32, ++p_s_2_u32 ) {
        if ( *p_s_1_u32 < *p_s_2_u32 )
            return -1;
        if ( *p_s_1_u32 > *p_s_2_u32 )
            return 1;
    }
    return 0;
}

inline void cpu_key2cpu_key (struct cpu_key * to, const struct cpu_key * from)
{
    memcpy (to, from, sizeof (struct cpu_key));
}

inline void le_key2cpu_key (struct cpu_key * to, const struct key * from)
{
    to->on_disk_key.k_dir_id = le32_to_cpu (from->k_dir_id);
    to->on_disk_key.k_objectid = le32_to_cpu (from->k_objectid);

    // find out version of the key
    to->version = le_key_version (from);
    if (to->version == KEY_FORMAT_3_5) {
        to->on_disk_key.u.k_offset_v1.k_offset = le32_to_cpu (from->u.k_offset_v1.k_offset);
        to->on_disk_key.u.k_offset_v1.k_uniqueness = le32_to_cpu (from->u.k_offset_v1.k_uniqueness);
    } else {
        to->on_disk_key.u.k_offset_v2.k_offset = offset_v2_k_offset(&from->u.k_offset_v2);
        to->on_disk_key.u.k_offset_v2.k_type = offset_v2_k_type(&from->u.k_offset_v2);
    }
}

// this does not say which one is bigger, it only returns 1 if keys
// are not equal, 0 otherwise
inline int comp_le_keys (const struct key * k1, const struct key * k2)
{
    return memcmp (k1, k2, sizeof (struct key));
}
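
/* Editor's note (not in the original source): a "short" key here is just the
   first two __u32 fields of a key, (k_dir_id, k_objectid), which identify the
   object; REISERFS_SHORT_KEY_LEN counts those two words.  Two items of the
   same file therefore compare equal under comp_short_keys() even when their
   offsets differ, which is why comp_keys() goes on to compare the offset and
   type fields as well. */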

/**************************************************************************
 * Binary search toolkit function
 * Search for an item in the array by the item key
 * Returns: 1 if found, 0 if not found;
 *          *p_n_pos = number of the searched element if found, else the
 *          number of the first element that is larger than p_v_key.
 **************************************************************************/
/* For those not familiar with binary search: n_lbound is the leftmost item
   that it could be, n_rbound the rightmost item that it could be.  We examine
   the item halfway between n_lbound and n_rbound, and that tells us either
   that we can increase n_lbound, or decrease n_rbound, or that we have found
   it, or, if n_lbound > n_rbound, that there are no possible items and we
   have not found it.  With each examination we cut the number of possible
   items it could be by one more than half rounded down, or we find it. */
inline int bin_search (
        const void * p_v_key,  /* Key to search for. */
        const void * p_v_base, /* First item in the array. */
        int p_n_num,           /* Number of items in the array. */
        int p_n_width,         /* Item size in the array searched.  Lest the
                                  reader be confused, note that this is
                                  crafted as a general function, and when it
                                  is applied specifically to the array of
                                  item headers in a node, p_n_width is
                                  actually the item header size, not the
                                  item size. */
        int * p_n_pos          /* Number of the searched for element. */
    ) {
    int n_rbound, n_lbound, n_j;

    for ( n_j = ((n_rbound = p_n_num - 1) + (n_lbound = 0))/2; n_lbound <= n_rbound; n_j = (n_rbound + n_lbound)/2 )
        switch( COMP_KEYS((struct key *)((char *)p_v_base + n_j * p_n_width), (struct cpu_key *)p_v_key) ) {
        case -1: n_lbound = n_j + 1; continue;
        case  1: n_rbound = n_j - 1; continue;
        case  0: *p_n_pos = n_j; return ITEM_FOUND; /* Key found in the array. */
        }

    /* bin_search did not find the given key; it returns the position of the
       key that is minimal and greater than the given one. */
    *p_n_pos = n_lbound;
    return ITEM_NOT_FOUND;
}
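
/* Editor's illustration (not part of the original file): this is how
   search_by_key() below drives bin_search() over a node -- the array base is
   the first item header (or key) in the node, and the stride depends on the
   node level:

        pos = 0;
        ret = bin_search (p_s_key, B_N_PITEM_HEAD (bh, 0), B_NR_ITEMS (bh),
                          (B_LEVEL (bh) == DISK_LEAF_NODE_LEVEL) ? IH_SIZE : KEY_SIZE,
                          &pos);
        // ret is ITEM_FOUND or ITEM_NOT_FOUND; pos is the slot to descend into

   Variable names other than the macros and functions defined in this file
   are illustrative only. */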

#ifdef CONFIG_REISERFS_CHECK
extern struct tree_balance * cur_tb;
#endif

/* Minimal possible key. It is never in the tree. */
const struct key MIN_KEY = {0, 0, {{0, 0},}};

/* Maximal possible key. It is never in the tree. */
const struct key MAX_KEY = {0xffffffff, 0xffffffff, {{0xffffffff, 0xffffffff},}};

/* Get delimiting key of the buffer by looking for it in the buffers in the
   path, starting from the bottom of the path, and going upwards.  We must
   check the path's validity at each step.  If the key is not in the path,
   there is no delimiting key in the tree (buffer is first or last buffer in
   tree), and in this case we return a special key, either MIN_KEY or
   MAX_KEY. */
inline const struct key * get_lkey (
        const struct path * p_s_chk_path,
        const struct super_block * p_s_sb
    ) {
    int n_position, n_path_offset = p_s_chk_path->path_length;
    struct buffer_head * p_s_parent;

    RFALSE( n_path_offset < FIRST_PATH_ELEMENT_OFFSET,
            "PAP-5010: illegal offset in the path");

    /* While not higher in path than first element. */
    while ( n_path_offset-- > FIRST_PATH_ELEMENT_OFFSET ) {

        RFALSE( ! buffer_uptodate(PATH_OFFSET_PBUFFER(p_s_chk_path, n_path_offset)),
                "PAP-5020: parent is not uptodate");

        /* Parent at the path is not in the tree now. */
        if ( ! B_IS_IN_TREE(p_s_parent = PATH_OFFSET_PBUFFER(p_s_chk_path, n_path_offset)) )
            return &MAX_KEY;
        /* Check whether position in the parent is correct. */
        if ( (n_position = PATH_OFFSET_POSITION(p_s_chk_path, n_path_offset)) > B_NR_ITEMS(p_s_parent) )
            return &MAX_KEY;
        /* Check whether parent at the path really points to the child. */
        if ( B_N_CHILD_NUM(p_s_parent, n_position) !=
             PATH_OFFSET_PBUFFER(p_s_chk_path, n_path_offset + 1)->b_blocknr )
            return &MAX_KEY;
        /* Return delimiting key if position in the parent is not equal to zero. */
        if ( n_position )
            return B_N_PDELIM_KEY(p_s_parent, n_position - 1);
    }
    /* Return MIN_KEY if we are in the root of the buffer tree. */
    if ( PATH_OFFSET_PBUFFER(p_s_chk_path, FIRST_PATH_ELEMENT_OFFSET)->b_blocknr ==
         SB_ROOT_BLOCK (p_s_sb) )
        return &MIN_KEY;
    return &MAX_KEY;
}

/* Get delimiting key of the buffer at the path and its right neighbor. */
inline const struct key * get_rkey (
        const struct path * p_s_chk_path,
        const struct super_block * p_s_sb
    ) {
    int n_position, n_path_offset = p_s_chk_path->path_length;
    struct buffer_head * p_s_parent;

    RFALSE( n_path_offset < FIRST_PATH_ELEMENT_OFFSET,
            "PAP-5030: illegal offset in the path");

    while ( n_path_offset-- > FIRST_PATH_ELEMENT_OFFSET ) {

        RFALSE( ! buffer_uptodate(PATH_OFFSET_PBUFFER(p_s_chk_path, n_path_offset)),
                "PAP-5040: parent is not uptodate");

        /* Parent at the path is not in the tree now. */
        if ( ! B_IS_IN_TREE(p_s_parent = PATH_OFFSET_PBUFFER(p_s_chk_path, n_path_offset)) )
            return &MIN_KEY;
        /* Check whether position in the parent is correct. */
        if ( (n_position = PATH_OFFSET_POSITION(p_s_chk_path, n_path_offset)) > B_NR_ITEMS(p_s_parent) )
            return &MIN_KEY;
        /* Check whether parent at the path really points to the child. */
        if ( B_N_CHILD_NUM(p_s_parent, n_position) !=
             PATH_OFFSET_PBUFFER(p_s_chk_path, n_path_offset + 1)->b_blocknr )
            return &MIN_KEY;
        /* Return delimiting key if position in the parent is not the last one. */
        if ( n_position != B_NR_ITEMS(p_s_parent) )
            return B_N_PDELIM_KEY(p_s_parent, n_position);
    }
    /* Return MAX_KEY if we are in the root of the buffer tree. */
    if ( PATH_OFFSET_PBUFFER(p_s_chk_path, FIRST_PATH_ELEMENT_OFFSET)->b_blocknr ==
         SB_ROOT_BLOCK (p_s_sb) )
        return &MAX_KEY;
    return &MIN_KEY;
}

/* Check whether a key is contained in the tree rooted from a buffer at a path. */
/* This works by looking at the left and right delimiting keys for the buffer
   in the last path_element in the path.  These delimiting keys are stored at
   least one level above that buffer in the tree.  If the buffer is the first
   or last node in the tree order then one of the delimiting keys may be
   absent, and in this case get_lkey and get_rkey return a special key which
   is MIN_KEY or MAX_KEY. */
static inline int key_in_buffer (
        struct path * p_s_chk_path,     /* Path which should be checked. */
        const struct cpu_key * p_s_key, /* Key which should be checked. */
        struct super_block * p_s_sb     /* Super block pointer. */
    ) {

    RFALSE( ! p_s_key || p_s_chk_path->path_length < FIRST_PATH_ELEMENT_OFFSET ||
            p_s_chk_path->path_length > MAX_HEIGHT,
            "PAP-5050: pointer to the key(%p) is NULL or illegal path length(%d)",
            p_s_key, p_s_chk_path->path_length);
    RFALSE( PATH_PLAST_BUFFER(p_s_chk_path)->b_dev == NODEV,
            "PAP-5060: device must not be NODEV");

    if ( COMP_KEYS(get_lkey(p_s_chk_path, p_s_sb), p_s_key) == 1 )
        /* left delimiting key is bigger than the key we look for */
        return 0;
    //  if ( COMP_KEYS(p_s_key, get_rkey(p_s_chk_path, p_s_sb)) != -1 )
    if ( COMP_KEYS(get_rkey(p_s_chk_path, p_s_sb), p_s_key) != 1 )
        /* p_s_key must be less than the right delimiting key */
        return 0;
    return 1;
}
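
/* Editor's note (not in the original source): taken together, get_lkey(),
   get_rkey() and key_in_buffer() encode the invariant that the buffer at the
   end of the path covers exactly the keys k with

        get_lkey(path, sb) <= k < get_rkey(path, sb)

   key_in_buffer() returns 1 only when the searched key still falls in that
   half-open interval, which is how search_by_key() detects whether a
   schedule has moved the key out of the buffer it just read. */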

inline void decrement_bcount (
        struct buffer_head * p_s_bh
    ) {
    if ( p_s_bh ) {
        if ( atomic_read (&(p_s_bh->b_count)) ) {
            put_bh(p_s_bh) ;
            return;
        }
        reiserfs_panic(NULL, "PAP-5070: decrement_bcount: trying to free free buffer %b", p_s_bh);
    }
}

/* Decrement the b_count field of all buffers in the path. */
void decrement_counters_in_path (
        struct path * p_s_search_path
    ) {
    int n_path_offset = p_s_search_path->path_length;

    RFALSE( n_path_offset < ILLEGAL_PATH_ELEMENT_OFFSET ||
            n_path_offset > EXTENDED_MAX_HEIGHT - 1,
            "PAP-5080: illegal path offset of %d", n_path_offset);

    while ( n_path_offset > ILLEGAL_PATH_ELEMENT_OFFSET ) {
        struct buffer_head * bh;

        bh = PATH_OFFSET_PBUFFER(p_s_search_path, n_path_offset--);
        decrement_bcount (bh);
    }
    p_s_search_path->path_length = ILLEGAL_PATH_ELEMENT_OFFSET;
}

int reiserfs_check_path(struct path *p) {
    RFALSE( p->path_length != ILLEGAL_PATH_ELEMENT_OFFSET,
            "path not properly relsed") ;
    return 0 ;
}

/* Release all buffers in the path.  Restore the dirty bits that were cleared
** when preparing the buffers for the log.
**
** only called from fix_nodes()
*/
void pathrelse_and_restore (
        struct super_block *s,
        struct path * p_s_search_path
    ) {
    int n_path_offset = p_s_search_path->path_length;

    RFALSE( n_path_offset < ILLEGAL_PATH_ELEMENT_OFFSET,
            "clm-4000: illegal path offset");

    while ( n_path_offset > ILLEGAL_PATH_ELEMENT_OFFSET ) {
        reiserfs_restore_prepared_buffer(s, PATH_OFFSET_PBUFFER(p_s_search_path,
                                                                n_path_offset));
        brelse(PATH_OFFSET_PBUFFER(p_s_search_path, n_path_offset--));
    }
    p_s_search_path->path_length = ILLEGAL_PATH_ELEMENT_OFFSET;
}

/* Release all buffers in the path. */
void pathrelse (
        struct path * p_s_search_path
    ) {
    int n_path_offset = p_s_search_path->path_length;

    RFALSE( n_path_offset < ILLEGAL_PATH_ELEMENT_OFFSET,
            "PAP-5090: illegal path offset");

    while ( n_path_offset > ILLEGAL_PATH_ELEMENT_OFFSET )
        brelse(PATH_OFFSET_PBUFFER(p_s_search_path, n_path_offset--));

    p_s_search_path->path_length = ILLEGAL_PATH_ELEMENT_OFFSET;
}
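
/* Editor's illustration (not part of the original file): the canonical
   calling pattern for the path helpers above, as used for example by
   reiserfs_delete_solid_item() later in this file:

        INITIALIZE_PATH (path);
        ...
        retval = search_item (sb, &cpu_key, &path);  // fills the path
        ...                       // use PATH_PLAST_BUFFER(&path), etc.
        pathrelse (&path);        // drop every buffer reference

   Every successful search pins the buffers along the path, so each search
   must eventually be paired with pathrelse() or decrement_counters_in_path(),
   otherwise reiserfs_check_path() will complain under CONFIG_REISERFS_CHECK. */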

static int is_leaf (char * buf, int blocksize, struct buffer_head * bh)
{
    struct block_head * blkh;
    struct item_head * ih;
    int used_space;
    int prev_location;
    int i;
    int nr;

    blkh = (struct block_head *)buf;
    if ( blkh_level(blkh) != DISK_LEAF_NODE_LEVEL) {
        printk ("is_leaf: this should be caught earlier\n");
        return 0;
    }

    nr = blkh_nr_item(blkh);
    if (nr < 1 || nr > ((blocksize - BLKH_SIZE) / (IH_SIZE + MIN_ITEM_LEN))) {
        /* item number is too big or too small */
        reiserfs_warning (NULL, "is_leaf: nr_item seems wrong: %z\n", bh);
        return 0;
    }
    ih = (struct item_head *)(buf + BLKH_SIZE) + nr - 1;
    used_space = BLKH_SIZE + IH_SIZE * nr + (blocksize - ih_location (ih));
    if (used_space != blocksize - blkh_free_space(blkh)) {
        /* free space does not match the calculated amount of used space */
        reiserfs_warning (NULL, "is_leaf: free space seems wrong: %z\n", bh);
        return 0;
    }

    // FIXME: this is_leaf check will hit performance too much - we may have
    // to return 1 here

    /* check tables of item heads */
    ih = (struct item_head *)(buf + BLKH_SIZE);
    prev_location = blocksize;
    for (i = 0; i < nr; i ++, ih ++) {
        if ( le_ih_k_type(ih) == TYPE_ANY) {
            reiserfs_warning (NULL, "is_leaf: wrong item type for item %h\n", ih);
            return 0;
        }
        if (ih_location (ih) >= blocksize || ih_location (ih) < IH_SIZE * nr) {
            reiserfs_warning (NULL, "is_leaf: item location seems wrong: %h\n", ih);
            return 0;
        }
        if (ih_item_len (ih) < 1 || ih_item_len (ih) > MAX_ITEM_LEN (blocksize)) {
            reiserfs_warning (NULL, "is_leaf: item length seems wrong: %h\n", ih);
            return 0;
        }
        if (prev_location - ih_location (ih) != ih_item_len (ih)) {
            reiserfs_warning (NULL, "is_leaf: item location seems wrong (second one): %h\n", ih);
            return 0;
        }
        prev_location = ih_location (ih);
    }

    // one may imagine many more checks
    return 1;
}

/* returns 1 if buf looks like an internal node, 0 otherwise */
static int is_internal (char * buf, int blocksize, struct buffer_head * bh)
{
    struct block_head * blkh;
    int nr;
    int used_space;

    blkh = (struct block_head *)buf;
    nr = blkh_level(blkh);
    if (nr <= DISK_LEAF_NODE_LEVEL || nr > MAX_HEIGHT) {
        /* this level is not possible for internal nodes */
        printk ("is_internal: this should be caught earlier\n");
        return 0;
    }

    nr = blkh_nr_item(blkh);
    if (nr > (blocksize - BLKH_SIZE - DC_SIZE) / (KEY_SIZE + DC_SIZE)) {
        /* for an internal node which is not the root we might check the min number of keys */
        reiserfs_warning (NULL, "is_internal: number of key seems wrong: %z\n", bh);
        return 0;
    }

    used_space = BLKH_SIZE + KEY_SIZE * nr + DC_SIZE * (nr + 1);
    if (used_space != blocksize - blkh_free_space(blkh)) {
        reiserfs_warning (NULL, "is_internal: free space seems wrong: %z\n", bh);
        return 0;
    }

    // one may imagine many more checks
    return 1;
}

// make sure that bh contains a formatted node of the reiserfs tree of
// 'level'-th level
static int is_tree_node (struct buffer_head * bh, int level)
{
    if (B_LEVEL (bh) != level) {
        printk ("is_tree_node: node level %d does not match to the expected one %d\n",
                B_LEVEL (bh), level);
        return 0;
    }
    if (level == DISK_LEAF_NODE_LEVEL)
        return is_leaf (bh->b_data, bh->b_size, bh);

    return is_internal (bh->b_data, bh->b_size, bh);
}

#ifdef SEARCH_BY_KEY_READA

/* The function is NOT SCHEDULE-SAFE! */
static void search_by_key_reada (struct super_block * s, int blocknr)
{
    struct buffer_head * bh;

    if (blocknr == 0)
        return;

    bh = getblk (s->s_dev, blocknr, s->s_blocksize);

    if (!buffer_uptodate (bh)) {
        ll_rw_block (READA, 1, &bh);
    }
    bh->b_count --;
}

#endif

/**************************************************************************
 * Algorithm   SearchByKey
 *             look for item in the Disk S+Tree by its key
 * Input:  p_s_sb  - super block
 *         p_s_key - pointer to the key to search
 * Output: ITEM_FOUND, ITEM_NOT_FOUND or IO_ERROR
 *         p_s_search_path - path from the root to the needed leaf
 **************************************************************************/

/* This function fills up the path from the root to the leaf as it
   descends the tree looking for the key.  It uses reiserfs_bread to
   try to find buffers in the cache given their block number.  If it
   does not find them in the cache it reads them from disk.  For each
   node search_by_key finds using reiserfs_bread it then uses
   bin_search to look through that node.  bin_search will find the
   position of the block_number of the next node if it is looking
   through an internal node.  If it is looking through a leaf node
   bin_search will find the position of the item which has key either
   equal to given key, or which is the maximal key less than the given
   key.  search_by_key returns a path that must be checked for the
   correctness of the top of the path but need not be checked for the
   correctness of the bottom of the path. */
/* The function is NOT SCHEDULE-SAFE! */
int search_by_key (struct super_block * p_s_sb,
                   const struct cpu_key * p_s_key, /* Key to search. */
                   struct path * p_s_search_path, /* This structure was
                                                     allocated and initialized
                                                     by the calling function.
                                                     It is filled up by this
                                                     function. */
                   int n_stop_level /* How far down the tree to search.  To
                                       stop at leaf level, set to
                                       DISK_LEAF_NODE_LEVEL. */
    ) {
    int n_block_number = SB_ROOT_BLOCK (p_s_sb),
        expected_level = SB_TREE_HEIGHT (p_s_sb),
        n_block_size = p_s_sb->s_blocksize;
    struct buffer_head * p_s_bh;
    struct path_element * p_s_last_element;
    int n_node_level, n_retval;
    int right_neighbor_of_leaf_node;
    int fs_gen;

#ifdef CONFIG_REISERFS_CHECK
    int n_repeat_counter = 0;
#endif

    PROC_INFO_INC( p_s_sb, search_by_key );

    /* As we add each node to a path we increase its count.  This means that
       we must be careful to release all nodes in a path before we either
       discard the path struct or re-use the path struct, as we do here. */

    decrement_counters_in_path(p_s_search_path);

    right_neighbor_of_leaf_node = 0;

    /* With each iteration of this loop we search through the items in the
       current node, and calculate the next current node (next path element)
       for the next iteration of this loop. */
    while ( 1 ) {

#ifdef CONFIG_REISERFS_CHECK
        if ( !(++n_repeat_counter % 50000) )
            reiserfs_warning (p_s_sb, "PAP-5100: search_by_key: %s:"
                              "there were %d iterations of while loop "
                              "looking for key %K\n",
                              current->comm, n_repeat_counter, p_s_key);
#endif

        /* prep path to have another element added to it. */
        p_s_last_element = PATH_OFFSET_PELEMENT(p_s_search_path, ++p_s_search_path->path_length);
        fs_gen = get_generation (p_s_sb);
        expected_level --;

#ifdef SEARCH_BY_KEY_READA
        /* schedule read of right neighbor */
        search_by_key_reada (p_s_sb, right_neighbor_of_leaf_node);
#endif

        /* Read the next tree node, and set the last element in the path to
           have a pointer to it. */
        if ( ! (p_s_bh = p_s_last_element->pe_buffer =
                reiserfs_bread(p_s_sb, n_block_number, n_block_size)) ) {
            p_s_search_path->path_length --;
            pathrelse(p_s_search_path);
            return IO_ERROR;
        }

        if( fs_changed (fs_gen, p_s_sb) ) {
            PROC_INFO_INC( p_s_sb, search_by_key_fs_changed );
            PROC_INFO_INC( p_s_sb, sbk_fs_changed[ expected_level - 1 ] );
        }

        /* It is possible that schedule occurred.  We must check whether the
           key to search is still in the tree rooted from the current buffer.
           If not then repeat search from the root. */
        if ( fs_changed (fs_gen, p_s_sb) &&
             (!B_IS_IN_TREE (p_s_bh) || !key_in_buffer(p_s_search_path, p_s_key, p_s_sb)) ) {
            PROC_INFO_INC( p_s_sb, search_by_key_restarted );
            PROC_INFO_INC( p_s_sb, sbk_restarted[ expected_level - 1 ] );
            decrement_counters_in_path(p_s_search_path);

            /* Get the root block number so that we can repeat the search
               starting from the root. */
            n_block_number = SB_ROOT_BLOCK (p_s_sb);
            expected_level = SB_TREE_HEIGHT (p_s_sb);
            right_neighbor_of_leaf_node = 0;

            /* repeat search from the root */
            continue;
        }

        /* only check that the key is in the buffer if p_s_key is not equal
           to the MAX_KEY.  The latter case is only possible in
           "finish_unfinished()" processing during mount. */
        RFALSE( COMP_KEYS( &MAX_KEY, p_s_key ) &&
                ! key_in_buffer(p_s_search_path, p_s_key, p_s_sb),
                "PAP-5130: key is not in the buffer");
#ifdef CONFIG_REISERFS_CHECK
        if ( cur_tb ) {
            print_cur_tb ("5140");
            reiserfs_panic(p_s_sb, "PAP-5140: search_by_key: schedule occurred in do_balance!");
        }
#endif

        // make sure that the node contents look like a node of the
        // expected level
        if (!is_tree_node (p_s_bh, expected_level)) {
            reiserfs_warning (p_s_sb, "vs-5150: search_by_key: "
                              "invalid format found in block %ld. Fsck?\n",
                              p_s_bh->b_blocknr);
            pathrelse (p_s_search_path);
            return IO_ERROR;
        }

        /* ok, we have acquired next formatted node in the tree */
        n_node_level = B_LEVEL (p_s_bh);

        PROC_INFO_BH_STAT( p_s_sb, p_s_bh, n_node_level - 1 );

        RFALSE( n_node_level < n_stop_level,
                "vs-5152: tree level (%d) is less than stop level (%d)",
                n_node_level, n_stop_level);

        n_retval = bin_search( p_s_key, B_N_PITEM_HEAD(p_s_bh, 0),
                               B_NR_ITEMS(p_s_bh),
                               ( n_node_level == DISK_LEAF_NODE_LEVEL ) ? IH_SIZE : KEY_SIZE,
                               &(p_s_last_element->pe_position));
        if (n_node_level == n_stop_level) {
            return n_retval;
        }

        /* we are not in the stop level */
        if (n_retval == ITEM_FOUND)
            /* item has been found, so we choose the pointer which is to the right of the found one */
            p_s_last_element->pe_position++;

        /* if the item was not found we choose the position which is to
           the left of the found item.  This requires no code,
           bin_search did it already. */

        /* So we have chosen a position in the current node which is
           an internal node.  Now we calculate child block number by
           position in the node. */
        n_block_number = B_N_CHILD_NUM(p_s_bh, p_s_last_element->pe_position);

#ifdef SEARCH_BY_KEY_READA
        /* if we are going to read a leaf node, then calculate its right neighbor if possible */
        if (n_node_level == DISK_LEAF_NODE_LEVEL + 1 && p_s_last_element->pe_position < B_NR_ITEMS (p_s_bh))
            right_neighbor_of_leaf_node = B_N_CHILD_NUM(p_s_bh, p_s_last_element->pe_position + 1);
#endif
    }
}
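
/* Editor's illustration (not part of the original file): a typical caller
   allocates a path, searches down to the leaf level, and checks the three
   possible return codes:

        INITIALIZE_PATH (path);
        int retval;

        retval = search_by_key (sb, &key, &path, DISK_LEAF_NODE_LEVEL);
        if (retval == IO_ERROR) {
                // the path has already been released on the error return
        } else {
                // ITEM_FOUND or ITEM_NOT_FOUND; PATH_LAST_POSITION(&path)
                // indexes the item (or the insertion slot) in the leaf
                pathrelse (&path);
        }

   Names other than the functions and macros defined in this file are
   illustrative only. */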

/* Form the path to an item and position in this item which contains
   file byte defined by p_s_key.  If there is no such item
   corresponding to the key, we point the path to the item with
   maximal key less than p_s_key, and *p_n_pos_in_item is set to one
   past the last entry/byte in the item.  If searching for entry in a
   directory item, and it is not found, *p_n_pos_in_item is set to one
   entry more than the entry with maximal key which is less than the
   sought key.

   Note that if there is no entry in this same node which is one more,
   then we point to an imaginary entry.  For direct items, the
   position is in units of bytes, for indirect items the position is
   in units of blocknr entries, for directory items the position is in
   units of directory entries. */

/* The function is NOT SCHEDULE-SAFE! */
int search_for_position_by_key (struct super_block * p_s_sb, /* Pointer to the super block. */
                                const struct cpu_key * p_cpu_key, /* Key to search (cpu variable) */
                                struct path * p_s_search_path /* Filled up by this function. */
    ) {
    struct item_head * p_le_ih; /* pointer to on-disk structure */
    int n_blk_size;
    loff_t item_offset, offset;
    struct reiserfs_dir_entry de;
    int retval;

    /* If searching for directory entry. */
    if ( is_direntry_cpu_key (p_cpu_key) )
        return search_by_entry_key (p_s_sb, p_cpu_key, p_s_search_path, &de);

    /* If not searching for directory entry. */

    /* If item is found. */
    retval = search_item (p_s_sb, p_cpu_key, p_s_search_path);
    if (retval == IO_ERROR)
        return retval;
    if ( retval == ITEM_FOUND ) {

        RFALSE( ! ih_item_len(
                    B_N_PITEM_HEAD(PATH_PLAST_BUFFER(p_s_search_path),
                                   PATH_LAST_POSITION(p_s_search_path))),
                "PAP-5165: item length equals zero");

        pos_in_item(p_s_search_path) = 0;
        return POSITION_FOUND;
    }

    RFALSE( ! PATH_LAST_POSITION(p_s_search_path),
            "PAP-5170: position equals zero");

    /* Item is not found.  Set path to the previous item. */
    p_le_ih = B_N_PITEM_HEAD(PATH_PLAST_BUFFER(p_s_search_path), --PATH_LAST_POSITION(p_s_search_path));
    n_blk_size = p_s_sb->s_blocksize;

    if (comp_short_keys (&(p_le_ih->ih_key), p_cpu_key)) {
        return FILE_NOT_FOUND;
    }

    // FIXME: quite ugly this far

    item_offset = le_ih_k_offset (p_le_ih);
    offset = cpu_key_k_offset (p_cpu_key);

    /* Needed byte is contained in the item pointed to by the path. */
    if (item_offset <= offset &&
        item_offset + op_bytes_number (p_le_ih, n_blk_size) > offset) {
        pos_in_item (p_s_search_path) = offset - item_offset;
        if ( is_indirect_le_ih(p_le_ih) ) {
            pos_in_item (p_s_search_path) /= n_blk_size;
        }
        return POSITION_FOUND;
    }

    /* Needed byte is not contained in the item pointed to by the
       path.  Set pos_in_item out of the item. */
    if ( is_indirect_le_ih (p_le_ih) )
        pos_in_item (p_s_search_path) = ih_item_len(p_le_ih) / UNFM_P_SIZE;
    else
        pos_in_item (p_s_search_path) = ih_item_len( p_le_ih );

    return POSITION_NOT_FOUND;
}
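
/* Editor's worked example (not in the original source), assuming a 4096-byte
   block size: suppose the caller asks for file offset 10000 and the search
   lands on an indirect item whose key offset is 8193, i.e. the item maps the
   file bytes starting at 8193.  Then the code above computes

        pos_in_item = (10000 - 8193) / 4096 = 0

   so position 0 selects the unformatted-node pointer covering that byte.
   For a direct item the division is skipped and pos_in_item stays in bytes;
   for directory items the position counts directory entries instead. */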

/* Compare given item and item pointed to by the path. */
int comp_items (const struct item_head * stored_ih, const struct path * p_s_path)
{
    struct buffer_head * p_s_bh;
    struct item_head * ih;

    /* Last buffer at the path is not in the tree. */
    if ( ! B_IS_IN_TREE(p_s_bh = PATH_PLAST_BUFFER(p_s_path)) )
        return 1;

    /* Last path position is invalid. */
    if ( PATH_LAST_POSITION(p_s_path) >= B_NR_ITEMS(p_s_bh) )
        return 1;

    /* we only need to know whether it is the same item */
    ih = get_ih (p_s_path);
    return memcmp (stored_ih, ih, IH_SIZE);
}

/* unformatted nodes are not logged anymore, ever.  This is safe now. */
#define held_by_others(bh) (atomic_read(&(bh)->b_count) > 1)

// block can not be forgotten as it is in I/O or held by someone
#define block_in_use(bh) (buffer_locked(bh) || (held_by_others(bh)))

// prepare for delete or cut of direct item
static inline int prepare_for_direct_item (struct path * path,
                                           struct item_head * le_ih,
                                           struct inode * inode,
                                           loff_t new_file_length,
                                           int * cut_size)
{
    loff_t round_len;

    if ( new_file_length == max_reiserfs_offset (inode) ) {
        /* item has to be deleted */
        *cut_size = -(IH_SIZE + ih_item_len(le_ih));
        return M_DELETE;
    }

    // new file gets truncated
    if (get_inode_item_key_version (inode) == KEY_FORMAT_3_6) {
        round_len = ROUND_UP (new_file_length);
        /* this was n_new_file_length < le_ih ... */
        if ( round_len < le_ih_k_offset (le_ih) ) {
            *cut_size = -(IH_SIZE + ih_item_len(le_ih));
            return M_DELETE; /* Delete this item. */
        }
        /* Calculate first position and size for cutting from item. */
        pos_in_item (path) = round_len - (le_ih_k_offset (le_ih) - 1);
        *cut_size = -(ih_item_len(le_ih) - pos_in_item(path));

        return M_CUT; /* Cut from this item. */
    }

    // old file: items may have any length

    if ( new_file_length < le_ih_k_offset (le_ih) ) {
        *cut_size = -(IH_SIZE + ih_item_len(le_ih));
        return M_DELETE; /* Delete this item. */
    }
    /* Calculate first position and size for cutting from item. */
    *cut_size = -(ih_item_len(le_ih) -
                  (pos_in_item (path) = new_file_length + 1 - le_ih_k_offset (le_ih)));
    return M_CUT; /* Cut from this item. */
}

static inline int prepare_for_direntry_item (struct path * path,
                                             struct item_head * le_ih,
                                             struct inode * inode,
                                             loff_t new_file_length,
                                             int * cut_size)
{
    if (le_ih_k_offset (le_ih) == DOT_OFFSET &&
        new_file_length == max_reiserfs_offset (inode)) {
        RFALSE( ih_entry_count (le_ih) != 2,
                "PAP-5220: incorrect empty directory item (%h)", le_ih);
        *cut_size = -(IH_SIZE + ih_item_len(le_ih));
        return M_DELETE; /* Delete the directory item containing the "." and ".." entries. */
    }

    if ( ih_entry_count (le_ih) == 1 ) {
        /* Delete the directory item if there is only one record left
           in this item. */
        *cut_size = -(IH_SIZE + ih_item_len(le_ih));
        return M_DELETE;
    }

    /* Cut one record from the directory item. */
    *cut_size = -(DEH_SIZE + entry_length (get_last_bh (path), le_ih, pos_in_item (path)));
    return M_CUT;
}

/* If the path points to a directory or direct item, calculate the mode and
   the size to cut, for balance.
   If the path points to an indirect item, remove some number of its
   unformatted nodes.
   In case of file truncate calculate whether this item must be
   deleted/truncated or the last unformatted node of this item will be
   converted to a direct item.
   This function returns a determination of what balance mode the calling
   function should employ. */
static char prepare_for_delete_or_cut(
    struct reiserfs_transaction_handle *th,
    struct inode * inode,
    struct path * p_s_path,
    const struct cpu_key * p_s_item_key,
    int * p_n_removed,       /* Number of unformatted nodes which were removed
                                from the end of the file. */
    int * p_n_cut_size,
    unsigned long long n_new_file_length /* MAX_KEY_OFFSET in case of delete. */
    ) {
    struct super_block * p_s_sb = inode->i_sb;
    struct item_head * p_le_ih = PATH_PITEM_HEAD(p_s_path);
    struct buffer_head * p_s_bh = PATH_PLAST_BUFFER(p_s_path);

    /* Stat_data item. */
    if ( is_statdata_le_ih (p_le_ih) ) {

        RFALSE( n_new_file_length != max_reiserfs_offset (inode),
                "PAP-5210: mode must be M_DELETE");

        *p_n_cut_size = -(IH_SIZE + ih_item_len(p_le_ih));
        return M_DELETE;
    }

    /* Directory item. */
    if ( is_direntry_le_ih (p_le_ih) )
        return prepare_for_direntry_item (p_s_path, p_le_ih, inode, n_new_file_length, p_n_cut_size);

    /* Direct item. */
    if ( is_direct_le_ih (p_le_ih) )
        return prepare_for_direct_item (p_s_path, p_le_ih, inode, n_new_file_length, p_n_cut_size);

    /* Case of an indirect item. */
    {
        int n_unfm_number,    /* Number of the item unformatted nodes. */
            n_counter,
            n_blk_size;
        __u32 * p_n_unfm_pointer; /* Pointer to the unformatted node number. */
        __u32 tmp;
        struct item_head s_ih; /* Item header. */
        char c_mode;           /* Returned mode of the balance. */
        int need_research;

        n_blk_size = p_s_sb->s_blocksize;

        /* Search for the needed object indirect item until there are no
           unformatted nodes to be removed. */
        do {
            need_research = 0;
            p_s_bh = PATH_PLAST_BUFFER(p_s_path);
            /* Copy indirect item header to a temp variable. */
            copy_item_head(&s_ih, PATH_PITEM_HEAD(p_s_path));
            /* Calculate number of unformatted nodes in this item. */
            n_unfm_number = I_UNFM_NUM(&s_ih);

            RFALSE( ! is_indirect_le_ih(&s_ih) || ! n_unfm_number ||
                    pos_in_item (p_s_path) + 1 != n_unfm_number,
                    "PAP-5240: illegal item %h "
                    "n_unfm_number = %d *p_n_pos_in_item = %d",
                    &s_ih, n_unfm_number, pos_in_item (p_s_path));

            /* Calculate balance mode and position in the item to remove
               unformatted nodes. */
            if ( n_new_file_length == max_reiserfs_offset (inode) ) { /* Case of delete. */
                pos_in_item (p_s_path) = 0;
                *p_n_cut_size = -(IH_SIZE + ih_item_len(&s_ih));
                c_mode = M_DELETE;
            }
            else { /* Case of truncate. */
                if ( n_new_file_length < le_ih_k_offset (&s_ih) ) {
                    pos_in_item (p_s_path) = 0;
                    *p_n_cut_size = -(IH_SIZE + ih_item_len(&s_ih));
                    c_mode = M_DELETE; /* Delete this item. */
                }
                else {
                    /* indirect item must be truncated starting from
                       *p_n_pos_in_item-th position */
                    pos_in_item (p_s_path) = (n_new_file_length + n_blk_size - le_ih_k_offset (&s_ih) ) >> p_s_sb->s_blocksize_bits;

                    RFALSE( pos_in_item (p_s_path) > n_unfm_number,
                            "PAP-5250: illegal position in the item");

                    /* Either convert the last unformatted node of the
                       indirect item to a direct item or increase its free
                       space. */
                    if ( pos_in_item (p_s_path) == n_unfm_number ) {
                        *p_n_cut_size = 0; /* Nothing to cut. */
                        return M_CONVERT; /* Maybe convert last unformatted node to the direct item. */
                    }
                    /* Calculate size to cut. */
                    *p_n_cut_size = -(ih_item_len(&s_ih) - pos_in_item(p_s_path) * UNFM_P_SIZE);

                    c_mode = M_CUT; /* Cut from this indirect item. */
                }
            }

            RFALSE( n_unfm_number <= pos_in_item (p_s_path),
                    "PAP-5260: illegal position in the indirect item");

            /* pointers to be cut */
            n_unfm_number -= pos_in_item (p_s_path);
            /* Set pointer to the last unformatted node pointer that is to be cut. */
            p_n_unfm_pointer = (__u32 *)B_I_PITEM(p_s_bh, &s_ih) + I_UNFM_NUM(&s_ih) - 1 - *p_n_removed;

            /* We go through the unformatted node pointers of the indirect
               item and look for the unformatted nodes in the cache.  If we
               find some of them we free them, zero the corresponding
               indirect item entries and log the buffer containing that
               indirect item.  For this we need to prepare the last path
               element for logging.  If some unformatted node has
               b_count > 1 we must not free this unformatted node since it
               is in use. */
            reiserfs_prepare_for_journal(p_s_sb, p_s_bh, 1);
            // note: the path could be changed; the first line in the for
            // loop takes care of it

            for (n_counter = *p_n_removed;
                 n_counter < n_unfm_number; n_counter++, p_n_unfm_pointer-- ) {

                if (item_moved (&s_ih, p_s_path)) {
                    need_research = 1 ;
                    break;
                }
                RFALSE( p_n_unfm_pointer < (__u32 *)B_I_PITEM(p_s_bh, &s_ih) ||
                        p_n_unfm_pointer > (__u32 *)B_I_PITEM(p_s_bh, &s_ih) + I_UNFM_NUM(&s_ih) - 1,
                        "vs-5265: pointer out of range");

                /* Hole, nothing to remove. */
                if ( ! get_block_num(p_n_unfm_pointer,0) ) {
                    (*p_n_removed)++;
                    continue;
                }

                (*p_n_removed)++;

                tmp = get_block_num(p_n_unfm_pointer,0);
                put_block_num(p_n_unfm_pointer, 0, 0);
                journal_mark_dirty (th, p_s_sb, p_s_bh);
                inode->i_blocks -= p_s_sb->s_blocksize / 512;
                reiserfs_free_block(th, tmp);
                /* In case of heavy fragmentation it is possible that each
                   block freed will cause dirtying of one more bitmap and
                   then we will quickly overflow our transaction space.
                   This is a counter-measure against that scenario. */
                if (journal_transaction_should_end(th, th->t_blocks_allocated)) {
                    int orig_len_alloc = th->t_blocks_allocated ;
                    pathrelse(p_s_path) ;

                    journal_end(th, p_s_sb, orig_len_alloc) ;
                    journal_begin(th, p_s_sb, orig_len_alloc) ;
                    reiserfs_update_inode_transaction(inode) ;
                    need_research = 1;
                    break;
                }

                if ( item_moved (&s_ih, p_s_path) ) {
                    need_research = 1;
                    break ;
                }
            }

            /* a trick.  If the buffer has been logged, this will do nothing.
               If we've broken the loop without logging it, it will restore
               the buffer. */
            reiserfs_restore_prepared_buffer(p_s_sb, p_s_bh);

            /* This loop can be optimized. */
        } while ( (*p_n_removed < n_unfm_number || need_research) &&
                  search_for_position_by_key(p_s_sb, p_s_item_key, p_s_path) == POSITION_FOUND );

        RFALSE( *p_n_removed < n_unfm_number,
                "PAP-5310: indirect item is not found");
        RFALSE( item_moved (&s_ih, p_s_path),
                "after while, comp failed, retry") ;

        if (c_mode == M_CUT)
            pos_in_item (p_s_path) *= UNFM_P_SIZE;
        return c_mode;
    }
}

/* Calculate the number of bytes which will be deleted or cut during the balance. */
int calc_deleted_bytes_number(
    struct tree_balance * p_s_tb,
    char c_mode
    ) {
    int n_del_size;
    struct item_head * p_le_ih = PATH_PITEM_HEAD(p_s_tb->tb_path);

    if ( is_statdata_le_ih (p_le_ih) )
        return 0;

    if ( is_direntry_le_ih (p_le_ih) ) {
        // return EMPTY_DIR_SIZE; /* We delete empty directories only. */
        // we can't use EMPTY_DIR_SIZE, as old format dirs have a different
        // empty size.  ick. FIXME, is this right?
        //
        return ih_item_len(p_le_ih);
    }
    n_del_size = ( c_mode == M_DELETE ) ? ih_item_len(p_le_ih) : -p_s_tb->insert_size[0];

    if ( is_indirect_le_ih (p_le_ih) )
        n_del_size = (n_del_size/UNFM_P_SIZE)*
            (PATH_PLAST_BUFFER(p_s_tb->tb_path)->b_size); // - get_ih_free_space (p_le_ih);
    return n_del_size;
}

static void init_tb_struct(
    struct reiserfs_transaction_handle *th,
    struct tree_balance * p_s_tb,
    struct super_block * p_s_sb,
    struct path * p_s_path,
    int n_size
    ) {
    memset (p_s_tb,'\0',sizeof(struct tree_balance));
    p_s_tb->transaction_handle = th ;
    p_s_tb->tb_sb = p_s_sb;
    p_s_tb->tb_path = p_s_path;
    PATH_OFFSET_PBUFFER(p_s_path, ILLEGAL_PATH_ELEMENT_OFFSET) = NULL;
    PATH_OFFSET_POSITION(p_s_path, ILLEGAL_PATH_ELEMENT_OFFSET) = 0;
    p_s_tb->insert_size[0] = n_size;
}

void padd_item (char * item, int total_length, int length)
{
    int i;

    for (i = total_length; i > length; )
        item [--i] = 0;
}

/* Delete object item. */
int reiserfs_delete_item (struct reiserfs_transaction_handle *th,
                          struct path * p_s_path, /* Path to the deleted item. */
                          const struct cpu_key * p_s_item_key, /* Key to search for the deleted item. */
                          struct inode * p_s_inode, /* inode is here just to update i_blocks */
                          struct buffer_head * p_s_un_bh) /* NULL or unformatted node pointer. */
{
    struct super_block * p_s_sb = p_s_inode->i_sb;
    struct tree_balance s_del_balance;
    struct item_head s_ih;
    int n_ret_value,
        n_del_size,
        n_removed;

#ifdef CONFIG_REISERFS_CHECK
    char c_mode;
    int n_iter = 0;
#endif

    init_tb_struct(th, &s_del_balance, p_s_sb, p_s_path, 0/*size is unknown*/);

    while ( 1 ) {
        n_removed = 0;

#ifdef CONFIG_REISERFS_CHECK
        n_iter++;
        c_mode =
#endif
            prepare_for_delete_or_cut(th, p_s_inode, p_s_path, p_s_item_key, &n_removed, &n_del_size, max_reiserfs_offset (p_s_inode));

        RFALSE( c_mode != M_DELETE, "PAP-5320: mode must be M_DELETE");

        copy_item_head(&s_ih, PATH_PITEM_HEAD(p_s_path));
        s_del_balance.insert_size[0] = n_del_size;

        n_ret_value = fix_nodes(M_DELETE, &s_del_balance, NULL, 0);
        if ( n_ret_value != REPEAT_SEARCH )
            break;

        PROC_INFO_INC( p_s_sb, delete_item_restarted );

        // file system changed, repeat search
        n_ret_value = search_for_position_by_key(p_s_sb, p_s_item_key, p_s_path);
        if (n_ret_value == IO_ERROR)
            break;
        if (n_ret_value == FILE_NOT_FOUND) {
            reiserfs_warning (p_s_sb, "vs-5340: reiserfs_delete_item: "
                              "no items of the file %K found\n", p_s_item_key);
            break;
        }
    } /* while (1) */

    if ( n_ret_value != CARRY_ON ) {
        unfix_nodes(&s_del_balance);
        return 0;
    }

    // reiserfs_delete_item returns the item length on success
    n_ret_value = calc_deleted_bytes_number(&s_del_balance, M_DELETE);

    if ( p_s_un_bh ) {
        int off;
        char *data ;

        /* We are in direct2indirect conversion, so move tail contents
           to the unformatted node */
        /* note, we do the copy before preparing the buffer because we
        ** don't care about the contents of the unformatted node yet.
        ** the only thing we really care about is the direct item's data
        ** is in the unformatted node.
        **
        ** Otherwise, we would have to call reiserfs_prepare_for_journal on
        ** the unformatted node, which might schedule, meaning we'd have to
        ** loop all the way back up to the start of the while loop.
        **
        ** The unformatted node must be dirtied later on.  We can't be
        ** sure here if the entire tail has been deleted yet.
        **
        ** p_s_un_bh is from the page cache (all unformatted nodes are
        ** from the page cache) and might be a highmem page.  So, we
        ** can't use p_s_un_bh->b_data.  But, the page has already been
        ** kmapped, so we can use page_address()
        ** -clm
        */

        data = page_address(p_s_un_bh->b_page) ;
        off = ((le_ih_k_offset (&s_ih) - 1) & (PAGE_CACHE_SIZE - 1));
        memcpy(data + off,
               B_I_PITEM(PATH_PLAST_BUFFER(p_s_path), &s_ih), n_ret_value);
    }

    /* Perform balancing after all resources have been collected at once. */
    do_balance(&s_del_balance, NULL, NULL, M_DELETE);

    /* Return deleted body length */
    return n_ret_value;
}
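
/* Editor's note (not in the original source): reiserfs_delete_item() above is
   the template that the tree-modifying entry points in this file follow --
   prepare, reserve, and only then balance:

        while (1) {
                prepare_for_delete_or_cut(...);  // pick mode, compute cut size
                if (fix_nodes(mode, &tb, ...) != REPEAT_SEARCH)
                        break;                   // resources reserved
                search_for_position_by_key(...); // tree changed under us, redo
        }
        if (reserved)
                do_balance(&tb, ..., mode);      // commit the structural change
        else
                unfix_nodes(&tb);                // back out the reservation

   fix_nodes() may schedule, so REPEAT_SEARCH simply means "the tree may have
   changed; search again and retry".  The pseudocode above is illustrative
   only. */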
|
1322 |
|
|
|
1323 |
|
|
|
1324 |
|
|
/* Summary Of Mechanisms For Handling Collisions Between Processes:

   Deletion of the body of the object is performed by iput(), with the
   result that if multiple processes are operating on a file, the
   deletion of the body of the file is deferred until the last process
   that has an open inode performs its iput().

   Writes and truncates are protected from collisions by use of
   semaphores.

   Creates, linking, and mknod are protected from collisions with other
   processes by making reiserfs_add_entry() the last step in the
   creation, and then rolling back all changes if there was a collision.
   - Hans
*/

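/*
 * Schematically (a sketch of the ordering described above, not the exact
 * call sequence used by the real create/link/mknod paths):
 *
 *     create stat data and other items for the new object;
 *     if (reiserfs_add_entry() fails because the name now exists)
 *         roll back: delete the just-created items;
 *
 * i.e. the directory entry is inserted last, so a losing racer can undo
 * its work without ever having exposed a half-built object by name.
 */
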
/* this deletes an item which never gets split */
void reiserfs_delete_solid_item (struct reiserfs_transaction_handle *th,
                                 struct key * key)
{
    struct tree_balance tb;
    INITIALIZE_PATH (path);
    int item_len;
    int tb_init = 0;
    struct cpu_key cpu_key;
    int retval;

    le_key2cpu_key (&cpu_key, key);

    while (1) {
        retval = search_item (th->t_super, &cpu_key, &path);
        if (retval == IO_ERROR) {
            reiserfs_warning (th->t_super, "vs-5350: reiserfs_delete_solid_item: "
                              "i/o failure occurred trying to delete %K\n", &cpu_key);
            break;
        }
        if (retval != ITEM_FOUND) {
            pathrelse (&path);
            // No need for a warning if there was just no free space to insert the '..' item into the newly created subdir
            if ( !( (unsigned long long) GET_HASH_VALUE (le_key_k_offset (le_key_version (key), key)) == 0 &&
                    GET_GENERATION_NUMBER (le_key_k_offset (le_key_version (key), key)) == 1 ) )
                reiserfs_warning (th->t_super, "vs-5355: reiserfs_delete_solid_item: %k not found\n", key);
            break;
        }
        if (!tb_init) {
            tb_init = 1;
            item_len = ih_item_len( PATH_PITEM_HEAD(&path) );
            init_tb_struct (th, &tb, th->t_super, &path, - (IH_SIZE + item_len));
        }

        retval = fix_nodes (M_DELETE, &tb, NULL, 0);
        if (retval == REPEAT_SEARCH) {
            PROC_INFO_INC( th -> t_super, delete_solid_item_restarted );
            continue;
        }

        if (retval == CARRY_ON) {
            do_balance (&tb, NULL, NULL, M_DELETE);
            break;
        }

        // IO_ERROR, NO_DISK_SPACE, etc
        reiserfs_warning (th->t_super, "vs-5360: reiserfs_delete_solid_item: "
                          "could not delete %K due to fix_nodes failure\n", &cpu_key);
        unfix_nodes (&tb);
        break;
    }

    reiserfs_check_path(&path);
}

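/*
 * Unlike reiserfs_delete_item() above, reiserfs_delete_solid_item() needs
 * only a little-endian key: it re-searches for the item itself and removes
 * the whole item in one balance.  It is used below for items that are never
 * split across leaves, such as a directory's item holding "." and ".." and
 * the stat data item.
 */
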
void reiserfs_delete_object (struct reiserfs_transaction_handle *th, struct inode * inode)
{
    inode->i_size = 0;

    /* for a directory this deletes the item containing "." and ".." */
    reiserfs_do_truncate (th, inode, NULL, 0/*no timestamp updates*/);

#if defined( USE_INODE_GENERATION_COUNTER )
    if ( !old_format_only ( th -> t_super ) )
    {
        __u32 *inode_generation;

        inode_generation =
            &th -> t_super -> u.reiserfs_sb.s_rs -> s_inode_generation;
        *inode_generation = cpu_to_le32( le32_to_cpu( *inode_generation ) + 1 );
    }
/* USE_INODE_GENERATION_COUNTER */
#endif
    reiserfs_delete_solid_item (th, INODE_PKEY (inode));
}

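/*
 * Note: reiserfs_delete_object() relies on reiserfs_do_truncate(), which
 * (see its comment below) must be called with a transaction already
 * started, so the same holds for the whole object deletion: the body is
 * truncated away first, the inode generation counter is bumped on new
 * format file systems, and only then is the stat data item removed.
 */
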
static int maybe_indirect_to_direct (struct reiserfs_transaction_handle *th,
                                     struct inode * p_s_inode,
                                     struct page *page,
                                     struct path * p_s_path,
                                     const struct cpu_key * p_s_item_key,
                                     loff_t n_new_file_size,
                                     char * p_c_mode
                                     ) {
    struct super_block * p_s_sb = p_s_inode->i_sb;
    int n_block_size = p_s_sb->s_blocksize;
    int cut_bytes;

    if (n_new_file_size != p_s_inode->i_size)
        BUG ();

    /* the page being sent in could be NULL if there was an i/o error
    ** reading in the last block.  The user will hit problems trying to
    ** read the file, but for now we just skip the indirect2direct
    ** conversion.
    */
    if (atomic_read(&p_s_inode->i_count) > 1 ||
        !tail_has_to_be_packed (p_s_inode) ||
        !page || (p_s_inode->u.reiserfs_i.i_flags & i_nopack_mask)) {
        // leave tail in an unformatted node
        *p_c_mode = M_SKIP_BALANCING;
        cut_bytes = n_block_size - (n_new_file_size & (n_block_size - 1));
        pathrelse(p_s_path);
        return cut_bytes;
    }
    /* Perform the conversion to a direct_item. */
    /*return indirect_to_direct (p_s_inode, p_s_path, p_s_item_key, n_new_file_size, p_c_mode);*/
    return indirect2direct (th, p_s_inode, page, p_s_path, p_s_item_key, n_new_file_size, p_c_mode);
}

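/*
 * For illustration (assuming a 4096-byte block size): if the new file size
 * is 10000 bytes, then 10000 & 4095 == 1808 tail bytes remain in the last
 * unformatted node, and the M_SKIP_BALANCING branch above reports
 * cut_bytes = 4096 - 1808 = 2288, i.e. the bytes of that block which lie
 * beyond the new end of file.
 */
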
/* we did the indirect_to_direct conversion and inserted the direct
   item successfully, but there was no disk space to cut the unformatted
   node pointer being converted. Therefore we have to delete the inserted
   direct item(s) */
static void indirect_to_direct_roll_back (struct reiserfs_transaction_handle *th, struct inode * inode, struct path * path)
{
    struct cpu_key tail_key;
    int tail_len;
    int removed;

    make_cpu_key (&tail_key, inode, inode->i_size + 1, TYPE_DIRECT, 4);// !!!!
    tail_key.key_length = 4;

    tail_len = (cpu_key_k_offset (&tail_key) & (inode->i_sb->s_blocksize - 1)) - 1;
    while (tail_len) {
        /* look for the last byte of the tail */
        if (search_for_position_by_key (inode->i_sb, &tail_key, path) == POSITION_NOT_FOUND)
            reiserfs_panic (inode->i_sb, "vs-5615: indirect_to_direct_roll_back: found invalid item");
        RFALSE( path->pos_in_item != ih_item_len(PATH_PITEM_HEAD (path)) - 1,
                "vs-5616: appended bytes found");
        PATH_LAST_POSITION (path) --;

        removed = reiserfs_delete_item (th, path, &tail_key, inode, 0/*unbh not needed*/);
        RFALSE( removed <= 0 || removed > tail_len,
                "vs-5617: there was tail %d bytes, removed item length %d bytes",
                tail_len, removed);
        tail_len -= removed;
        set_cpu_key_k_offset (&tail_key, cpu_key_k_offset (&tail_key) - removed);
    }
    reiserfs_warning (inode->i_sb, "indirect_to_direct_roll_back: indirect_to_direct conversion has been rolled back due to lack of disk space\n");
    //mark_file_without_tail (inode);
    mark_inode_dirty (inode);
}

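/*
 * For illustration (assuming a 4096-byte block size): with i_size == 5000
 * the roll-back above starts from tail_key offset 5001, and
 * tail_len = (5001 & 4095) - 1 = 904 == 5000 & 4095, i.e. exactly the
 * number of tail bytes that the failed conversion had copied into direct
 * item(s) and that must now be deleted again.
 */
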
/* (Truncate or cut entry) or delete object item. Returns < 0 on failure */
int reiserfs_cut_from_item (struct reiserfs_transaction_handle *th,
                            struct path * p_s_path,
                            struct cpu_key * p_s_item_key,
                            struct inode * p_s_inode,
                            struct page *page,
                            loff_t n_new_file_size)
{
    struct super_block * p_s_sb = p_s_inode->i_sb;
    /* Every function which is going to call do_balance must first
       create a tree_balance structure.  Then it must fill up this
       structure by using the init_tb_struct and fix_nodes functions.
       After that the tree can be balanced. */
    struct tree_balance s_cut_balance;
    int n_cut_size = 0,        /* Amount to be cut. */
        n_ret_value = CARRY_ON,
        n_removed = 0,         /* Number of the removed unformatted nodes. */
        n_is_inode_locked = 0;
    char c_mode;               /* Mode of the balance. */
    int retval2 = -1;


    init_tb_struct(th, &s_cut_balance, p_s_inode->i_sb, p_s_path, n_cut_size);


    /* Repeat this loop until we either cut the item without needing
       to balance, or we fix_nodes without schedule occurring */
    while ( 1 ) {
        /* Determine the balance mode, the position of the first byte to
           be cut, and the size to be cut.  In the case of an indirect
           item, free the unformatted nodes pointed to by the cut
           pointers. */

        c_mode = prepare_for_delete_or_cut(th, p_s_inode, p_s_path, p_s_item_key, &n_removed,
                                           &n_cut_size, n_new_file_size);
        if ( c_mode == M_CONVERT ) {
            /* convert the last unformatted node to a direct item, or
               leave the tail in the unformatted node */
            RFALSE( n_ret_value != CARRY_ON, "PAP-5570: can not convert twice");

            n_ret_value = maybe_indirect_to_direct (th, p_s_inode, page, p_s_path, p_s_item_key,
                                                    n_new_file_size, &c_mode);
            if ( c_mode == M_SKIP_BALANCING )
                /* tail has been left in the unformatted node */
                return n_ret_value;

            n_is_inode_locked = 1;

            /* removing the last unformatted node will change the value we
               have to return to truncate, so save it */
            retval2 = n_ret_value;
            /*retval2 = p_s_sb->s_blocksize - (n_new_file_size & (p_s_sb->s_blocksize - 1));*/

            /* So, we have performed the first part of the conversion:
               inserting the new direct item.  Now we are removing the
               last unformatted node pointer. Set the key to search for
               it. */
            set_cpu_key_k_type (p_s_item_key, TYPE_INDIRECT);
            p_s_item_key->key_length = 4;
            n_new_file_size -= (n_new_file_size & (p_s_sb->s_blocksize - 1));
            set_cpu_key_k_offset (p_s_item_key, n_new_file_size + 1);
            if ( search_for_position_by_key(p_s_sb, p_s_item_key, p_s_path) == POSITION_NOT_FOUND ){
                print_block (PATH_PLAST_BUFFER (p_s_path), 3, PATH_LAST_POSITION (p_s_path) - 1, PATH_LAST_POSITION (p_s_path) + 1);
                reiserfs_panic(p_s_sb, "PAP-5580: reiserfs_cut_from_item: item to convert does not exist (%K)", p_s_item_key);
            }
            continue;
        }
        if (n_cut_size == 0) {
            pathrelse (p_s_path);
            return 0;
        }

        s_cut_balance.insert_size[0] = n_cut_size;

        n_ret_value = fix_nodes(c_mode, &s_cut_balance, NULL, 0);
        if ( n_ret_value != REPEAT_SEARCH )
            break;

        PROC_INFO_INC( p_s_sb, cut_from_item_restarted );

        n_ret_value = search_for_position_by_key(p_s_sb, p_s_item_key, p_s_path);
        if (n_ret_value == POSITION_FOUND)
            continue;

        reiserfs_warning (p_s_sb, "PAP-5610: reiserfs_cut_from_item: item %K not found\n", p_s_item_key);
        unfix_nodes (&s_cut_balance);
        return (n_ret_value == IO_ERROR) ? -EIO : -ENOENT;
    } /* while */

    // check fix_nodes results (IO_ERROR or NO_DISK_SPACE)
    if ( n_ret_value != CARRY_ON ) {
        if ( n_is_inode_locked ) {
            // FIXME: this seems not to be needed: we are always able
            // to cut the item
            indirect_to_direct_roll_back (th, p_s_inode, p_s_path);
        }
        if (n_ret_value == NO_DISK_SPACE)
            reiserfs_warning (p_s_sb, "NO_DISK_SPACE\n");
        unfix_nodes (&s_cut_balance);
        return -EIO;
    }

    /* go ahead and perform balancing */

    RFALSE( c_mode == M_PASTE || c_mode == M_INSERT, "illegal mode");

    /* Calculate the number of bytes that need to be cut from the item. */
    if (retval2 == -1)
        n_ret_value = calc_deleted_bytes_number(&s_cut_balance, c_mode);
    else
        n_ret_value = retval2;

    if ( c_mode == M_DELETE ) {
        struct item_head * p_le_ih = PATH_PITEM_HEAD (s_cut_balance.tb_path);

        if ( is_direct_le_ih (p_le_ih) && (le_ih_k_offset (p_le_ih) & (p_s_sb->s_blocksize - 1)) == 1 ) {
            /* we are deleting the first part of the tail, which was
               stored in direct item(s) */
            // FIXME: this is to keep 3.5 happy
            p_s_inode->u.reiserfs_i.i_first_direct_byte = U32_MAX;
            p_s_inode->i_blocks -= p_s_sb->s_blocksize / 512;
        }
    }

#ifdef CONFIG_REISERFS_CHECK
    if (n_is_inode_locked) {
        struct item_head * le_ih = PATH_PITEM_HEAD (s_cut_balance.tb_path);
        /* we are going to complete the indirect2direct conversion. Make
           sure that we remove exactly the last unformatted node pointer
           of the item */
        if (!is_indirect_le_ih (le_ih))
            reiserfs_panic (p_s_sb, "vs-5652: reiserfs_cut_from_item: "
                            "item must be indirect %h", le_ih);

        if (c_mode == M_DELETE && ih_item_len(le_ih) != UNFM_P_SIZE)
            reiserfs_panic (p_s_sb, "vs-5653: reiserfs_cut_from_item: "
                            "completing indirect2direct conversion: indirect item %h "
                            "being deleted must be 4 bytes long", le_ih);

        if (c_mode == M_CUT && s_cut_balance.insert_size[0] != -UNFM_P_SIZE) {
            reiserfs_panic (p_s_sb, "vs-5654: reiserfs_cut_from_item: "
                            "can not complete indirect2direct conversion of %h (CUT, insert_size==%d)",
                            le_ih, s_cut_balance.insert_size[0]);
        }
        /* it would be useful to make sure that the right neighboring
           item is a direct item of this file */
    }
#endif

    do_balance(&s_cut_balance, NULL, NULL, c_mode);
    if ( n_is_inode_locked ) {
        /* we've done an indirect->direct conversion. When the data block
        ** was freed, it was removed from the list of blocks that must
        ** be flushed before the transaction commits, so we don't need to
        ** deal with it here.
        */
        p_s_inode->u.reiserfs_i.i_flags &= ~i_pack_on_close_mask;
    }
    return n_ret_value;
}

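/*
 * The comment at the top of reiserfs_cut_from_item() describes the protocol
 * every do_balance() caller in this file follows.  The fragment below is an
 * illustration only, kept out of the build with #if 0: a minimal sketch of
 * that protocol for a hypothetical cut of 'size' bytes from the item that
 * 'path' points to, with the conversion cases and most error handling left
 * out.  example_cut_protocol is not a real reiserfs function.
 */
#if 0
static int example_cut_protocol (struct reiserfs_transaction_handle *th,
                                 struct super_block * sb,
                                 struct cpu_key * key,
                                 struct path * path,
                                 int size)
{
    struct tree_balance tb;
    int retval;

    /* 1. describe the change: a negative insert_size means bytes removed */
    init_tb_struct (th, &tb, sb, path, -size);

    while (1) {
        /* 2. collect every buffer the balance may touch */
        retval = fix_nodes (M_CUT, &tb, NULL, NULL);
        if (retval != REPEAT_SEARCH)
            break;
        /* fix_nodes blocked and the tree may have changed: redo the search */
        if (search_for_position_by_key (sb, key, path) != POSITION_FOUND) {
            unfix_nodes (&tb);
            return -ENOENT;
        }
    }

    if (retval != CARRY_ON) {
        /* IO_ERROR or NO_DISK_SPACE: release the buffers and the path */
        unfix_nodes (&tb);
        return -EIO;
    }

    /* 3. apply the change; do_balance consumes the tree_balance */
    do_balance (&tb, NULL, NULL, M_CUT);
    return 0;
}
#endif
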
static void truncate_directory (struct reiserfs_transaction_handle *th, struct inode * inode)
{
    if (inode->i_nlink)
        reiserfs_warning (th->t_super, "vs-5655: truncate_directory: link count != 0\n");

    set_le_key_k_offset (KEY_FORMAT_3_5, INODE_PKEY (inode), DOT_OFFSET);
    set_le_key_k_type (KEY_FORMAT_3_5, INODE_PKEY (inode), TYPE_DIRENTRY);
    reiserfs_delete_solid_item (th, INODE_PKEY (inode));

    set_le_key_k_offset (KEY_FORMAT_3_5, INODE_PKEY (inode), SD_OFFSET);
    set_le_key_k_type (KEY_FORMAT_3_5, INODE_PKEY (inode), TYPE_STAT_DATA);
}

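/*
 * Note: truncate_directory() deliberately leaves the inode key pointing at
 * the stat data again (SD_OFFSET / TYPE_STAT_DATA), so that
 * reiserfs_delete_object() above can follow up with
 * reiserfs_delete_solid_item() to remove the stat data item itself.
 */
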
/* Truncate the file to the new size. Note, this must be called with a
   transaction already started */
void reiserfs_do_truncate (struct reiserfs_transaction_handle *th,
                           struct inode * p_s_inode, /* ->i_size contains the
                                                        new size */
                           struct page *page,        /* up to date for the last block */
                           int update_timestamps     /* when called by
                                                        file_release to convert
                                                        the tail, no timestamps
                                                        should be updated */
    ) {
    INITIALIZE_PATH (s_search_path);   /* Path to the current object item. */
    struct item_head * p_le_ih;        /* Pointer to an item header. */
    struct cpu_key s_item_key;         /* Key to search for a previous file item. */
    loff_t n_file_size,                /* Old file size. */
        n_new_file_size;               /* New file size. */
    int n_deleted;                     /* Number of deleted or truncated bytes. */
    int retval;

    if ( ! (S_ISREG(p_s_inode->i_mode) || S_ISDIR(p_s_inode->i_mode) || S_ISLNK(p_s_inode->i_mode)) )
        return;

    if (S_ISDIR(p_s_inode->i_mode)) {
        // deletion of a directory - no need to update timestamps
        truncate_directory (th, p_s_inode);
        return;
    }

    /* Get new file size. */
    n_new_file_size = p_s_inode->i_size;

    // FIXME: note that the key type is unimportant here
    make_cpu_key (&s_item_key, p_s_inode, max_reiserfs_offset (p_s_inode), TYPE_DIRECT, 3);

    retval = search_for_position_by_key(p_s_inode->i_sb, &s_item_key, &s_search_path);
    if (retval == IO_ERROR) {
        reiserfs_warning (p_s_inode->i_sb, "vs-5657: reiserfs_do_truncate: "
                          "i/o failure occurred trying to truncate %K\n", &s_item_key);
        return;
    }
    if (retval == POSITION_FOUND || retval == FILE_NOT_FOUND) {
        pathrelse (&s_search_path);
        reiserfs_warning (p_s_inode->i_sb, "PAP-5660: reiserfs_do_truncate: "
                          "wrong result %d of search for %K\n", retval, &s_item_key);
        return;
    }

    s_search_path.pos_in_item --;

    /* Get the real file size (total length of all file items) */
    p_le_ih = PATH_PITEM_HEAD(&s_search_path);
    if ( is_statdata_le_ih (p_le_ih) )
        n_file_size = 0;
    else {
        loff_t offset = le_ih_k_offset (p_le_ih);
        int bytes = op_bytes_number (p_le_ih, p_s_inode->i_sb->s_blocksize);

        /* this may not match the real file size: if the last direct item
           had no padding zeros and the last unformatted node had no free
           space, the file would have exactly this size */
        n_file_size = offset + bytes - 1;
    }

    if ( n_file_size == 0 || n_file_size < n_new_file_size ) {
        goto update_and_out;
    }

    /* Update the key to search for the last file item. */
    set_cpu_key_k_offset (&s_item_key, n_file_size);

    do {
        /* Cut or delete the file item. */
        n_deleted = reiserfs_cut_from_item(th, &s_search_path, &s_item_key, p_s_inode, page, n_new_file_size);
        if (n_deleted < 0) {
            reiserfs_warning (th->t_super, "vs-5665: reiserfs_truncate_file: cut_from_item failed\n");
            reiserfs_check_path(&s_search_path);
            return;
        }

        RFALSE( n_deleted > n_file_size,
                "PAP-5670: reiserfs_truncate_file returns too big number: deleted %d, file_size %lu, item_key %K",
                n_deleted, n_file_size, &s_item_key);

        /* Change the key to search for the last file item. */
        n_file_size -= n_deleted;

        set_cpu_key_k_offset (&s_item_key, n_file_size);

        /* Repeat while there are bytes to truncate and the previous file
           item is present in the tree. */

        /*
        ** This loop could take a really long time, and could log
        ** many more blocks than a transaction can hold.  So, we do a polite
        ** journal end here, and if the transaction needs ending, we make
        ** sure the file is consistent before ending the current transaction
        ** and starting a new one
        */
        if (journal_transaction_should_end(th, th->t_blocks_allocated)) {
            int orig_len_alloc = th->t_blocks_allocated;
            decrement_counters_in_path(&s_search_path);

            if (update_timestamps) {
                p_s_inode->i_mtime = p_s_inode->i_ctime = CURRENT_TIME;
            }
            reiserfs_update_sd(th, p_s_inode);

            journal_end(th, p_s_inode->i_sb, orig_len_alloc);
            journal_begin(th, p_s_inode->i_sb, orig_len_alloc);
            reiserfs_update_inode_transaction(p_s_inode);
        }
    } while ( n_file_size > ROUND_UP (n_new_file_size) &&
              search_for_position_by_key(p_s_inode->i_sb, &s_item_key, &s_search_path) == POSITION_FOUND );

    RFALSE( n_file_size > ROUND_UP (n_new_file_size),
            "PAP-5680: truncate did not finish: new_file_size %Ld, current %Ld, oid %d\n",
            n_new_file_size, n_file_size, s_item_key.on_disk_key.k_objectid);

update_and_out:
    if (update_timestamps) {
        // this is a truncate, not a file closing
        p_s_inode->i_mtime = p_s_inode->i_ctime = CURRENT_TIME;
    }
    reiserfs_update_sd (th, p_s_inode);

    pathrelse(&s_search_path);
}

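/*
 * reiserfs_do_truncate() must be called with a transaction already started
 * (see the comment above).  The fragment below is an illustration only,
 * kept out of the build with #if 0: a minimal sketch of such a caller.
 * example_truncate is not a real reiserfs function, the block count passed
 * to journal_begin/journal_end is only an illustrative choice, and the
 * locking done by the real callers is omitted.
 */
#if 0
static void example_truncate (struct inode * inode, struct page * page)
{
    struct reiserfs_transaction_handle th;
    int jbegin_count = JOURNAL_PER_BALANCE_CNT * 2;

    journal_begin (&th, inode->i_sb, jbegin_count);
    reiserfs_update_inode_transaction (inode);

    /* ->i_size already holds the new size; 1 == update mtime/ctime */
    reiserfs_do_truncate (&th, inode, page, 1);

    journal_end (&th, inode->i_sb, jbegin_count);
}
#endif
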
#ifdef CONFIG_REISERFS_CHECK
// this makes sure that we __append__, and do not overwrite or add holes
static void check_research_for_paste (struct path * path,
                                      const struct cpu_key * p_s_key)
{
    struct item_head * found_ih = get_ih (path);

    if (is_direct_le_ih (found_ih)) {
        if (le_ih_k_offset (found_ih) + op_bytes_number (found_ih, get_last_bh (path)->b_size) !=
            cpu_key_k_offset (p_s_key) ||
            op_bytes_number (found_ih, get_last_bh (path)->b_size) != pos_in_item (path))
            reiserfs_panic (0, "PAP-5720: check_research_for_paste: "
                            "found direct item %h or position (%d) does not match to key %K",
                            found_ih, pos_in_item (path), p_s_key);
    }
    if (is_indirect_le_ih (found_ih)) {
        if (le_ih_k_offset (found_ih) + op_bytes_number (found_ih, get_last_bh (path)->b_size) != cpu_key_k_offset (p_s_key) ||
            I_UNFM_NUM (found_ih) != pos_in_item (path) ||
            get_ih_free_space (found_ih) != 0)
            reiserfs_panic (0, "PAP-5730: check_research_for_paste: "
                            "found indirect item (%h) or position (%d) does not match to key (%K)",
                            found_ih, pos_in_item (path), p_s_key);
    }
}
#endif /* CONFIG_REISERFS_CHECK */

/* Paste bytes onto an existing item. Returns 0 on success, a negative error
   code otherwise. */
int reiserfs_paste_into_item (struct reiserfs_transaction_handle *th,
                              struct path * p_s_search_path,   /* Path to the pasted item.           */
                              const struct cpu_key * p_s_key,  /* Key to search for the needed item. */
                              const char * p_c_body,           /* Pointer to the bytes to paste.     */
                              int n_pasted_size)               /* Size of the pasted bytes.          */
{
    struct tree_balance s_paste_balance;
    int retval;

    init_tb_struct(th, &s_paste_balance, th->t_super, p_s_search_path, n_pasted_size);
#ifdef DISPLACE_NEW_PACKING_LOCALITIES
    s_paste_balance.key = p_s_key->on_disk_key;
#endif

    while ( (retval = fix_nodes(M_PASTE, &s_paste_balance, NULL, p_c_body)) == REPEAT_SEARCH ) {
        /* file system changed while we were in the fix_nodes */
        PROC_INFO_INC( th -> t_super, paste_into_item_restarted );
        retval = search_for_position_by_key (th->t_super, p_s_key, p_s_search_path);
        if (retval == IO_ERROR) {
            retval = -EIO;
            goto error_out;
        }
        if (retval == POSITION_FOUND) {
            reiserfs_warning (th->t_super, "PAP-5710: reiserfs_paste_into_item: entry or pasted byte (%K) exists\n", p_s_key);
            retval = -EEXIST;
            goto error_out;
        }

#ifdef CONFIG_REISERFS_CHECK
        check_research_for_paste (p_s_search_path, p_s_key);
#endif
    }

    /* Perform balancing after all resources have been collected by
       fix_nodes, so that accessing them cannot trigger schedule. */
    if ( retval == CARRY_ON ) {
        do_balance(&s_paste_balance, NULL/*ih*/, p_c_body, M_PASTE);
        return 0;
    }
    retval = (retval == NO_DISK_SPACE) ? -ENOSPC : -EIO;
error_out:
    /* this also releases the path */
    unfix_nodes(&s_paste_balance);
    return retval;
}

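/*
 * Illustration only (kept out of the build with #if 0): a minimal sketch of
 * appending 'count' bytes from 'buf' to the end of an existing direct item
 * holding the tail of 'inode'.  example_append_to_tail is not a real
 * reiserfs function; the real callers (file write, tail conversion) do much
 * more checking.  Offsets are the usual 1-based reiserfs byte offsets, so
 * the first byte to append has key offset i_size + 1.
 */
#if 0
static int example_append_to_tail (struct reiserfs_transaction_handle *th,
                                   struct inode * inode,
                                   const char * buf, int count)
{
    INITIALIZE_PATH (path);
    struct cpu_key key;
    int retval;

    /* key of the first byte to be appended */
    make_cpu_key (&key, inode, inode->i_size + 1, TYPE_DIRECT, 3);

    retval = search_for_position_by_key (inode->i_sb, &key, &path);
    if (retval == IO_ERROR) {
        pathrelse (&path);
        return -EIO;
    }

    /* on success do_balance releases the path, on failure unfix_nodes does */
    return reiserfs_paste_into_item (th, &path, &key, buf, count);
}
#endif
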
/* Insert a new item into the buffer at the path. */
int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
                         struct path * p_s_path,        /* Path to the inserted item.             */
                         const struct cpu_key * key,
                         struct item_head * p_s_ih,     /* Pointer to the item header to insert.  */
                         const char * p_c_body)         /* Pointer to the bytes to insert.        */
{
    struct tree_balance s_ins_balance;
    int retval;

    init_tb_struct(th, &s_ins_balance, th->t_super, p_s_path, IH_SIZE + ih_item_len(p_s_ih));
#ifdef DISPLACE_NEW_PACKING_LOCALITIES
    s_ins_balance.key = key->on_disk_key;
#endif

    /*
    if (p_c_body == 0)
        n_zeros_num = ih_item_len(p_s_ih);
    */
    //    le_key2cpu_key (&key, &(p_s_ih->ih_key));

    while ( (retval = fix_nodes(M_INSERT, &s_ins_balance, p_s_ih, p_c_body)) == REPEAT_SEARCH) {
        /* file system changed while we were in the fix_nodes */
        PROC_INFO_INC( th -> t_super, insert_item_restarted );
        retval = search_item (th->t_super, key, p_s_path);
        if (retval == IO_ERROR) {
            retval = -EIO;
            goto error_out;
        }
        if (retval == ITEM_FOUND) {
            reiserfs_warning (th->t_super, "PAP-5760: reiserfs_insert_item: "
                              "key %K already exists in the tree\n", key);
            retval = -EEXIST;
            goto error_out;
        }
    }

    /* Perform balancing once all resources have been collected by fix_nodes. */
    if ( retval == CARRY_ON ) {
        do_balance (&s_ins_balance, p_s_ih, p_c_body, M_INSERT);
        return 0;
    }

    retval = (retval == NO_DISK_SPACE) ? -ENOSPC : -EIO;
error_out:
    /* also releases the path */
    unfix_nodes(&s_ins_balance);
    return retval;
}
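
/*
 * Both reiserfs_paste_into_item() and reiserfs_insert_item() follow the same
 * convention: 0 on success, -EEXIST if the key turns out to exist after a
 * repeated search, -ENOSPC if fix_nodes ran out of disk space, and -EIO for
 * i/o failures; in every case the search path passed in has been released by
 * the time they return (by do_balance on success, by unfix_nodes otherwise).
 */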