/*
 * Copyright (c) 1988, 1989, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)radix.c     8.4 (Berkeley) 11/2/94
 * $Id: radix.c,v 1.2 2001-09-27 12:01:54 chris Exp $
 */

/*
 * Routines to build and maintain radix trees for routing lookups.
 */
#ifndef _RADIX_H_
#include <sys/param.h>
#ifdef KERNEL
#include <sys/systm.h>
#include <sys/malloc.h>
#define M_DONTWAIT M_NOWAIT
#include <sys/domain.h>
#else
#include <stdlib.h>
#endif
#include <sys/syslog.h>
#include <net/radix.h>
#endif

static struct radix_node *
         rn_lookup __P((void *v_arg, void *m_arg,
                        struct radix_node_head *head));
static int rn_walktree_from __P((struct radix_node_head *h, void *a,
                        void *m, walktree_f_t *f, void *w));
static int rn_walktree __P((struct radix_node_head *, walktree_f_t *, void *));
static struct radix_node
         *rn_delete __P((void *, void *, struct radix_node_head *)),
         *rn_insert __P((void *, struct radix_node_head *, int *,
                        struct radix_node [2])),
         *rn_newpair __P((void *, int, struct radix_node[2])),
         *rn_search __P((void *, struct radix_node *)),
         *rn_search_m __P((void *, struct radix_node *, void *));

static int max_keylen;
static struct radix_mask *rn_mkfreelist;
static struct radix_node_head *mask_rnhead;
static char *addmask_key;
static char normal_chars[] = {0, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe, -1};
static char *rn_zeros, *rn_ones;

#define rn_masktop (mask_rnhead->rnh_treetop)
#undef Bcmp
#define Bcmp(a, b, l) (l == 0 ? 0 : bcmp((caddr_t)(a), (caddr_t)(b), (u_long)l))

static int rn_lexobetter __P((void *m_arg, void *n_arg));
static struct radix_mask *
                rn_new_radix_mask __P((struct radix_node *tt,
                                       struct radix_mask *next));
static int rn_satsifies_leaf __P((char *trial, struct radix_node *leaf,
                                  int skip));

/*
 * The data structure for the keys is a radix tree with one-way
 * branching removed.  The index rn_b at an internal node n represents a bit
 * position to be tested.  The tree is arranged so that all descendants
 * of a node n have keys whose bits all agree up to position rn_b - 1.
 * (We say the index of n is rn_b.)
 *
 * There is at least one descendant which has a one bit at position rn_b,
 * and at least one with a zero there.
 *
 * A route is determined by a pair of key and mask.  We require that the
 * bit-wise logical AND of the key and mask be the key.
 * We define the index of a route associated with a mask to be
 * the first bit number in the mask where 0 occurs (with bit number 0
 * representing the highest order bit).
 *
 * We say a mask is normal if every bit past the index of the mask is 0.
 * If a node n has a descendant (k, m) with index(m) == index(n) == rn_b,
 * and m is a normal mask, then the route applies to every descendant of n.
 * If index(m) < rn_b, this implies that the last few bits of k
 * before bit rn_b are all 0 (and hence the same is true of every descendant
 * of n), so the route applies to all descendants of the node as well.
 *
 * Similar logic shows that a non-normal mask m such that
 * index(m) <= index(n) could potentially apply to many children of n.
 * Thus, for each non-host route, we attach its mask to a list at an internal
 * node as high in the tree as we can go.
 *
 * The present version of the code makes use of normal routes in short-
 * circuiting an explicit mask-and-compare operation when testing whether
 * a key satisfies a normal route, and also in remembering the unique leaf
 * that governs a subtree.
 */
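/*
 * Editor's note: the block below is an illustrative sketch added for
 * clarity; it is not part of the original file and is only compiled if the
 * hypothetical RADIX_EXAMPLE macro is defined.  It shows how the "index" of
 * a mask described above can be computed for the length-prefixed keys this
 * file uses (byte 0 holds the total length, so bit numbering starts at the
 * length byte, exactly as rn_addmask() and rn_match() count bits).
 */
#ifdef RADIX_EXAMPLE
static int
radix_example_mask_index(const u_char *mask, int skip)
{
        int len = mask[0];      /* byte 0 is the key length, as everywhere here */
        int i, j, b = 0;

        /* Skip the header bytes, then walk past the run of all-ones bytes. */
        for (i = skip; i < len && mask[i] == 0xff; i++)
                ;
        /* Count the leading one bits of the first byte that is not 0xff. */
        if (i < len)
                for (j = 0x80; (j & mask[i]) != 0; j >>= 1)
                        b++;
        return (i << 3) + b;
}

/*
 * For example, with skip == 1, the mask {5, 0xff, 0xff, 0xff, 0x00} has
 * index 32 (the length byte occupies bits 0-7) and is normal, while
 * {5, 0xff, 0x00, 0xff, 0x00} has index 16 but is not normal, because a
 * one bit follows the first zero bit.
 */
#endif /* RADIX_EXAMPLE */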

static struct radix_node *
rn_search(v_arg, head)
        void *v_arg;
        struct radix_node *head;
{
        register struct radix_node *x;
        register caddr_t v;

        for (x = head, v = v_arg; x->rn_b >= 0;) {
                if (x->rn_bmask & v[x->rn_off])
                        x = x->rn_r;
                else
                        x = x->rn_l;
        }
        return (x);
}

static struct radix_node *
rn_search_m(v_arg, head, m_arg)
        struct radix_node *head;
        void *v_arg, *m_arg;
{
        register struct radix_node *x;
        register caddr_t v = v_arg, m = m_arg;

        for (x = head; x->rn_b >= 0;) {
                if ((x->rn_bmask & m[x->rn_off]) &&
                    (x->rn_bmask & v[x->rn_off]))
                        x = x->rn_r;
                else
                        x = x->rn_l;
        }
        return x;
}

int
rn_refines(m_arg, n_arg)
        void *m_arg, *n_arg;
{
        register caddr_t m = m_arg, n = n_arg;
        register caddr_t lim, lim2 = lim = n + *(u_char *)n;
        int longer = (*(u_char *)n++) - (int)(*(u_char *)m++);
        int masks_are_equal = 1;

        if (longer > 0)
                lim -= longer;
        while (n < lim) {
                if (*n & ~(*m))
                        return 0;
                if (*n++ != *m++)
                        masks_are_equal = 0;
        }
        while (n < lim2)
                if (*n++)
                        return 0;
        if (masks_are_equal && (longer < 0))
                for (lim2 = m - longer; m < lim2; )
                        if (*m++)
                                return 1;
        return (!masks_are_equal);
}
181 |
|
|
|
182 |
|
|
struct radix_node *
|
183 |
|
|
rn_lookup(v_arg, m_arg, head)
|
184 |
|
|
void *v_arg, *m_arg;
|
185 |
|
|
struct radix_node_head *head;
|
186 |
|
|
{
|
187 |
|
|
register struct radix_node *x;
|
188 |
|
|
caddr_t netmask = 0;
|
189 |
|
|
|
190 |
|
|
if (m_arg) {
|
191 |
|
|
if ((x = rn_addmask(m_arg, 1, head->rnh_treetop->rn_off)) == 0)
|
192 |
|
|
return (0);
|
193 |
|
|
netmask = x->rn_key;
|
194 |
|
|
}
|
195 |
|
|
x = rn_match(v_arg, head);
|
196 |
|
|
if (x && netmask) {
|
197 |
|
|
while (x && x->rn_mask != netmask)
|
198 |
|
|
x = x->rn_dupedkey;
|
199 |
|
|
}
|
200 |
|
|
return x;
|
201 |
|
|
}
|
202 |
|
|
|
203 |
|
|
static int
|
204 |
|
|
rn_satsifies_leaf(trial, leaf, skip)
|
205 |
|
|
char *trial;
|
206 |
|
|
register struct radix_node *leaf;
|
207 |
|
|
int skip;
|
208 |
|
|
{
|
209 |
|
|
register char *cp = trial, *cp2 = leaf->rn_key, *cp3 = leaf->rn_mask;
|
210 |
|
|
char *cplim;
|
211 |
|
|
int length = min(*(u_char *)cp, *(u_char *)cp2);
|
212 |
|
|
|
213 |
|
|
if (cp3 == 0)
|
214 |
|
|
cp3 = rn_ones;
|
215 |
|
|
else
|
216 |
|
|
length = min(length, *(u_char *)cp3);
|
217 |
|
|
cplim = cp + length; cp3 += skip; cp2 += skip;
|
218 |
|
|
for (cp += skip; cp < cplim; cp++, cp2++, cp3++)
|
219 |
|
|
if ((*cp ^ *cp2) & *cp3)
|
220 |
|
|
return 0;
|
221 |
|
|
return 1;
|
222 |
|
|
}
|
223 |
|
|
|
224 |
|
|
struct radix_node *
|
225 |
|
|
rn_match(v_arg, head)
|
226 |
|
|
void *v_arg;
|
227 |
|
|
struct radix_node_head *head;
|
228 |
|
|
{
|
229 |
|
|
caddr_t v = v_arg;
|
230 |
|
|
register struct radix_node *t = head->rnh_treetop, *x;
|
231 |
|
|
register caddr_t cp = v, cp2;
|
232 |
|
|
caddr_t cplim;
|
233 |
|
|
struct radix_node *saved_t, *top = t;
|
234 |
|
|
int off = t->rn_off, vlen = *(u_char *)cp, matched_off;
|
235 |
|
|
register int test, b, rn_b;
|
236 |
|
|
|
237 |
|
|
/*
|
238 |
|
|
* Open code rn_search(v, top) to avoid overhead of extra
|
239 |
|
|
* subroutine call.
|
240 |
|
|
*/
|
241 |
|
|
for (; t->rn_b >= 0; ) {
|
242 |
|
|
if (t->rn_bmask & cp[t->rn_off])
|
243 |
|
|
t = t->rn_r;
|
244 |
|
|
else
|
245 |
|
|
t = t->rn_l;
|
246 |
|
|
}
|
247 |
|
|
/*
|
248 |
|
|
* See if we match exactly as a host destination
|
249 |
|
|
* or at least learn how many bits match, for normal mask finesse.
|
250 |
|
|
*
|
251 |
|
|
* It doesn't hurt us to limit how many bytes to check
|
252 |
|
|
* to the length of the mask, since if it matches we had a genuine
|
253 |
|
|
* match and the leaf we have is the most specific one anyway;
|
254 |
|
|
* if it didn't match with a shorter length it would fail
|
255 |
|
|
* with a long one. This wins big for class B&C netmasks which
|
256 |
|
|
* are probably the most common case...
|
257 |
|
|
*/
|
258 |
|
|
if (t->rn_mask)
|
259 |
|
|
vlen = *(u_char *)t->rn_mask;
|
260 |
|
|
cp += off; cp2 = t->rn_key + off; cplim = v + vlen;
|
261 |
|
|
for (; cp < cplim; cp++, cp2++)
|
262 |
|
|
if (*cp != *cp2)
|
263 |
|
|
goto on1;
|
264 |
|
|
/*
|
265 |
|
|
* This extra grot is in case we are explicitly asked
|
266 |
|
|
* to look up the default. Ugh!
|
267 |
|
|
*/
|
268 |
|
|
if ((t->rn_flags & RNF_ROOT) && t->rn_dupedkey)
|
269 |
|
|
t = t->rn_dupedkey;
|
270 |
|
|
return t;
|
271 |
|
|
on1:
|
272 |
|
|
test = (*cp ^ *cp2) & 0xff; /* find first bit that differs */
|
273 |
|
|
for (b = 7; (test >>= 1) > 0;)
|
274 |
|
|
b--;
|
275 |
|
|
matched_off = cp - v;
|
276 |
|
|
b += matched_off << 3;
|
277 |
|
|
rn_b = -1 - b;
|
278 |
|
|
/*
|
279 |
|
|
* If there is a host route in a duped-key chain, it will be first.
|
280 |
|
|
*/
|
281 |
|
|
if ((saved_t = t)->rn_mask == 0)
|
282 |
|
|
t = t->rn_dupedkey;
|
283 |
|
|
for (; t; t = t->rn_dupedkey)
|
284 |
|
|
/*
|
285 |
|
|
* Even if we don't match exactly as a host,
|
286 |
|
|
* we may match if the leaf we wound up at is
|
287 |
|
|
* a route to a net.
|
288 |
|
|
*/
|
289 |
|
|
if (t->rn_flags & RNF_NORMAL) {
|
290 |
|
|
if (rn_b <= t->rn_b)
|
291 |
|
|
return t;
|
292 |
|
|
} else if (rn_satsifies_leaf(v, t, matched_off))
|
293 |
|
|
return t;
|
294 |
|
|
t = saved_t;
|
295 |
|
|
/* start searching up the tree */
|
296 |
|
|
do {
|
297 |
|
|
register struct radix_mask *m;
|
298 |
|
|
t = t->rn_p;
|
299 |
|
|
m = t->rn_mklist;
|
300 |
|
|
if (m) {
|
301 |
|
|
/*
|
302 |
|
|
* If non-contiguous masks ever become important
|
303 |
|
|
* we can restore the masking and open coding of
|
304 |
|
|
* the search and satisfaction test and put the
|
305 |
|
|
* calculation of "off" back before the "do".
|
306 |
|
|
*/
|
307 |
|
|
do {
|
308 |
|
|
if (m->rm_flags & RNF_NORMAL) {
|
309 |
|
|
if (rn_b <= m->rm_b)
|
310 |
|
|
return (m->rm_leaf);
|
311 |
|
|
} else {
|
312 |
|
|
off = min(t->rn_off, matched_off);
|
313 |
|
|
x = rn_search_m(v, t, m->rm_mask);
|
314 |
|
|
while (x && x->rn_mask != m->rm_mask)
|
315 |
|
|
x = x->rn_dupedkey;
|
316 |
|
|
if (x && rn_satsifies_leaf(v, x, off))
|
317 |
|
|
return x;
|
318 |
|
|
}
|
319 |
|
|
m = m->rm_mklist;
|
320 |
|
|
} while (m);
|
321 |
|
|
}
|
322 |
|
|
} while (t != top);
|
323 |
|
|
return 0;
|
324 |
|
|
};
|
325 |
|
|
|
326 |
|
|
#ifdef RN_DEBUG
|
327 |
|
|
int rn_nodenum;
|
328 |
|
|
struct radix_node *rn_clist;
|
329 |
|
|
int rn_saveinfo;
|
330 |
|
|
int rn_debug = 1;
|
331 |
|
|
#endif
|
332 |
|
|
|
333 |
|
|
static struct radix_node *
|
334 |
|
|
rn_newpair(v, b, nodes)
|
335 |
|
|
void *v;
|
336 |
|
|
int b;
|
337 |
|
|
struct radix_node nodes[2];
|
338 |
|
|
{
|
339 |
|
|
register struct radix_node *tt = nodes, *t = tt + 1;
|
340 |
|
|
t->rn_b = b; t->rn_bmask = 0x80 >> (b & 7);
|
341 |
|
|
t->rn_l = tt; t->rn_off = b >> 3;
|
342 |
|
|
tt->rn_b = -1; tt->rn_key = (caddr_t)v; tt->rn_p = t;
|
343 |
|
|
tt->rn_flags = t->rn_flags = RNF_ACTIVE;
|
344 |
|
|
#ifdef RN_DEBUG
|
345 |
|
|
tt->rn_info = rn_nodenum++; t->rn_info = rn_nodenum++;
|
346 |
|
|
tt->rn_twin = t; tt->rn_ybro = rn_clist; rn_clist = tt;
|
347 |
|
|
#endif
|
348 |
|
|
return t;
|
349 |
|
|
}
|
350 |
|
|
|
351 |
|
|
static struct radix_node *
|
352 |
|
|
rn_insert(v_arg, head, dupentry, nodes)
|
353 |
|
|
void *v_arg;
|
354 |
|
|
struct radix_node_head *head;
|
355 |
|
|
int *dupentry;
|
356 |
|
|
struct radix_node nodes[2];
|
357 |
|
|
{
|
358 |
|
|
caddr_t v = v_arg;
|
359 |
|
|
struct radix_node *top = head->rnh_treetop;
|
360 |
|
|
int head_off = top->rn_off, vlen = (int)*((u_char *)v);
|
361 |
|
|
register struct radix_node *t = rn_search(v_arg, top);
|
362 |
|
|
register caddr_t cp = v + head_off;
|
363 |
|
|
register int b;
|
364 |
|
|
struct radix_node *tt;
|
365 |
|
|
/*
|
366 |
|
|
* Find first bit at which v and t->rn_key differ
|
367 |
|
|
*/
|
368 |
|
|
{
|
369 |
|
|
register caddr_t cp2 = t->rn_key + head_off;
|
370 |
|
|
register int cmp_res;
|
371 |
|
|
caddr_t cplim = v + vlen;
|
372 |
|
|
|
373 |
|
|
while (cp < cplim)
|
374 |
|
|
if (*cp2++ != *cp++)
|
375 |
|
|
goto on1;
|
376 |
|
|
*dupentry = 1;
|
377 |
|
|
return t;
|
378 |
|
|
on1:
|
379 |
|
|
*dupentry = 0;
|
380 |
|
|
cmp_res = (cp[-1] ^ cp2[-1]) & 0xff;
|
381 |
|
|
for (b = (cp - v) << 3; cmp_res; b--)
|
382 |
|
|
cmp_res >>= 1;
|
383 |
|
|
}
|
384 |
|
|
{
|
385 |
|
|
register struct radix_node *p, *x = top;
|
386 |
|
|
cp = v;
|
387 |
|
|
do {
|
388 |
|
|
p = x;
|
389 |
|
|
if (cp[x->rn_off] & x->rn_bmask)
|
390 |
|
|
x = x->rn_r;
|
391 |
|
|
else x = x->rn_l;
|
392 |
|
|
} while (b > (unsigned) x->rn_b); /* x->rn_b < b && x->rn_b >= 0 */
|
393 |
|
|
#ifdef RN_DEBUG
|
394 |
|
|
if (rn_debug)
|
395 |
|
|
log(LOG_DEBUG, "rn_insert: Going In:\n"), traverse(p);
|
396 |
|
|
#endif
|
397 |
|
|
t = rn_newpair(v_arg, b, nodes); tt = t->rn_l;
|
398 |
|
|
if ((cp[p->rn_off] & p->rn_bmask) == 0)
|
399 |
|
|
p->rn_l = t;
|
400 |
|
|
else
|
401 |
|
|
p->rn_r = t;
|
402 |
|
|
x->rn_p = t; t->rn_p = p; /* frees x, p as temp vars below */
|
403 |
|
|
if ((cp[t->rn_off] & t->rn_bmask) == 0) {
|
404 |
|
|
t->rn_r = x;
|
405 |
|
|
} else {
|
406 |
|
|
t->rn_r = tt; t->rn_l = x;
|
407 |
|
|
}
|
408 |
|
|
#ifdef RN_DEBUG
|
409 |
|
|
if (rn_debug)
|
410 |
|
|
log(LOG_DEBUG, "rn_insert: Coming Out:\n"), traverse(p);
|
411 |
|
|
#endif
|
412 |
|
|
}
|
413 |
|
|
return (tt);
|
414 |
|
|
}
|
415 |
|
|
|
416 |
|
|
struct radix_node *
|
417 |
|
|
rn_addmask(n_arg, search, skip)
|
418 |
|
|
int search, skip;
|
419 |
|
|
void *n_arg;
|
420 |
|
|
{
|
421 |
|
|
caddr_t netmask = (caddr_t)n_arg;
|
422 |
|
|
register struct radix_node *x;
|
423 |
|
|
register caddr_t cp, cplim;
|
424 |
|
|
register int b = 0, mlen, j;
|
425 |
|
|
int maskduplicated, m0, isnormal;
|
426 |
|
|
struct radix_node *saved_x;
|
427 |
|
|
static int last_zeroed = 0;
|
428 |
|
|
|
429 |
|
|
if ((mlen = *(u_char *)netmask) > max_keylen)
|
430 |
|
|
mlen = max_keylen;
|
431 |
|
|
if (skip == 0)
|
432 |
|
|
skip = 1;
|
433 |
|
|
if (mlen <= skip)
|
434 |
|
|
return (mask_rnhead->rnh_nodes);
|
435 |
|
|
if (skip > 1)
|
436 |
|
|
Bcopy(rn_ones + 1, addmask_key + 1, skip - 1);
|
437 |
|
|
if ((m0 = mlen) > skip)
|
438 |
|
|
Bcopy(netmask + skip, addmask_key + skip, mlen - skip);
|
439 |
|
|
/*
|
440 |
|
|
* Trim trailing zeroes.
|
441 |
|
|
*/
|
442 |
|
|
for (cp = addmask_key + mlen; (cp > addmask_key) && cp[-1] == 0;)
|
443 |
|
|
cp--;
|
444 |
|
|
mlen = cp - addmask_key;
|
445 |
|
|
if (mlen <= skip) {
|
446 |
|
|
if (m0 >= last_zeroed)
|
447 |
|
|
last_zeroed = mlen;
|
448 |
|
|
return (mask_rnhead->rnh_nodes);
|
449 |
|
|
}
|
450 |
|
|
if (m0 < last_zeroed)
|
451 |
|
|
Bzero(addmask_key + m0, last_zeroed - m0);
|
452 |
|
|
*addmask_key = last_zeroed = mlen;
|
453 |
|
|
x = rn_search(addmask_key, rn_masktop);
|
454 |
|
|
if (Bcmp(addmask_key, x->rn_key, mlen) != 0)
|
455 |
|
|
x = 0;
|
456 |
|
|
if (x || search)
|
457 |
|
|
return (x);
|
458 |
|
|
R_Malloc(x, struct radix_node *, max_keylen + 2 * sizeof (*x));
|
459 |
|
|
if ((saved_x = x) == 0)
|
460 |
|
|
return (0);
|
461 |
|
|
Bzero(x, max_keylen + 2 * sizeof (*x));
|
462 |
|
|
netmask = cp = (caddr_t)(x + 2);
|
463 |
|
|
Bcopy(addmask_key, cp, mlen);
|
464 |
|
|
x = rn_insert(cp, mask_rnhead, &maskduplicated, x);
|
465 |
|
|
if (maskduplicated) {
|
466 |
|
|
log(LOG_ERR, "rn_addmask: mask impossibly already in tree");
|
467 |
|
|
Free(saved_x);
|
468 |
|
|
return (x);
|
469 |
|
|
}
|
470 |
|
|
/*
|
471 |
|
|
* Calculate index of mask, and check for normalcy.
|
472 |
|
|
*/
|
473 |
|
|
cplim = netmask + mlen; isnormal = 1;
|
474 |
|
|
for (cp = netmask + skip; (cp < cplim) && *(u_char *)cp == 0xff;)
|
475 |
|
|
cp++;
|
476 |
|
|
if (cp != cplim) {
|
477 |
|
|
for (j = 0x80; (j & *cp) != 0; j >>= 1)
|
478 |
|
|
b++;
|
479 |
|
|
if (*cp != normal_chars[b] || cp != (cplim - 1))
|
480 |
|
|
isnormal = 0;
|
481 |
|
|
}
|
482 |
|
|
b += (cp - netmask) << 3;
|
483 |
|
|
x->rn_b = -1 - b;
|
484 |
|
|
if (isnormal)
|
485 |
|
|
x->rn_flags |= RNF_NORMAL;
|
486 |
|
|
return (x);
|
487 |
|
|
}
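/*
 * Editor's note: illustrative sketch added for clarity; not part of the
 * original file and guarded by the hypothetical RADIX_EXAMPLE macro.
 * The normalcy test above boils down to this: after the run of 0xff bytes,
 * the byte holding the first zero bit must be one of normal_chars[] and
 * every later byte must be zero, i.e. the mask is a contiguous run of one
 * bits followed only by zero bits.
 */
#ifdef RADIX_EXAMPLE
static int
radix_example_is_contiguous(const u_char *mask)
{
        int len = mask[0], i = 1;

        while (i < len && mask[i] == 0xff)      /* leading all-ones bytes */
                i++;
        if (i == len)
                return 1;
        /* The first partial byte must have no "holes"... */
        switch (mask[i]) {
        case 0x00: case 0x80: case 0xc0: case 0xe0:
        case 0xf0: case 0xf8: case 0xfc: case 0xfe:
                break;
        default:
                return 0;                       /* e.g. 0xf5 is non-contiguous */
        }
        /* ...and every byte after it must be zero. */
        for (i++; i < len; i++)
                if (mask[i] != 0)
                        return 0;
        return 1;
}
#endif /* RADIX_EXAMPLE */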

static int      /* XXX: arbitrary ordering for non-contiguous masks */
rn_lexobetter(m_arg, n_arg)
        void *m_arg, *n_arg;
{
        register u_char *mp = m_arg, *np = n_arg, *lim;

        if (*mp > *np)
                return 1;  /* not really, but need to check longer one first */
        if (*mp == *np)
                for (lim = mp + *mp; mp < lim;)
                        if (*mp++ > *np++)
                                return 1;
        return 0;
}

static struct radix_mask *
rn_new_radix_mask(tt, next)
        register struct radix_node *tt;
        register struct radix_mask *next;
{
        register struct radix_mask *m;

        MKGet(m);
        if (m == 0) {
                log(LOG_ERR, "Mask for route not entered\n");
                return (0);
        }
        Bzero(m, sizeof *m);
        m->rm_b = tt->rn_b;
        m->rm_flags = tt->rn_flags;
        if (tt->rn_flags & RNF_NORMAL)
                m->rm_leaf = tt;
        else
                m->rm_mask = tt->rn_mask;
        m->rm_mklist = next;
        tt->rn_mklist = m;
        return m;
}
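/*
 * Editor's note (added for clarity, not part of the original file):
 * MKGet() and MKFree() are macros supplied by the companion radix.h.
 * In 4.4BSD-derived headers they are typically a tiny free list built on
 * rn_mkfreelist, roughly along the lines of the sketch below; check the
 * radix.h that ships with this tree for the authoritative definition.
 *
 *      #define MKGet(m) {                                               \
 *              if (rn_mkfreelist) {                                     \
 *                      m = rn_mkfreelist;                               \
 *                      rn_mkfreelist = (m)->rm_mklist;                  \
 *              } else                                                   \
 *                      R_Malloc(m, struct radix_mask *, sizeof (*(m))); }
 *
 *      #define MKFree(m) { (m)->rm_mklist = rn_mkfreelist; rn_mkfreelist = (m); }
 *
 * This is why the otherwise-unreferenced rn_mkfreelist is declared near the
 * top of this file.
 */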

struct radix_node *
rn_addroute(v_arg, n_arg, head, treenodes)
        void *v_arg, *n_arg;
        struct radix_node_head *head;
        struct radix_node treenodes[2];
{
        caddr_t v = (caddr_t)v_arg, netmask = (caddr_t)n_arg;
        register struct radix_node *t, *x = 0, *tt;
        struct radix_node *saved_tt, *top = head->rnh_treetop;
        short b = 0, b_leaf = 0;
        int keyduplicated;
        caddr_t mmask;
        struct radix_mask *m, **mp;

        /*
         * In dealing with non-contiguous masks, there may be
         * many different routes which have the same mask.
         * We will find it useful to have a unique pointer to
         * the mask to speed avoiding duplicate references at
         * nodes and possibly save time in calculating indices.
         */
        if (netmask) {
                if ((x = rn_addmask(netmask, 0, top->rn_off)) == 0)
                        return (0);
                b_leaf = x->rn_b;
                b = -1 - x->rn_b;
                netmask = x->rn_key;
        }
        /*
         * Deal with duplicated keys: attach node to previous instance
         */
        saved_tt = tt = rn_insert(v, head, &keyduplicated, treenodes);
        if (keyduplicated) {
                for (t = tt; tt; t = tt, tt = tt->rn_dupedkey) {
                        if (tt->rn_mask == netmask)
                                return (0);
                        if (netmask == 0 ||
                            (tt->rn_mask &&
                             ((b_leaf < tt->rn_b) || /* index(netmask) > node */
                               rn_refines(netmask, tt->rn_mask) ||
                               rn_lexobetter(netmask, tt->rn_mask))))
                                break;
                }
                /*
                 * If the mask is not duplicated, we wouldn't
                 * find it among possible duplicate key entries
                 * anyway, so the above test doesn't hurt.
                 *
                 * We sort the masks for a duplicated key the same way as
                 * in a masklist -- most specific to least specific.
                 * This may require the unfortunate nuisance of relocating
                 * the head of the list.
                 */
                if (tt == saved_tt) {
                        struct radix_node *xx = x;
                        /* link in at head of list */
                        (tt = treenodes)->rn_dupedkey = t;
                        tt->rn_flags = t->rn_flags;
                        tt->rn_p = x = t->rn_p;
                        t->rn_p = tt; /* parent */
                        if (x->rn_l == t) x->rn_l = tt; else x->rn_r = tt;
                        saved_tt = tt; x = xx;
                } else {
                        (tt = treenodes)->rn_dupedkey = t->rn_dupedkey;
                        t->rn_dupedkey = tt;
                        tt->rn_p = t; /* parent */
                        if (tt->rn_dupedkey) /* parent */
                                tt->rn_dupedkey->rn_p = tt; /* parent */
                }
#ifdef RN_DEBUG
                t = tt + 1; tt->rn_info = rn_nodenum++; t->rn_info = rn_nodenum++;
                tt->rn_twin = t; tt->rn_ybro = rn_clist; rn_clist = tt;
#endif
                tt->rn_key = (caddr_t) v;
                tt->rn_b = -1;
                tt->rn_flags = RNF_ACTIVE;
        }
        /*
         * Put mask in tree.
         */
        if (netmask) {
                tt->rn_mask = netmask;
                tt->rn_b = x->rn_b;
                tt->rn_flags |= x->rn_flags & RNF_NORMAL;
        }
        t = saved_tt->rn_p;
        if (keyduplicated)
                goto on2;
        b_leaf = -1 - t->rn_b;
        if (t->rn_r == saved_tt) x = t->rn_l; else x = t->rn_r;
        /* Promote general routes from below */
        if (x->rn_b < 0) {
                for (mp = &t->rn_mklist; x; x = x->rn_dupedkey)
                        if (x->rn_mask && (x->rn_b >= b_leaf) && x->rn_mklist == 0) {
                                *mp = m = rn_new_radix_mask(x, 0);
                                if (m)
                                        mp = &m->rm_mklist;
                        }
        } else if (x->rn_mklist) {
                /*
                 * Skip over masks whose index is > that of new node
                 */
                for (mp = &x->rn_mklist; (m = *mp); mp = &m->rm_mklist)
                        if (m->rm_b >= b_leaf)
                                break;
                t->rn_mklist = m; *mp = 0;
        }
on2:
        /* Add new route to highest possible ancestor's list */
        if ((netmask == 0) || (b > t->rn_b))
                return tt; /* can't lift at all */
        b_leaf = tt->rn_b;
        do {
                x = t;
                t = t->rn_p;
        } while (b <= t->rn_b && x != top);
        /*
         * Search through routes associated with node to
         * insert new route according to index.
         * Need same criteria as when sorting dupedkeys to avoid
         * double loop on deletion.
         */
        for (mp = &x->rn_mklist; (m = *mp); mp = &m->rm_mklist) {
                if (m->rm_b < b_leaf)
                        continue;
                if (m->rm_b > b_leaf)
                        break;
                if (m->rm_flags & RNF_NORMAL) {
                        mmask = m->rm_leaf->rn_mask;
                        if (tt->rn_flags & RNF_NORMAL) {
                                log(LOG_ERR,
                                    "Non-unique normal route, mask not entered");
                                return tt;
                        }
                } else
                        mmask = m->rm_mask;
                if (mmask == netmask) {
                        m->rm_refs++;
                        tt->rn_mklist = m;
                        return tt;
                }
                if (rn_refines(netmask, mmask) || rn_lexobetter(netmask, mmask))
                        break;
        }
        *mp = rn_new_radix_mask(tt, *mp);
        return tt;
}
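/*
 * Editor's note: illustrative sketch, not part of the original file and
 * guarded by the hypothetical RADIX_EXAMPLE macro.  The caller owns the
 * storage for the two radix_node entries (in the kernel they live inside
 * the rtentry); rn_addroute() and rn_delete() only link and unlink them.
 * The key and mask buffers must stay valid as long as the route is in the
 * tree.
 */
#ifdef RADIX_EXAMPLE
struct radix_example_route {
        struct radix_node nodes[2];     /* leaf + internal node storage */
        u_char key[8];                  /* length-prefixed destination */
        u_char mask[8];                 /* length-prefixed netmask */
};

static struct radix_node *
radix_example_add(struct radix_node_head *rnh, struct radix_example_route *r)
{
        /* rnh_addaddr is rn_addroute; it returns the new leaf (normally
         * r->nodes) or NULL if the (key, mask) pair is already present. */
        return rnh->rnh_addaddr(r->key, r->mask, rnh, r->nodes);
}

static struct radix_node *
radix_example_del(struct radix_node_head *rnh, struct radix_example_route *r)
{
        /* rnh_deladdr is rn_delete; it unlinks and returns the leaf. */
        return rnh->rnh_deladdr(r->key, r->mask, rnh);
}
#endif /* RADIX_EXAMPLE */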

static struct radix_node *
rn_delete(v_arg, netmask_arg, head)
        void *v_arg, *netmask_arg;
        struct radix_node_head *head;
{
        register struct radix_node *t, *p, *x, *tt;
        struct radix_mask *m, *saved_m, **mp;
        struct radix_node *dupedkey, *saved_tt, *top;
        caddr_t v, netmask;
        int b, head_off, vlen;

        v = v_arg;
        netmask = netmask_arg;
        x = head->rnh_treetop;
        tt = rn_search(v, x);
        head_off = x->rn_off;
        vlen = *(u_char *)v;
        saved_tt = tt;
        top = x;
        if (tt == 0 ||
            Bcmp(v + head_off, tt->rn_key + head_off, vlen - head_off))
                return (0);
        /*
         * Delete our route from mask lists.
         */
        if (netmask) {
                if ((x = rn_addmask(netmask, 1, head_off)) == 0)
                        return (0);
                netmask = x->rn_key;
                while (tt->rn_mask != netmask)
                        if ((tt = tt->rn_dupedkey) == 0)
                                return (0);
        }
        if (tt->rn_mask == 0 || (saved_m = m = tt->rn_mklist) == 0)
                goto on1;
        if (tt->rn_flags & RNF_NORMAL) {
                if (m->rm_leaf != tt || m->rm_refs > 0) {
                        log(LOG_ERR, "rn_delete: inconsistent annotation\n");
                        return 0;  /* dangling ref could cause disaster */
                }
        } else {
                if (m->rm_mask != tt->rn_mask) {
                        log(LOG_ERR, "rn_delete: inconsistent annotation\n");
                        goto on1;
                }
                if (--m->rm_refs >= 0)
                        goto on1;
        }
        b = -1 - tt->rn_b;
        t = saved_tt->rn_p;
        if (b > t->rn_b)
                goto on1; /* Wasn't lifted at all */
        do {
                x = t;
                t = t->rn_p;
        } while (b <= t->rn_b && x != top);
        for (mp = &x->rn_mklist; (m = *mp); mp = &m->rm_mklist)
                if (m == saved_m) {
                        *mp = m->rm_mklist;
                        MKFree(m);
                        break;
                }
        if (m == 0) {
                log(LOG_ERR, "rn_delete: couldn't find our annotation\n");
                if (tt->rn_flags & RNF_NORMAL)
                        return (0); /* Dangling ref to us */
        }
on1:
        /*
         * Eliminate us from tree
         */
        if (tt->rn_flags & RNF_ROOT)
                return (0);
#ifdef RN_DEBUG
        /* Get us out of the creation list */
        for (t = rn_clist; t && t->rn_ybro != tt; t = t->rn_ybro) {}
        if (t) t->rn_ybro = tt->rn_ybro;
#endif
        t = tt->rn_p;
        dupedkey = saved_tt->rn_dupedkey;
        if (dupedkey) {
                /*
                 * at this point, tt is the deletion target and saved_tt
                 * is the head of the dupekey chain
                 */
                if (tt == saved_tt) {
                        /* remove from head of chain */
                        x = dupedkey; x->rn_p = t;
                        if (t->rn_l == tt) t->rn_l = x; else t->rn_r = x;
                } else {
                        /* find node in front of tt on the chain */
                        for (x = p = saved_tt; p && p->rn_dupedkey != tt;)
                                p = p->rn_dupedkey;
                        if (p) {
                                p->rn_dupedkey = tt->rn_dupedkey;
                                if (tt->rn_dupedkey) /* parent */
                                        tt->rn_dupedkey->rn_p = p; /* parent */
                        } else log(LOG_ERR, "rn_delete: couldn't find us\n");
                }
                t = tt + 1;
                if (t->rn_flags & RNF_ACTIVE) {
#ifndef RN_DEBUG
                        *++x = *t; p = t->rn_p;
#else
                        b = t->rn_info; *++x = *t; t->rn_info = b; p = t->rn_p;
#endif
                        if (p->rn_l == t) p->rn_l = x; else p->rn_r = x;
                        x->rn_l->rn_p = x; x->rn_r->rn_p = x;
                }
                goto out;
        }
        if (t->rn_l == tt) x = t->rn_r; else x = t->rn_l;
        p = t->rn_p;
        if (p->rn_r == t) p->rn_r = x; else p->rn_l = x;
        x->rn_p = p;
        /*
         * Demote routes attached to us.
         */
        if (t->rn_mklist) {
                if (x->rn_b >= 0) {
                        for (mp = &x->rn_mklist; (m = *mp);)
                                mp = &m->rm_mklist;
                        *mp = t->rn_mklist;
                } else {
                        /* If there are any key,mask pairs in a sibling
                           duped-key chain, some subset will appear sorted
                           in the same order attached to our mklist */
                        for (m = t->rn_mklist; m && x; x = x->rn_dupedkey)
                                if (m == x->rn_mklist) {
                                        struct radix_mask *mm = m->rm_mklist;
                                        x->rn_mklist = 0;
                                        if (--(m->rm_refs) < 0)
                                                MKFree(m);
                                        m = mm;
                                }
                        if (m)
                                log(LOG_ERR, "%s %p at %p\n",
                                    "rn_delete: Orphaned Mask", m, x);
                }
        }
        /*
         * We may be holding an active internal node in the tree.
         */
        x = tt + 1;
        if (t != x) {
#ifndef RN_DEBUG
                *t = *x;
#else
                b = t->rn_info; *t = *x; t->rn_info = b;
#endif
                t->rn_l->rn_p = t; t->rn_r->rn_p = t;
                p = x->rn_p;
                if (p->rn_l == x) p->rn_l = t; else p->rn_r = t;
        }
out:
        tt->rn_flags &= ~RNF_ACTIVE;
        tt[1].rn_flags &= ~RNF_ACTIVE;
        return (tt);
}

/*
 * This is the same as rn_walktree() except for the parameters and the
 * exit.
 */
static int
rn_walktree_from(h, a, m, f, w)
        struct radix_node_head *h;
        void *a, *m;
        walktree_f_t *f;
        void *w;
{
        int error;
        struct radix_node *base, *next;
        u_char *xa = (u_char *)a;
        u_char *xm = (u_char *)m;
        register struct radix_node *rn, *last = 0 /* shut up gcc */;
        int stopping = 0;
        int lastb;

        /*
         * rn_search_m is sort-of-open-coded here.
         */
        /* printf("about to search\n"); */
        for (rn = h->rnh_treetop; rn->rn_b >= 0; ) {
                last = rn;
                /* printf("rn_b %d, rn_bmask %x, xm[rn_off] %x\n",
                       rn->rn_b, rn->rn_bmask, xm[rn->rn_off]); */
                if (!(rn->rn_bmask & xm[rn->rn_off])) {
                        break;
                }
                if (rn->rn_bmask & xa[rn->rn_off]) {
                        rn = rn->rn_r;
                } else {
                        rn = rn->rn_l;
                }
        }
        /* printf("done searching\n"); */

        /*
         * Two cases: either we stepped off the end of our mask,
         * in which case last == rn, or we reached a leaf, in which
         * case we want to start from the last node we looked at.
         * Either way, last is the node we want to start from.
         */
        rn = last;
        lastb = rn->rn_b;

        /* printf("rn %p, lastb %d\n", rn, lastb);*/

        /*
         * This gets complicated because we may delete the node
         * while applying the function f to it, so we need to calculate
         * the successor node in advance.
         */
        while (rn->rn_b >= 0)
                rn = rn->rn_l;

        while (!stopping) {
                /* printf("node %p (%d)\n", rn, rn->rn_b); */
                base = rn;
                /* If at right child go back up, otherwise, go right */
                while (rn->rn_p->rn_r == rn && !(rn->rn_flags & RNF_ROOT)) {
                        rn = rn->rn_p;

                        /* if went up beyond last, stop */
                        if (rn->rn_b < lastb) {
                                stopping = 1;
                                /* printf("up too far\n"); */
                        }
                }

                /* Find the next *leaf* since next node might vanish, too */
                for (rn = rn->rn_p->rn_r; rn->rn_b >= 0;)
                        rn = rn->rn_l;
                next = rn;
                /* Process leaves */
                while ((rn = base) != 0) {
                        base = rn->rn_dupedkey;
                        /* printf("leaf %p\n", rn); */
                        if (!(rn->rn_flags & RNF_ROOT)
                            && (error = (*f)(rn, w)))
                                return (error);
                }
                rn = next;

                if (rn->rn_flags & RNF_ROOT) {
                        /* printf("root, stopping"); */
                        stopping = 1;
                }
        }
        return 0;
}

static int
rn_walktree(h, f, w)
        struct radix_node_head *h;
        walktree_f_t *f;
        void *w;
{
        int error;
        struct radix_node *base, *next;
        register struct radix_node *rn = h->rnh_treetop;
        /*
         * This gets complicated because we may delete the node
         * while applying the function f to it, so we need to calculate
         * the successor node in advance.
         */
        /* First time through node, go left */
        while (rn->rn_b >= 0)
                rn = rn->rn_l;
        for (;;) {
                base = rn;
                /* If at right child go back up, otherwise, go right */
                while (rn->rn_p->rn_r == rn && (rn->rn_flags & RNF_ROOT) == 0)
                        rn = rn->rn_p;
                /* Find the next *leaf* since next node might vanish, too */
                for (rn = rn->rn_p->rn_r; rn->rn_b >= 0;)
                        rn = rn->rn_l;
                next = rn;
                /* Process leaves */
                while ((rn = base)) {
                        base = rn->rn_dupedkey;
                        if (!(rn->rn_flags & RNF_ROOT) && (error = (*f)(rn, w)))
                                return (error);
                }
                rn = next;
                if (rn->rn_flags & RNF_ROOT)
                        return (0);
        }
        /* NOTREACHED */
}
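/*
 * Editor's note: illustrative sketch, not part of the original file and
 * guarded by the hypothetical RADIX_EXAMPLE macro.  The walker visits every
 * leaf (including duped-key chains); a walktree_f_t callback receives one
 * call per route plus the opaque cookie, and returning non-zero aborts the
 * walk and is passed back to the caller.
 */
#ifdef RADIX_EXAMPLE
static int
radix_example_count_leaf(struct radix_node *rn, void *cookie)
{
        int *count = cookie;

        (*count)++;             /* rn->rn_key / rn->rn_mask identify the route */
        return 0;               /* keep walking */
}

static int
radix_example_count_routes(struct radix_node_head *rnh)
{
        int count = 0;

        (void) rnh->rnh_walktree(rnh, radix_example_count_leaf, &count);
        return count;
}
#endif /* RADIX_EXAMPLE */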

int
rn_inithead(head, off)
        void **head;
        int off;
{
        register struct radix_node_head *rnh;
        register struct radix_node *t, *tt, *ttt;

        if (*head)
                return (1);
        R_Malloc(rnh, struct radix_node_head *, sizeof (*rnh));
        if (rnh == 0)
                return (0);
        Bzero(rnh, sizeof (*rnh));
        *head = rnh;
        t = rn_newpair(rn_zeros, off, rnh->rnh_nodes);
        ttt = rnh->rnh_nodes + 2;
        t->rn_r = ttt;
        t->rn_p = t;
        tt = t->rn_l;
        tt->rn_flags = t->rn_flags = RNF_ROOT | RNF_ACTIVE;
        tt->rn_b = -1 - off;
        *ttt = *tt;
        ttt->rn_key = rn_ones;
        rnh->rnh_addaddr = rn_addroute;
        rnh->rnh_deladdr = rn_delete;
        rnh->rnh_matchaddr = rn_match;
        rnh->rnh_lookup = rn_lookup;
        rnh->rnh_walktree = rn_walktree;
        rnh->rnh_walktree_from = rn_walktree_from;
        rnh->rnh_treetop = t;
        return (1);
}

void
rn_init()
{
        char *cp, *cplim;
#ifdef KERNEL
        struct domain *dom;

        for (dom = domains; dom; dom = dom->dom_next)
                if (dom->dom_maxrtkey > max_keylen)
                        max_keylen = dom->dom_maxrtkey;
#endif
        if (max_keylen == 0) {
                log(LOG_ERR,
                    "rn_init: radix functions require max_keylen be set\n");
                return;
        }
        R_Malloc(rn_zeros, char *, 3 * max_keylen);
        if (rn_zeros == NULL)
                panic("rn_init");
        Bzero(rn_zeros, 3 * max_keylen);
        rn_ones = cp = rn_zeros + max_keylen;
        addmask_key = cplim = rn_ones + max_keylen;
        while (cp < cplim)
                *cp++ = -1;
        if (rn_inithead((void **)&mask_rnhead, 0) == 0)
                panic("rn_init 2");
}
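/*
 * Editor's note: illustrative sketch, not part of the original file and
 * guarded by the hypothetical RADIX_EXAMPLE macro.  Initialization order
 * matters: max_keylen must be known before rn_init() (in the kernel it is
 * taken from the domain list), rn_init() then builds the shared mask tree,
 * and only afterwards can per-protocol heads be created.  The "off"
 * argument to rn_inithead() is a bit offset: the number of leading key bits
 * (header bytes times 8) to skip before the address proper begins.
 */
#ifdef RADIX_EXAMPLE
static struct radix_node_head *radix_example_head;

static int
radix_example_init(void)
{
        /* Outside the kernel the domain walk is compiled out, so the
         * caller has to seed max_keylen itself before rn_init(). */
        if (max_keylen == 0)
                max_keylen = 32;
        rn_init();

        /* Keys here are assumed to carry 4 header bytes (including the
         * length byte) before the address, hence a 32-bit offset. */
        return rn_inithead((void **)&radix_example_head, 4 * 8);
}
#endif /* RADIX_EXAMPLE */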