/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              ROUTE - implementation of the IP router.
 *
 * Version:     @(#)route.c     1.0.14  05/31/93
 *
 * Authors:     Ross Biro, <bir7@leland.Stanford.Edu>
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Alan Cox, <gw4pts@gw4pts.ampr.org>
 *              Linus Torvalds, <Linus.Torvalds@helsinki.fi>
 *
 * Fixes:
 *              Alan Cox        :       Verify area fixes.
 *              Alan Cox        :       cli() protects routing changes
 *              Rui Oliveira    :       ICMP routing table updates
 *              (rco@di.uminho.pt)      Routing table insertion and update
 *              Linus Torvalds  :       Rewrote bits to be sensible
 *              Alan Cox        :       Added BSD route gw semantics
 *              Alan Cox        :       Super /proc >4K
 *              Alan Cox        :       MTU in route table
 *              Alan Cox        :       MSS actually. Also added the window
 *                                      clamper.
 *              Sam Lantinga    :       Fixed route matching in rt_del()
 *              Alan Cox        :       Routing cache support.
 *              Alan Cox        :       Removed compatibility cruft.
 *              Alan Cox        :       RTF_REJECT support.
 *              Alan Cox        :       TCP irtt support.
 *              Jonathan Naylor :       Added Metric support.
 *      Miquel van Smoorenburg  :       BSD API fixes.
 *      Miquel van Smoorenburg  :       Metrics.
 *              Alan Cox        :       Use __u32 properly
 *              Alan Cox        :       Aligned routing errors more closely with BSD;
 *                                      our system is still very different.
 *              Alan Cox        :       Faster /proc handling
 *      Alexey Kuznetsov        :       Massive rework to support tree based routing,
 *                                      routing caches and better behaviour.
 *
 *              Olaf Erb        :       irtt wasn't being copied right.
 *              Bjorn Ekwall    :       Kerneld route support.
 *              Alan Cox        :       Multicast fixed (I hope)
 *              Pavel Krauz     :       Limited broadcast fixed
 *              Elliot Poger    :       Added support for SO_BINDTODEVICE.
 *              Andi Kleen      :       Don't send multicast addresses to
 *                                      kerneld.
 *              Wolfgang Walter :       make rt_free() non-static
 *
 *      Juan Jose Ciarlante     :       Added ip_rt_dev
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <asm/segment.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/tcp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/icmp.h>
#include <net/netlink.h>
#ifdef CONFIG_KERNELD
#include <linux/kerneld.h>
#endif

/*
 * Forwarding Information Base definitions.
 */

struct fib_node
{
        struct fib_node         *fib_next;
        __u32                   fib_dst;
        unsigned long           fib_use;
        struct fib_info         *fib_info;
        short                   fib_metric;
        unsigned char           fib_tos;
};

/*
 * This structure contains data shared by many routes.
 */

struct fib_info
{
        struct fib_info         *fib_next;
        struct fib_info         *fib_prev;
        __u32                   fib_gateway;
        struct device           *fib_dev;
        int                     fib_refcnt;
        unsigned long           fib_window;
        unsigned short          fib_flags;
        unsigned short          fib_mtu;
        unsigned short          fib_irtt;
};

struct fib_zone
{
        struct fib_zone *fz_next;
        struct fib_node **fz_hash_table;
        struct fib_node *fz_list;
        int             fz_nent;
        int             fz_logmask;
        __u32           fz_mask;
};

static struct fib_zone  *fib_zones[33];
static struct fib_zone  *fib_zone_list;
static struct fib_node  *fib_loopback = NULL;
static struct fib_info  *fib_info_list;

/*
 * Backlogging.
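 *
 * Work that cannot be done while the routing table is locked is
 * flagged in ip_rt_bh_mask and replayed later by ip_rt_run_bh().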
 */

#define RT_BH_REDIRECT          1
#define RT_BH_GARBAGE_COLLECT   2
#define RT_BH_FREE              4

struct rt_req
{
        struct rt_req * rtr_next;
        struct device *dev;
        __u32 dst;
        __u32 gw;
        unsigned char tos;
};

int                     ip_rt_lock;
unsigned                ip_rt_bh_mask;
static struct rt_req    *rt_backlog;

/*
 * Route cache.
 */

struct rtable           *ip_rt_hash_table[RT_HASH_DIVISOR];
static int              rt_cache_size;
static struct rtable    *rt_free_queue;
struct wait_queue       *rt_wait;

static void rt_kick_backlog(void);
static void rt_cache_add(unsigned hash, struct rtable * rth);
static void rt_cache_flush(void);
static void rt_garbage_collect_1(void);

/*
 * Evaluate mask length.
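 * E.g. 255.255.255.0 -> 8 host bits, 255.255.255.255 -> 0, 0.0.0.0 -> 32.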
 */

static __inline__ int rt_logmask(__u32 mask)
{
        if (!(mask = ntohl(mask)))
                return 32;
        return ffz(~mask);
}

/*
 * Create mask from length.
 */

static __inline__ __u32 rt_mask(int logmask)
{
        if (logmask >= 32)
                return 0;
        return htonl(~((1<<logmask)-1));
}
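
/*
 * Hash a destination within a zone: shift out the host bits covered
 * by the zone's mask, then fold the remaining network part into a
 * cache bucket index.
 */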

static __inline__ unsigned fz_hash_code(__u32 dst, int logmask)
{
        return ip_rt_hash_code(ntohl(dst)>>logmask);
}

/*
 * Free FIB node.
 */

static void fib_free_node(struct fib_node * f)
{
        struct fib_info * fi = f->fib_info;
        if (!--fi->fib_refcnt)
        {
#if RT_CACHE_DEBUG >= 2
                printk("fib_free_node: fi %08x/%s is free\n", fi->fib_gateway, fi->fib_dev->name);
#endif
                if (fi->fib_next)
                        fi->fib_next->fib_prev = fi->fib_prev;
                if (fi->fib_prev)
                        fi->fib_prev->fib_next = fi->fib_next;
                if (fi == fib_info_list)
                        fib_info_list = fi->fib_next;
                kfree_s(fi, sizeof(struct fib_info));
        }
        kfree_s(f, sizeof(struct fib_node));
}

/*
 * Find gateway route by address.
 */

static struct fib_node * fib_lookup_gateway(__u32 dst)
{
        struct fib_zone * fz;
        struct fib_node * f;

        for (fz = fib_zone_list; fz; fz = fz->fz_next)
        {
                if (fz->fz_hash_table)
                        f = fz->fz_hash_table[fz_hash_code(dst, fz->fz_logmask)];
                else
                        f = fz->fz_list;

                for ( ; f; f = f->fib_next)
                {
                        if (((dst ^ f->fib_dst) & fz->fz_mask) ||
                            (f->fib_info->fib_flags & RTF_GATEWAY))
                                continue;
                        return f;
                }
        }
        return NULL;
}

/*
 * Find local route by address.
 * FIXME: I use the "longest match" principle. If the destination
 *        has some non-local route, I won't search shorter matches.
 *        It's possible I'm wrong, but I wanted to prevent the
 *        following situation:
 *      route add 193.233.7.128 netmask 255.255.255.192 gw xxxxxx
 *      route add 193.233.7.0   netmask 255.255.255.0 eth1
 *        (Two ethernets connected by a serial line; one is small, the other large.)
 *        Host 193.233.7.129 is locally unreachable,
 *        but the old (<=1.3.37) code would send packets destined for it to eth1.
 *
 * The calling routine can specify a particular interface by setting dev.
 * If dev==NULL, any interface will do.
 */

static struct fib_node * fib_lookup_local(__u32 dst, struct device *dev)
{
        struct fib_zone * fz;
        struct fib_node * f;

        for (fz = fib_zone_list; fz; fz = fz->fz_next)
        {
                int longest_match_found = 0;

                if (fz->fz_hash_table)
                        f = fz->fz_hash_table[fz_hash_code(dst, fz->fz_logmask)];
                else
                        f = fz->fz_list;

                for ( ; f; f = f->fib_next)
                {
                        if ((dst ^ f->fib_dst) & fz->fz_mask)
                                continue;
                        if ( (dev != NULL) && (dev != f->fib_info->fib_dev) )
                                continue;
                        if (!(f->fib_info->fib_flags & RTF_GATEWAY))
                                return f;
                        longest_match_found = 1;
                }
                if (longest_match_found)
                        return NULL;
        }
        return NULL;
}

/*
 * Main lookup routine.
 *      IMPORTANT NOTE: this algorithm has a small, user-visible
 *      difference from <=1.3.37: it doesn't route non-CIDR broadcasts
 *      by default.
 *
 *      E.g.
 *              ifconfig eth0 193.233.7.65 netmask 255.255.255.192 broadcast 193.233.7.255
 *      is valid, but if you really are not able (not allowed, do not want) to
 *      use the CIDR-compliant broadcast 193.233.7.127, you should add a host route:
 *              route add -host 193.233.7.255 eth0
 */

static struct fib_node * fib_lookup(__u32 dst, struct device *dev)
{
        struct fib_zone * fz;
        struct fib_node * f;

        for (fz = fib_zone_list; fz; fz = fz->fz_next)
        {
                if (fz->fz_hash_table)
                        f = fz->fz_hash_table[fz_hash_code(dst, fz->fz_logmask)];
                else
                        f = fz->fz_list;

                for ( ; f; f = f->fib_next)
                {
                        if ((dst ^ f->fib_dst) & fz->fz_mask)
                                continue;
                        if ( (dev != NULL) && (dev != f->fib_info->fib_dev) )
                                continue;
                        return f;
                }
        }
        return NULL;
}

static __inline__ struct device * get_gw_dev(__u32 gw)
{
        struct fib_node * f;
        f = fib_lookup_gateway(gw);
        if (f)
                return f->fib_info->fib_dev;
        return NULL;
}

/*
 *      Check if a mask is acceptable.
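 *      The mask must not overlap the host part of the address and, in
 *      host order, must be a contiguous run of ones: ~mask then has
 *      the form 2^n - 1, so mask & (mask+1) == 0.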
 */

static inline int bad_mask(__u32 mask, __u32 addr)
{
        if (addr & (mask = ~mask))
                return 1;
        mask = ntohl(mask);
        if (mask & (mask+1))
                return 1;
        return 0;
}
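
/*
 * Unlink and free every node on one chain that matches dst and, where
 * specified, the gateway, metric and device.  Returns the number of
 * routes removed.
 */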
static int fib_del_list(struct fib_node **fp, __u32 dst,
                struct device * dev, __u32 gtw, short flags, short metric, __u32 mask)
{
        struct fib_node *f;
        int found=0;

        while((f = *fp) != NULL)
        {
                struct fib_info * fi = f->fib_info;

                /*
                 *      Make sure the destination and netmask match.
                 *      Metric, gateway and device are also checked
                 *      if they were specified.
                 */
                if (f->fib_dst != dst ||
                    (gtw && fi->fib_gateway != gtw) ||
                    (metric >= 0 && f->fib_metric != metric) ||
                    (dev && fi->fib_dev != dev) )
                {
                        fp = &f->fib_next;
                        continue;
                }
                cli();
                *fp = f->fib_next;
                if (fib_loopback == f)
                        fib_loopback = NULL;
                sti();
                ip_netlink_msg(RTMSG_DELROUTE, dst, gtw, mask, flags, metric, fi->fib_dev->name);
                fib_free_node(f);
                found++;
        }
        return found;
}

static __inline__ int fib_del_1(__u32 dst, __u32 mask,
                struct device * dev, __u32 gtw, short flags, short metric)
{
        struct fib_node **fp;
        struct fib_zone *fz;
        int found=0;

        if (!mask)
        {
                for (fz=fib_zone_list; fz; fz = fz->fz_next)
                {
                        int tmp;
                        if (fz->fz_hash_table)
                                fp = &fz->fz_hash_table[fz_hash_code(dst, fz->fz_logmask)];
                        else
                                fp = &fz->fz_list;

                        tmp = fib_del_list(fp, dst, dev, gtw, flags, metric, mask);
                        fz->fz_nent -= tmp;
                        found += tmp;
                }
        }
        else
        {
                if ((fz = fib_zones[rt_logmask(mask)]) != NULL)
                {
                        if (fz->fz_hash_table)
                                fp = &fz->fz_hash_table[fz_hash_code(dst, fz->fz_logmask)];
                        else
                                fp = &fz->fz_list;

                        found = fib_del_list(fp, dst, dev, gtw, flags, metric, mask);
                        fz->fz_nent -= found;
                }
        }

        if (found)
        {
                rt_cache_flush();
                return 0;
        }
        return -ESRCH;
}
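
/*
 * Look for an existing fib_info block that matches the given
 * parameters and share it (bumping fib_refcnt); otherwise allocate a
 * fresh one and chain it onto fib_info_list.
 */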
static struct fib_info * fib_create_info(__u32 gw, struct device * dev,
                                         unsigned short flags, unsigned short mss,
                                         unsigned long window, unsigned short irtt)
{
        struct fib_info * fi;

        if (!(flags & RTF_MSS))
        {
                mss = dev->mtu;
#ifdef CONFIG_NO_PATH_MTU_DISCOVERY
                /*
                 *      If the MTU was not specified, use the default.
                 *      If you want to increase MTU for some net (local subnet),
                 *      use "route add .... mss xxx".
                 *
                 *      The MTU isn't currently always used or computed as it
                 *      should be, as far as I can tell. [Still verifying this is right]
                 */
                if ((flags & RTF_GATEWAY) && mss > 576)
                        mss = 576;
#endif
        }
        if (!(flags & RTF_WINDOW))
                window = 0;
        if (!(flags & RTF_IRTT))
                irtt = 0;

        for (fi=fib_info_list; fi; fi = fi->fib_next)
        {
                if (fi->fib_gateway != gw ||
                    fi->fib_dev != dev  ||
                    fi->fib_flags != flags ||
                    fi->fib_mtu != mss ||
                    fi->fib_window != window ||
                    fi->fib_irtt != irtt)
                        continue;
                fi->fib_refcnt++;
#if RT_CACHE_DEBUG >= 2
                printk("fib_create_info: fi %08x/%s is duplicate\n", fi->fib_gateway, fi->fib_dev->name);
#endif
                return fi;
        }
        fi = (struct fib_info*)kmalloc(sizeof(struct fib_info), GFP_KERNEL);
        if (!fi)
                return NULL;
        memset(fi, 0, sizeof(struct fib_info));
        fi->fib_flags = flags;
        fi->fib_dev = dev;
        fi->fib_gateway = gw;
        fi->fib_mtu = mss;
        fi->fib_window = window;
        fi->fib_refcnt++;
        fi->fib_next = fib_info_list;
        fi->fib_prev = NULL;
        fi->fib_irtt = irtt;
        if (fib_info_list)
                fib_info_list->fib_prev = fi;
        fib_info_list = fi;
#if RT_CACHE_DEBUG >= 2
        printk("fib_create_info: fi %08x/%s is created\n", fi->fib_gateway, fi->fib_dev->name);
#endif
        return fi;
}


static __inline__ void fib_add_1(short flags, __u32 dst, __u32 mask,
        __u32 gw, struct device *dev, unsigned short mss,
        unsigned long window, unsigned short irtt, short metric)
{
        struct fib_node *f, *f1;
        struct fib_node **fp;
        struct fib_node **dup_fp = NULL;
        struct fib_zone * fz;
        struct fib_info * fi;
        int logmask;

        /*
         *      Allocate an entry and fill it in.
         */

        f = (struct fib_node *) kmalloc(sizeof(struct fib_node), GFP_KERNEL);
        if (f == NULL)
                return;

        memset(f, 0, sizeof(struct fib_node));
        f->fib_dst = dst;
        f->fib_metric = metric;
        f->fib_tos    = 0;

        if ((fi = fib_create_info(gw, dev, flags, mss, window, irtt)) == NULL)
        {
                kfree_s(f, sizeof(struct fib_node));
                return;
        }
        f->fib_info = fi;

        logmask = rt_logmask(mask);
        fz = fib_zones[logmask];

        if (!fz)
        {
                int i;
                fz = kmalloc(sizeof(struct fib_zone), GFP_KERNEL);
                if (!fz)
                {
                        fib_free_node(f);
                        return;
                }
                memset(fz, 0, sizeof(struct fib_zone));
                fz->fz_logmask = logmask;
                fz->fz_mask = mask;
                for (i=logmask-1; i>=0; i--)
                        if (fib_zones[i])
                                break;
                cli();
                if (i<0)
                {
                        fz->fz_next = fib_zone_list;
                        fib_zone_list = fz;
                }
                else
                {
                        fz->fz_next = fib_zones[i]->fz_next;
                        fib_zones[i]->fz_next = fz;
                }
                fib_zones[logmask] = fz;
                sti();
        }

        /*
         * If the zone outgrows RTZ_HASHING_LIMIT, create a hash table.
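         * Small zones stay on a plain linked list; once a zone holds
         * enough routes, the list is rehashed into RTZ_HASH_DIVISOR
         * buckets and the existing nodes are redistributed under cli().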
         */

        if (fz->fz_nent >= RTZ_HASHING_LIMIT && !fz->fz_hash_table && logmask<32)
        {
                struct fib_node ** ht;
#if RT_CACHE_DEBUG >= 2
                printk("fib_add_1: hashing for zone %d started\n", logmask);
#endif
                ht = kmalloc(RTZ_HASH_DIVISOR*sizeof(struct fib_node*), GFP_KERNEL);

                if (ht)
                {
                        memset(ht, 0, RTZ_HASH_DIVISOR*sizeof(struct fib_node*));
                        cli();
                        f1 = fz->fz_list;
                        while (f1)
                        {
                                struct fib_node * next, **end;
                                unsigned hash = fz_hash_code(f1->fib_dst, logmask);
                                next = f1->fib_next;
                                f1->fib_next = NULL;
                                end = &ht[hash];
                                while(*end != NULL)
                                        end = &(*end)->fib_next;
                                *end = f1;
                                f1 = next;
                        }
                        fz->fz_list = NULL;
                        fz->fz_hash_table = ht;
                        sti();
                }
        }

        if (fz->fz_hash_table)
                fp = &fz->fz_hash_table[fz_hash_code(dst, logmask)];
        else
                fp = &fz->fz_list;

        /*
         * Scan the list to find the first route with the same destination.
         */
        while ((f1 = *fp) != NULL)
        {
                if (f1->fib_dst == dst)
                        break;
                fp = &f1->fib_next;
        }

        /*
         * Find the route with the same destination and a lower (or equal) metric.
         */
        while ((f1 = *fp) != NULL && f1->fib_dst == dst)
        {
                if (f1->fib_metric >= metric)
                        break;
                /*
                 *      Record the route with the same destination and gateway,
                 *      but a lower metric. We'll delete it
                 *      after instantiating the new route.
                 */
                if (f1->fib_info->fib_gateway == gw &&
                    (gw || f1->fib_info->fib_dev == dev))
                        dup_fp = fp;
                fp = &f1->fib_next;
        }

        /*
         * Is it already present?
         */

        if (f1 && f1->fib_metric == metric && f1->fib_info == fi)
        {
                fib_free_node(f);
                return;
        }

        /*
         * Insert the new entry into the list.
         */

        cli();
        f->fib_next = f1;
        *fp = f;
        if (!fib_loopback && (fi->fib_dev->flags & IFF_LOOPBACK))
                fib_loopback = f;
        sti();
        fz->fz_nent++;
        ip_netlink_msg(RTMSG_NEWROUTE, dst, gw, mask, flags, metric, fi->fib_dev->name);

        /*
         *      Delete the route with the same destination and gateway.
         *      Note that we should have at most one such route.
         */
        if (dup_fp)
                fp = dup_fp;
        else
                fp = &f->fib_next;

        while ((f1 = *fp) != NULL && f1->fib_dst == dst)
        {
                if (f1->fib_info->fib_gateway == gw &&
                    (gw || f1->fib_info->fib_dev == dev))
                {
                        cli();
                        *fp = f1->fib_next;
                        if (fib_loopback == f1)
                                fib_loopback = NULL;
                        sti();
                        ip_netlink_msg(RTMSG_DELROUTE, dst, gw, mask, flags, metric, f1->fib_info->fib_dev->name);
                        fib_free_node(f1);
                        fz->fz_nent--;
                        break;
                }
                fp = &f1->fib_next;
        }
        rt_cache_flush();
        return;
}

static int rt_flush_list(struct fib_node ** fp, struct device *dev)
{
        int found = 0;
        struct fib_node *f;

        while ((f = *fp) != NULL) {
/*
 *      The "magic" device route is allowed to point to loopback;
 *      discard it too.
 */
                if (f->fib_info->fib_dev != dev &&
                    (f->fib_info->fib_dev != &loopback_dev || f->fib_dst != dev->pa_addr)) {
                        fp = &f->fib_next;
                        continue;
                }
                cli();
                *fp = f->fib_next;
                if (fib_loopback == f)
                        fib_loopback = NULL;
                sti();
                fib_free_node(f);
                found++;
        }
        return found;
}

static __inline__ void fib_flush_1(struct device *dev)
{
        struct fib_zone *fz;
        int found = 0;

        for (fz = fib_zone_list; fz; fz = fz->fz_next)
        {
                if (fz->fz_hash_table)
                {
                        int i;
                        int tmp = 0;
                        for (i=0; i<RTZ_HASH_DIVISOR; i++)
                                tmp += rt_flush_list(&fz->fz_hash_table[i], dev);
                        fz->fz_nent -= tmp;
                        found += tmp;
                }
                else
                {
                        int tmp;
                        tmp = rt_flush_list(&fz->fz_list, dev);
                        fz->fz_nent -= tmp;
                        found += tmp;
                }
        }

        if (found)
                rt_cache_flush();
}


/*
 *      Called from the PROCfs module. This outputs /proc/net/route.
 *
 *      We preserve the old format but pad the buffers out. This means that
 *      we can spin over the other entries as we read them. Remember the
 *      gated BGP4 code may need to read 60,000+ routes on occasion (that's
 *      about 7Mb of data). To handle that we will also need to cache the
 *      last route we got to (reads will generally be following on from
 *      one another without gaps).
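 *
 *      Every record, including the header line, is padded to exactly
 *      128 bytes, so a file offset maps directly onto an entry index
 *      and a partial read can resume without rescanning the table.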
 */

int rt_get_info(char *buffer, char **start, off_t offset, int length, int dummy)
{
        struct fib_zone *fz;
        struct fib_node *f;
        int len=0;
        off_t pos=0;
        char temp[129];
        int i;

        pos = 128;

        if (offset<128)
        {
                sprintf(buffer,"%-127s\n","Iface\tDestination\tGateway \tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU\tWindow\tIRTT");
                len = 128;
        }

        while (ip_rt_lock)
                sleep_on(&rt_wait);
        ip_rt_fast_lock();

        for (fz=fib_zone_list; fz; fz = fz->fz_next)
        {
                int maxslot;
                struct fib_node ** fp;

                if (fz->fz_nent == 0)
                        continue;

                if (pos + 128*fz->fz_nent <= offset)
                {
                        pos += 128*fz->fz_nent;
                        len = 0;
                        continue;
                }

                if (fz->fz_hash_table)
                {
                        maxslot = RTZ_HASH_DIVISOR;
                        fp      = fz->fz_hash_table;
                }
                else
                {
                        maxslot = 1;
                        fp      = &fz->fz_list;
                }

                for (i=0; i < maxslot; i++, fp++)
                {

                        for (f = *fp; f; f = f->fib_next)
                        {
                                struct fib_info * fi;
                                /*
                                 *      Spin through entries until we are ready
                                 */
                                pos += 128;

                                if (pos <= offset)
                                {
                                        len=0;
                                        continue;
                                }

                                fi = f->fib_info;
                                sprintf(temp, "%s\t%08lX\t%08lX\t%02X\t%d\t%lu\t%d\t%08lX\t%d\t%lu\t%u",
                                        fi->fib_dev->name, (unsigned long)f->fib_dst, (unsigned long)fi->fib_gateway,
                                        fi->fib_flags, 0, f->fib_use, f->fib_metric,
                                        (unsigned long)fz->fz_mask, (int)fi->fib_mtu, fi->fib_window, (int)fi->fib_irtt);
                                sprintf(buffer+len,"%-127s\n",temp);

                                len += 128;
                                if (pos >= offset+length)
                                        goto done;
                        }
                }
        }

done:
        ip_rt_unlock();
        wake_up(&rt_wait);

        *start = buffer+len-(pos-offset);
        len = pos - offset;
        if (len>length)
                len = length;
        return len;
}

int rt_cache_get_info(char *buffer, char **start, off_t offset, int length, int dummy)
{
        int len=0;
        off_t pos=0;
        char temp[129];
        struct rtable *r;
        int i;

        pos = 128;

        if (offset<128)
        {
                sprintf(buffer,"%-127s\n","Iface\tDestination\tGateway \tFlags\tRefCnt\tUse\tMetric\tSource\t\tMTU\tWindow\tIRTT\tHH\tARP");
                len = 128;
        }

        while (ip_rt_lock)
                sleep_on(&rt_wait);
        ip_rt_fast_lock();

        for (i = 0; i<RT_HASH_DIVISOR; i++)
        {
                for (r = ip_rt_hash_table[i]; r; r = r->rt_next)
                {
                        /*
                         *      Spin through entries until we are ready
                         */
                        pos += 128;

                        if (pos <= offset)
                        {
                                len = 0;
                                continue;
                        }

                        sprintf(temp, "%s\t%08lX\t%08lX\t%02X\t%d\t%u\t%d\t%08lX\t%d\t%lu\t%u\t%d\t%1d",
                                r->rt_dev->name, (unsigned long)r->rt_dst, (unsigned long)r->rt_gateway,
                                r->rt_flags, r->rt_refcnt, r->rt_use, 0,
                                (unsigned long)r->rt_src, (int)r->rt_mtu, r->rt_window, (int)r->rt_irtt, r->rt_hh ? r->rt_hh->hh_refcnt : -1, r->rt_hh ? r->rt_hh->hh_uptodate : 0);
                        sprintf(buffer+len,"%-127s\n",temp);
                        len += 128;
                        if (pos >= offset+length)
                                goto done;
                }
        }

done:
        ip_rt_unlock();
        wake_up(&rt_wait);

        *start = buffer+len-(pos-offset);
        len = pos-offset;
        if (len>length)
                len = length;
        return len;
}
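
/*
 * Free a cache entry immediately if nothing references it; otherwise
 * park it on rt_free_queue with RTF_UP cleared and let the bottom
 * half reap it once the last user is gone.
 */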
void rt_free(struct rtable * rt)
{
        unsigned long flags;

        save_flags(flags);
        cli();
        if (!rt->rt_refcnt)
        {
                struct hh_cache * hh = rt->rt_hh;
                rt->rt_hh = NULL;
                restore_flags(flags);
                if (hh && atomic_dec_and_test(&hh->hh_refcnt))
                        kfree_s(hh, sizeof(struct hh_cache));
                kfree_s(rt, sizeof(struct rtable));
                return;
        }
        rt->rt_next = rt_free_queue;
        rt->rt_flags &= ~RTF_UP;
        rt_free_queue = rt;
        ip_rt_bh_mask |= RT_BH_FREE;
#if RT_CACHE_DEBUG >= 2
        printk("rt_free: %08x\n", rt->rt_dst);
#endif
        restore_flags(flags);
}

/*
 * RT "bottom half" handlers. Called with masked interrupts.
 */

static __inline__ void rt_kick_free_queue(void)
{
        struct rtable *rt, **rtp;
#if RT_CACHE_DEBUG >= 2
        static int in = 0;

        if(in) {
                printk("Attempted multiple entry: rt_kick_free_queue\n");
                return;
        }
        in++;
#endif

        ip_rt_bh_mask &= ~RT_BH_FREE;

        rtp = &rt_free_queue;

        while ((rt = *rtp) != NULL)
        {
                if (!rt->rt_refcnt)
                {
                        struct hh_cache * hh = rt->rt_hh;
#if RT_CACHE_DEBUG >= 2
                        __u32 daddr = rt->rt_dst;
#endif
                        *rtp = rt->rt_next;
                        rt->rt_hh = NULL;
                        sti();
                        if (hh && atomic_dec_and_test(&hh->hh_refcnt))
                                kfree_s(hh, sizeof(struct hh_cache));
                        kfree_s(rt, sizeof(struct rtable));
#if RT_CACHE_DEBUG >= 2
                        printk("rt_kick_free_queue: %08x is free\n", daddr);
#endif
                        cli();
                        continue;
                }
                rtp = &rt->rt_next;
        }
#if RT_CACHE_DEBUG >= 2
        in--;
#endif
}

void ip_rt_run_bh()
{
        unsigned long flags;
        save_flags(flags);
        cli();
        if (ip_rt_bh_mask && !ip_rt_lock)
        {
                if (ip_rt_bh_mask & RT_BH_REDIRECT)
                        rt_kick_backlog();

                if (ip_rt_bh_mask & RT_BH_GARBAGE_COLLECT)
                {
                        ip_rt_fast_lock();
                        ip_rt_bh_mask &= ~RT_BH_GARBAGE_COLLECT;
                        sti();
                        rt_garbage_collect_1();
                        cli();
                        ip_rt_fast_unlock();
                }

                if (ip_rt_bh_mask & RT_BH_FREE) {
                        ip_rt_fast_lock();
                        rt_kick_free_queue();
                        ip_rt_fast_unlock();
                }
        }
        restore_flags(flags);
}


void ip_rt_check_expire()
{
        ip_rt_fast_lock();
        if (ip_rt_lock == 1)
        {
                int i;
                struct rtable *rth, **rthp;
                unsigned long flags;
                unsigned long now = jiffies;

                save_flags(flags);
                for (i=0; i<RT_HASH_DIVISOR; i++)
                {
                        rthp = &ip_rt_hash_table[i];

                        while ((rth = *rthp) != NULL)
                        {
                                struct rtable * rth_next = rth->rt_next;

                                /*
                                 * Clean up aged-off entries.
                                 */

                                cli();
                                if (!rth->rt_refcnt && rth->rt_lastuse + RT_CACHE_TIMEOUT < now)
                                {
                                        *rthp = rth_next;
                                        sti();
                                        rt_cache_size--;
#if RT_CACHE_DEBUG >= 2
                                        printk("rt_check_expire clean %02x@%08x\n", i, rth->rt_dst);
#endif
                                        rt_free(rth);
                                        continue;
                                }
                                sti();

                                if (!rth_next)
                                        break;

                                /*
                                 * LRU ordering.
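                                 * A noticeably more recently (or more
                                 * heavily) used entry bubbles one
                                 * step toward the head of its chain.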
                                 */

                                if (rth->rt_lastuse + RT_CACHE_BUBBLE_THRESHOLD < rth_next->rt_lastuse ||
                                    (rth->rt_lastuse < rth_next->rt_lastuse &&
                                     rth->rt_use < rth_next->rt_use))
                                {
#if RT_CACHE_DEBUG >= 2
                                        printk("rt_check_expire bubbled %02x@%08x<->%08x\n", i, rth->rt_dst, rth_next->rt_dst);
#endif
                                        cli();
                                        *rthp = rth_next;
                                        rth->rt_next = rth_next->rt_next;
                                        rth_next->rt_next = rth;
                                        sti();
                                        rthp = &rth_next->rt_next;
                                        continue;
                                }
                                rthp = &rth->rt_next;
                        }
                }
                restore_flags(flags);
                rt_kick_free_queue();
        }
        ip_rt_unlock();
}

static void rt_redirect_1(__u32 dst, __u32 gw, struct device *dev)
{
        struct rtable *rt;
        unsigned long hash = ip_rt_hash_code(dst);

        if (gw == dev->pa_addr)
                return;
        if (dev != get_gw_dev(gw))
                return;
        rt = (struct rtable *) kmalloc(sizeof(struct rtable), GFP_ATOMIC);
        if (rt == NULL)
                return;
        memset(rt, 0, sizeof(struct rtable));
        rt->rt_flags = RTF_DYNAMIC | RTF_MODIFIED | RTF_HOST | RTF_GATEWAY | RTF_UP;
        rt->rt_dst = dst;
        rt->rt_dev = dev;
        rt->rt_gateway = gw;
        rt->rt_src = dev->pa_addr;
        rt->rt_mtu = dev->mtu;
#ifdef CONFIG_NO_PATH_MTU_DISCOVERY
        if (dev->mtu > 576)
                rt->rt_mtu = 576;
#endif
        rt->rt_lastuse = jiffies;
        rt->rt_refcnt = 1;
        rt_cache_add(hash, rt);
        ip_rt_put(rt);
        return;
}

static void rt_cache_flush(void)
{
        int i;
        struct rtable * rth, * next;

        for (i=0; i<RT_HASH_DIVISOR; i++)
        {
                int nr=0;

                cli();
                if (!(rth = ip_rt_hash_table[i]))
                {
                        sti();
                        continue;
                }

                ip_rt_hash_table[i] = NULL;
                sti();

                for (; rth; rth=next)
                {
                        next = rth->rt_next;
                        rt_cache_size--;
                        nr++;
                        rth->rt_next = NULL;
                        rt_free(rth);
                }
#if RT_CACHE_DEBUG >= 2
                if (nr > 0)
                        printk("rt_cache_flush: %d@%02x\n", nr, i);
#endif
        }
#if RT_CACHE_DEBUG >= 1
        if (rt_cache_size)
        {
                printk("rt_cache_flush: bug rt_cache_size=%d\n", rt_cache_size);
                rt_cache_size = 0;
        }
#endif
}
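
/*
 * Evict entries whose last use is older than an adaptive threshold
 * (scaled up for referenced entries), halving the threshold on each
 * pass until the cache is back under RT_CACHE_SIZE_MAX.
 */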
static void rt_garbage_collect_1(void)
{
        int i;
        unsigned expire = RT_CACHE_TIMEOUT>>1;
        struct rtable * rth, **rthp;
        unsigned long now = jiffies;

        for (;;)
        {
                for (i=0; i<RT_HASH_DIVISOR; i++)
                {
                        if (!ip_rt_hash_table[i])
                                continue;
                        for (rthp=&ip_rt_hash_table[i]; (rth=*rthp); rthp=&rth->rt_next)
                        {
                                if (rth->rt_lastuse + expire*(rth->rt_refcnt+1) > now)
                                        continue;
                                rt_cache_size--;
                                cli();
                                *rthp=rth->rt_next;
                                rth->rt_next = NULL;
                                sti();
                                rt_free(rth);
                                break;
                        }
                }
                if (rt_cache_size < RT_CACHE_SIZE_MAX)
                        return;
                expire >>= 1;
        }
}
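
/*
 * The redirect backlog is a circular, singly linked list with *q
 * pointing at the tail, so enqueueing at the tail and dequeueing from
 * the head (tail->rtr_next) are both O(1).
 */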
static __inline__ void rt_req_enqueue(struct rt_req **q, struct rt_req *rtr)
{
        unsigned long flags;
        struct rt_req * tail;

        save_flags(flags);
        cli();
        tail = *q;
        if (!tail)
                rtr->rtr_next = rtr;
        else
        {
                rtr->rtr_next = tail->rtr_next;
                tail->rtr_next = rtr;
        }
        *q = rtr;
        restore_flags(flags);
        return;
}

/*
 * Caller should mask interrupts.
 */

static __inline__ struct rt_req * rt_req_dequeue(struct rt_req **q)
{
        struct rt_req * rtr;

        if (*q)
        {
                rtr = (*q)->rtr_next;
                (*q)->rtr_next = rtr->rtr_next;
                if (rtr->rtr_next == rtr)
                        *q = NULL;
                rtr->rtr_next = NULL;
                return rtr;
        }
        return NULL;
}

/*
 * Called with masked interrupts.
 */

static void rt_kick_backlog()
{
        if (!ip_rt_lock)
        {
                struct rt_req * rtr;

                ip_rt_fast_lock();

                while ((rtr = rt_req_dequeue(&rt_backlog)) != NULL)
                {
                        sti();
                        rt_redirect_1(rtr->dst, rtr->gw, rtr->dev);
                        kfree_s(rtr, sizeof(struct rt_req));
                        cli();
                }

                ip_rt_bh_mask &= ~RT_BH_REDIRECT;

                ip_rt_fast_unlock();
        }
}

/*
 * rt_{del|add|flush} are called only from a USER process. Waiting is OK.
 */

static int rt_del(__u32 dst, __u32 mask,
                struct device * dev, __u32 gtw, short rt_flags, short metric)
{
        int retval;

        while (ip_rt_lock)
                sleep_on(&rt_wait);
        ip_rt_fast_lock();
        retval = fib_del_1(dst, mask, dev, gtw, rt_flags, metric);
        ip_rt_unlock();
        wake_up(&rt_wait);
        return retval;
}

static void rt_add(short flags, __u32 dst, __u32 mask,
        __u32 gw, struct device *dev, unsigned short mss,
        unsigned long window, unsigned short irtt, short metric)
{
        while (ip_rt_lock)
                sleep_on(&rt_wait);
        ip_rt_fast_lock();
        fib_add_1(flags, dst, mask, gw, dev, mss, window, irtt, metric);
        ip_rt_unlock();
        wake_up(&rt_wait);
}

void ip_rt_flush(struct device *dev)
{
        while (ip_rt_lock)
                sleep_on(&rt_wait);
        ip_rt_fast_lock();
        fib_flush_1(dev);
        ip_rt_unlock();
        wake_up(&rt_wait);
}

/*
 * Called by the ICMP module.
 */

void ip_rt_redirect(__u32 src, __u32 dst, __u32 gw, struct device *dev)
{
        struct rt_req * rtr;
        struct rtable * rt;

        rt = ip_rt_route(dst, 0, NULL);
        if (!rt)
                return;
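
        /*
         * Sanity-check the redirect: it must come from the current
         * gateway for this destination, arrive on the right device,
         * name a gateway on a directly connected network, and that
         * gateway must not be one of our own (or another special)
         * addresses.
         */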
        if (rt->rt_gateway != src ||
            rt->rt_dev != dev ||
            ((gw^dev->pa_addr)&dev->pa_mask) ||
            ip_chk_addr(gw))
        {
                ip_rt_put(rt);
                return;
        }
        ip_rt_put(rt);

        ip_rt_fast_lock();
        if (ip_rt_lock == 1)
        {
                rt_redirect_1(dst, gw, dev);
                ip_rt_unlock();
                return;
        }

        rtr = kmalloc(sizeof(struct rt_req), GFP_ATOMIC);
        if (rtr)
        {
                rtr->dst = dst;
                rtr->gw = gw;
                rtr->dev = dev;
                rt_req_enqueue(&rt_backlog, rtr);
                ip_rt_bh_mask |= RT_BH_REDIRECT;
        }
        ip_rt_unlock();
}


static __inline__ void rt_garbage_collect(void)
{
        if (ip_rt_lock == 1)
        {
                rt_garbage_collect_1();
                return;
        }
        ip_rt_bh_mask |= RT_BH_GARBAGE_COLLECT;
}

static void rt_cache_add(unsigned hash, struct rtable * rth)
{
        unsigned long   flags;
        struct rtable   **rthp;
        __u32           daddr = rth->rt_dst;
        unsigned long   now = jiffies;

#if RT_CACHE_DEBUG >= 2
        if (ip_rt_lock != 1)
        {
                printk("rt_cache_add: ip_rt_lock==%d\n", ip_rt_lock);
                return;
        }
#endif

        save_flags(flags);
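
        /*
         * Pre-bind a hardware header for the new entry.  A gatewayed
         * route shares (and refcounts) the hh cache of the gateway's
         * own cache entry instead of building one of its own.
         */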
        if (rth->rt_dev->header_cache_bind)
        {
                struct rtable * rtg = rth;

                if (rth->rt_gateway != daddr)
                {
                        ip_rt_fast_unlock();
                        rtg = ip_rt_route(rth->rt_gateway, 0, NULL);
                        ip_rt_fast_lock();
                }

                if (rtg)
                {
                        if (rtg == rth)
                                rtg->rt_dev->header_cache_bind(&rtg->rt_hh, rtg->rt_dev, ETH_P_IP, rtg->rt_dst);
                        else
                        {
                                if (rtg->rt_hh)
                                        atomic_inc(&rtg->rt_hh->hh_refcnt);
                                rth->rt_hh = rtg->rt_hh;
                                ip_rt_put(rtg);
                        }
                }
        }

        if (rt_cache_size >= RT_CACHE_SIZE_MAX)
                rt_garbage_collect();

        cli();
        rth->rt_next = ip_rt_hash_table[hash];
#if RT_CACHE_DEBUG >= 2
        if (rth->rt_next)
        {
                struct rtable * trth;
                printk("rt_cache @%02x: %08x", hash, daddr);
                for (trth=rth->rt_next; trth; trth=trth->rt_next)
                        printk(" . %08x", trth->rt_dst);
                printk("\n");
        }
#endif
        ip_rt_hash_table[hash] = rth;
        rthp = &rth->rt_next;
        sti();
        rt_cache_size++;

        /*
         * Clean up duplicate (and aged-off) entries.
         */

        while ((rth = *rthp) != NULL)
        {

                cli();
                if ((!rth->rt_refcnt && rth->rt_lastuse + RT_CACHE_TIMEOUT < now)
                    || rth->rt_dst == daddr)
                {
                        *rthp = rth->rt_next;
                        rt_cache_size--;
                        sti();
#if RT_CACHE_DEBUG >= 2
                        printk("rt_cache clean %02x@%08x\n", hash, rth->rt_dst);
#endif
                        rt_free(rth);
                        continue;
                }
                sti();
                rthp = &rth->rt_next;
        }
        restore_flags(flags);
}

/*
   RT should already be locked.

   We could improve this by keeping a chain of, say, the last 32 freed
   struct rtable's for fast recycling.
 */
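
/*
 * Note: the cache hash is perturbed by the "local" flag, so local and
 * non-local lookups for the same destination land in different
 * buckets and never return each other's entries.
 */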

struct rtable * ip_rt_slow_route (__u32 daddr, int local, struct device *dev)
{
        unsigned hash = ip_rt_hash_code(daddr)^local;
        struct rtable * rth;
        struct fib_node * f;
        struct fib_info * fi;
        __u32 saddr;

#if RT_CACHE_DEBUG >= 2
        printk("rt_cache miss @%08x\n", daddr);
#endif

        rth = kmalloc(sizeof(struct rtable), GFP_ATOMIC);
        if (!rth)
        {
                ip_rt_unlock();
                return NULL;
        }

        if (local)
                f = fib_lookup_local(daddr, dev);
        else
                f = fib_lookup(daddr, dev);

        if (f)
        {
                fi = f->fib_info;
                f->fib_use++;
        }

        if (!f || (fi->fib_flags & RTF_REJECT))
        {
#ifdef CONFIG_KERNELD
                char wanted_route[20];
#endif
#if RT_CACHE_DEBUG >= 2
                printk("rt_route failed @%08x\n", daddr);
#endif
                ip_rt_unlock();
                kfree_s(rth, sizeof(struct rtable));
#ifdef CONFIG_KERNELD
                if (MULTICAST(daddr))
                        return NULL;
                daddr=ntohl(daddr);
                sprintf(wanted_route, "%d.%d.%d.%d",
                        (int)(daddr >> 24) & 0xff, (int)(daddr >> 16) & 0xff,
                        (int)(daddr >> 8) & 0xff, (int)daddr & 0xff);
                kerneld_route(wanted_route);    /* Dynamic route request */
#endif
                return NULL;
        }

        saddr = fi->fib_dev->pa_addr;

        if (daddr == fi->fib_dev->pa_addr)
        {
                f->fib_use--;
                if ((f = fib_loopback) != NULL)
                {
                        f->fib_use++;
                        fi = f->fib_info;
                }
        }

        if (!f)
        {
                ip_rt_unlock();
                kfree_s(rth, sizeof(struct rtable));
                return NULL;
        }

        rth->rt_dst     = daddr;
        rth->rt_src     = saddr;
        rth->rt_lastuse = jiffies;
        rth->rt_refcnt  = 1;
        rth->rt_use     = 1;
        rth->rt_next    = NULL;
        rth->rt_hh      = NULL;
        rth->rt_gateway = fi->fib_gateway;
        rth->rt_dev     = fi->fib_dev;
        rth->rt_mtu     = fi->fib_mtu;
        rth->rt_window  = fi->fib_window;
        rth->rt_irtt    = fi->fib_irtt;
        rth->rt_tos     = f->fib_tos;
        rth->rt_flags   = fi->fib_flags | RTF_HOST;
        if (local)
                rth->rt_flags   |= RTF_LOCAL;

        if (!(rth->rt_flags & RTF_GATEWAY))
                rth->rt_gateway = rth->rt_dst;
        /*
         *      Multicast or limited broadcast is never gatewayed.
         */
        if (MULTICAST(daddr) || daddr == 0xFFFFFFFF)
                rth->rt_gateway = rth->rt_dst;

        if (ip_rt_lock == 1)
        {
                /* Don't add this to the rt_cache if a device was specified,
                 * because we might have skipped better routes which didn't
                 * point at the right device. */
                if (dev != NULL)
                        rth->rt_flags |= RTF_NOTCACHED;
                else
                        rt_cache_add(hash, rth);
        }
        else
        {
                rt_free(rth);
#if RT_CACHE_DEBUG >= 1
                printk(KERN_DEBUG "rt_cache: route to %08x was born dead\n", daddr);
#endif
        }

        ip_rt_unlock();
        return rth;
}

void ip_rt_put(struct rtable * rt)
{
        /* If this rtable entry is not in the cache, we'd better free
         * it once the refcnt goes to zero, because nobody else will.
         */
        if (rt && atomic_dec_and_test(&rt->rt_refcnt) && (rt->rt_flags & RTF_NOTCACHED))
                rt_free(rt);
}

/*
 *      Return the routing dev for a given address.
 *      Called by the ip_alias module to avoid using ip_rt_route and
 *      generating hhs.
 */
struct device * ip_rt_dev(__u32 addr)
{
        struct fib_node *f;
        f = fib_lookup(addr, NULL);
        if (f)
                return f->fib_info->fib_dev;
        return NULL;
}

struct rtable * ip_rt_route(__u32 daddr, int local, struct device *dev)
{
        struct rtable * rth;

        ip_rt_fast_lock();

        for (rth=ip_rt_hash_table[ip_rt_hash_code(daddr)^local]; rth; rth=rth->rt_next)
        {
                /* If a network device is specified, make sure this route points to it. */
                if ( (rth->rt_dst == daddr) && ((dev==NULL) || (dev==rth->rt_dev)) )
                {
                        rth->rt_lastuse = jiffies;
                        atomic_inc(&rth->rt_use);
                        atomic_inc(&rth->rt_refcnt);
                        ip_rt_unlock();
                        return rth;
                }
        }
        return ip_rt_slow_route(daddr, local, dev);
}

/*
 *      Process a route add request from the user, or from a kernel
 *      task.
 */

int ip_rt_new(struct rtentry *r)
{
        int err;
        char * devname;
        struct device * dev = NULL;
        unsigned long flags;
        __u32 daddr, mask, gw;
        short metric;

        /*
         *      If a device is specified, find it.
         */

        if ((devname = r->rt_dev) != NULL)
        {
                err = getname(devname, &devname);
                if (err)
                        return err;
                dev = dev_get(devname);
                putname(devname);
                if (!dev)
                        return -ENODEV;
        }

        /*
         *      If the address family isn't INET, don't allow it.
         */

        if (r->rt_dst.sa_family != AF_INET)
                return -EAFNOSUPPORT;

        /*
         *      Make local copies of the important bits.
         *      We decrement the metric by one for BSD compatibility.
         */

        flags = r->rt_flags;
        daddr = (__u32) ((struct sockaddr_in *) &r->rt_dst)->sin_addr.s_addr;
        mask  = (__u32) ((struct sockaddr_in *) &r->rt_genmask)->sin_addr.s_addr;
        gw    = (__u32) ((struct sockaddr_in *) &r->rt_gateway)->sin_addr.s_addr;
        metric = r->rt_metric > 0 ? r->rt_metric - 1 : 0;

        /*
         *      BSD emulation: Permits route add someroute gw one-of-my-addresses
         *      to indicate which iface. Not as clean as the nice Linux dev technique
         *      but people keep using it...  (and gated likes it ;))
         */

        if (!dev && (flags & RTF_GATEWAY))
        {
                struct device *dev2;
                for (dev2 = dev_base ; dev2 != NULL ; dev2 = dev2->next)
                {
                        if ((dev2->flags & IFF_UP) && dev2->pa_addr == gw)
                        {
                                flags &= ~RTF_GATEWAY;
                                dev = dev2;
                                break;
                        }
                }
        }

        if (flags & RTF_HOST)
                mask = 0xffffffff;
        else if (mask && r->rt_genmask.sa_family != AF_INET)
                return -EAFNOSUPPORT;

        if (flags & RTF_GATEWAY)
        {
                if (r->rt_gateway.sa_family != AF_INET)
                        return -EAFNOSUPPORT;

                /*
                 *      Don't try to add a gateway we can't reach.
                 *      Tunnel devices are exempt from this rule.
                 */

                if (!dev)
                        dev = get_gw_dev(gw);
                else if (dev != get_gw_dev(gw) && dev->type != ARPHRD_TUNNEL)
                        return -EINVAL;
                if (!dev)
                        return -ENETUNREACH;
        }
        else
        {
                gw = 0;
                if (!dev)
                        dev = ip_dev_bynet(daddr, mask);
                if (!dev)
                        return -ENETUNREACH;
                if (!mask)
                {
                        if (((daddr ^ dev->pa_addr) & dev->pa_mask) == 0)
                                mask = dev->pa_mask;
                }
        }

#ifndef CONFIG_IP_CLASSLESS
        if (!mask)
                mask = ip_get_mask(daddr);
#endif

        if (bad_mask(mask, daddr))
                return -EINVAL;

        /*
         *      Add the route.
         */

        rt_add(flags, daddr, mask, gw, dev, r->rt_mss, r->rt_window, r->rt_irtt, metric);
        return 0;
}


/*
 *      Remove a route, as requested by the user.
 */

int ip_rt_kill(struct rtentry *r)
{
        struct sockaddr_in *trg;
        struct sockaddr_in *msk;
        struct sockaddr_in *gtw;
        char *devname;
        int err;
        struct device * dev = NULL;

        trg = (struct sockaddr_in *) &r->rt_dst;
        msk = (struct sockaddr_in *) &r->rt_genmask;
        gtw = (struct sockaddr_in *) &r->rt_gateway;
        if ((devname = r->rt_dev) != NULL)
        {
                err = getname(devname, &devname);
                if (err)
                        return err;
                dev = dev_get(devname);
                putname(devname);
                if (!dev)
                        return -ENODEV;
        }
        /*
         * The metric can become negative here if it wasn't filled in,
         * but that's a fortunate accident; we rely on that in rt_del.
         */
        err = rt_del((__u32)trg->sin_addr.s_addr, (__u32)msk->sin_addr.s_addr, dev,
                (__u32)gtw->sin_addr.s_addr, r->rt_flags, r->rt_metric - 1);
        return err;
}

/*
 *      Handle IP routing ioctl calls. These are used to manipulate the routing tables.
 */

int ip_rt_ioctl(unsigned int cmd, void *arg)
{
        int err;
        struct rtentry rt;

        switch (cmd)
        {
                case SIOCADDRT:         /* Add a route */
                case SIOCDELRT:         /* Delete a route */
                        if (!suser())
                                return -EPERM;
                        err = verify_area(VERIFY_READ, arg, sizeof(struct rtentry));
                        if (err)
                                return err;
                        memcpy_fromfs(&rt, arg, sizeof(struct rtentry));
                        return (cmd == SIOCDELRT) ? ip_rt_kill(&rt) : ip_rt_new(&rt);
        }

        return -EINVAL;
}

void ip_rt_advice(struct rtable **rp, int advice)
{
        /* Thanks! */
        return;
}

void ip_rt_update(int event, struct device *dev)
{
/*
 *      This causes too much grief to do now.
 */
#ifdef COMING_IN_2_1
        if (event == NETDEV_UP)
                rt_add(RTF_HOST|RTF_UP, dev->pa_addr, ~0, 0, dev, 0, 0, 0, 0);
        else if (event == NETDEV_DOWN)
                rt_del(dev->pa_addr, ~0, dev, 0, RTF_HOST|RTF_UP, 0);
#endif
}
