OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

[/] [or1k/] [trunk/] [linux/] [linux-2.4/] [net/] [core/] [neighbour.c] - Blame information for rev 1765

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 1278 phoenix
/*
2
 *      Generic address resolution entity
3
 *
4
 *      Authors:
5
 *      Pedro Roque             <roque@di.fc.ul.pt>
6
 *      Alexey Kuznetsov        <kuznet@ms2.inr.ac.ru>
7
 *
8
 *      This program is free software; you can redistribute it and/or
9
 *      modify it under the terms of the GNU General Public License
10
 *      as published by the Free Software Foundation; either version
11
 *      2 of the License, or (at your option) any later version.
12
 *
13
 *      Fixes:
14
 *      Vitaly E. Lavrov        releasing NULL neighbor in neigh_add.
15
 */
16
 
17
#include <linux/config.h>
18
#include <linux/types.h>
19
#include <linux/kernel.h>
20
#include <linux/socket.h>
21
#include <linux/sched.h>
22
#include <linux/netdevice.h>
23
#ifdef CONFIG_SYSCTL
24
#include <linux/sysctl.h>
25
#endif
26
#include <net/neighbour.h>
27
#include <net/dst.h>
28
#include <net/sock.h>
29
#include <linux/rtnetlink.h>
30
 
31
#define NEIGH_DEBUG 1
32
 
33
#define NEIGH_PRINTK(x...) printk(x)
34
#define NEIGH_NOPRINTK(x...) do { ; } while(0)
35
#define NEIGH_PRINTK0 NEIGH_PRINTK
36
#define NEIGH_PRINTK1 NEIGH_NOPRINTK
37
#define NEIGH_PRINTK2 NEIGH_NOPRINTK
38
 
39
#if NEIGH_DEBUG >= 1
40
#undef NEIGH_PRINTK1
41
#define NEIGH_PRINTK1 NEIGH_PRINTK
42
#endif
43
#if NEIGH_DEBUG >= 2
44
#undef NEIGH_PRINTK2
45
#define NEIGH_PRINTK2 NEIGH_PRINTK
46
#endif
47
 
48
static void neigh_timer_handler(unsigned long arg);
49
#ifdef CONFIG_ARPD
50
static void neigh_app_notify(struct neighbour *n);
51
#endif
52
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
53
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
54
 
55
static int neigh_glbl_allocs;
56
static struct neigh_table *neigh_tables;
57
 
58
/*
59
   Neighbour hash table buckets are protected with rwlock tbl->lock.
60
 
61
   - All the scans/updates to hash buckets MUST be made under this lock.
62
   - NOTHING clever should be made under this lock: no callbacks
63
     to protocol backends, no attempts to send something to network.
64
     It will result in deadlocks, if backend/driver wants to use neighbour
65
     cache.
66
   - If the entry requires some non-trivial actions, increase
67
     its reference count and release table lock.
68
 
69
   Neighbour entries are protected:
70
   - with reference count.
71
   - with rwlock neigh->lock
72
 
73
   Reference count prevents destruction.
74
 
75
   neigh->lock mainly serializes ll address data and its validity state.
76
   However, the same lock is used to protect another entry fields:
77
    - timer
78
    - resolution queue
79
 
80
   Again, nothing clever shall be made under neigh->lock,
81
   the most complicated procedure, which we allow is dev->hard_header.
82
   It is supposed, that dev->hard_header is simplistic and does
83
   not make callbacks to neighbour tables.
84
 
85
   The last lock is neigh_tbl_lock. It is pure SMP lock, protecting
86
   list of neighbour tables. This list is used only in process context.
 */
88
 
89
static rwlock_t neigh_tbl_lock = RW_LOCK_UNLOCKED;
90
 
91
static int neigh_blackhole(struct sk_buff *skb)
92
{
93
        kfree_skb(skb);
94
        return -ENETDOWN;
95
}
96
 
97
/*
98
 * It is random distribution in the interval (1/2)*base...(3/2)*base.
99
 * It corresponds to default IPv6 settings and is not overridable,
100
 * because it is really reasonable choice.
101
 */
102
 
103
/* Pick a randomized reachability interval uniformly distributed in
 * (base/2, 3*base/2), per the comment above (default IPv6 behaviour).
 */
unsigned long neigh_rand_reach_time(unsigned long base)
{
	unsigned long half = base >> 1;

	return half + (net_random() % base);
}
107
 
108
 
109
/*
 * Forced garbage collection: walk every hash bucket and unlink entries
 * that are unreferenced, not permanent and (if INCOMPLETE) unused for
 * longer than one retransmit interval.  Returns 1 if anything was freed.
 * Takes tbl->lock per bucket, and each entry's lock nested inside it.
 */
static int neigh_forced_gc(struct neigh_table *tbl)
{
	int shrunk = 0;
	int i;

	for (i=0; i<=NEIGH_HASHMASK; i++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[i];
		write_lock_bh(&tbl->lock);
		while ((n = *np) != NULL) {
			/* Neighbour record may be discarded if:
			   - nobody refers to it.
			   - it is not permanent
			   - (NEW and probably wrong)
			     INCOMPLETE entries are kept at least for
			     n->parms->retrans_time, otherwise we could
			     flood network with resolution requests.
			     It is not clear, what is better table overflow
			     or flooding.
			 */
			write_lock(&n->lock);
			if (atomic_read(&n->refcnt) == 1 &&
			    !(n->nud_state&NUD_PERMANENT) &&
			    (n->nud_state != NUD_INCOMPLETE ||
			     jiffies - n->used > n->parms->retrans_time)) {
				*np = n->next;
				/* Mark dead so the final neigh_release()
				 * really destroys the entry. */
				n->dead = 1;
				shrunk = 1;
				write_unlock(&n->lock);
				neigh_release(n);
				continue;
			}
			write_unlock(&n->lock);
			np = &n->next;
		}
		write_unlock_bh(&tbl->lock);
	}

	/* Remember when we last flushed; neigh_alloc() rate-limits on this. */
	tbl->last_flush = jiffies;
	return shrunk;
}
151
 
152
static int neigh_del_timer(struct neighbour *n)
153
{
154
        if (n->nud_state & NUD_IN_TIMER) {
155
                if (del_timer(&n->timer)) {
156
                        neigh_release(n);
157
                        return 1;
158
                }
159
        }
160
        return 0;
161
}
162
 
163
static void pneigh_queue_purge(struct sk_buff_head *list)
164
{
165
        struct sk_buff *skb;
166
 
167
        while ((skb = skb_dequeue(list)) != NULL) {
168
                dev_put(skb->dev);
169
                kfree_skb(skb);
170
        }
171
}
172
 
173
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
174
{
175
        int i;
176
 
177
        write_lock_bh(&tbl->lock);
178
 
179
        for (i=0; i <= NEIGH_HASHMASK; i++) {
180
                struct neighbour *n, **np;
181
 
182
                np = &tbl->hash_buckets[i];
183
                while ((n = *np) != NULL) {
184
                        if (dev && n->dev != dev) {
185
                                np = &n->next;
186
                                continue;
187
                        }
188
                        *np = n->next;
189
                        write_lock_bh(&n->lock);
190
                        n->dead = 1;
191
                        neigh_del_timer(n);
192
                        write_unlock_bh(&n->lock);
193
                        neigh_release(n);
194
                }
195
        }
196
 
197
        write_unlock_bh(&tbl->lock);
198
}
199
 
200
/*
 * Tear down all neighbour state for a device going down (dev == NULL
 * means the whole table).  Entries are unlinked and marked dead; entries
 * still referenced elsewhere are neutered (blackholed output, timers
 * killed) so they are harmless until the last reference drops.
 * Proxy entries and the proxy queue/timer are flushed as well.
 * Always returns 0.
 */
int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
	int i;

	write_lock_bh(&tbl->lock);

	for (i=0; i<=NEIGH_HASHMASK; i++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[i];
		while ((n = *np) != NULL) {
			if (dev && n->dev != dev) {
				np = &n->next;
				continue;
			}
			*np = n->next;
			/* BH already off via tbl->lock; nest plain lock. */
			write_lock(&n->lock);
			neigh_del_timer(n);
			n->dead = 1;

			if (atomic_read(&n->refcnt) != 1) {
				/* The most unpleasant situation.
				   We must destroy neighbour entry,
				   but someone still uses it.

				   The destroy will be delayed until
				   the last user releases us, but
				   we must kill timers etc. and move
				   it to safe state.
				 */
				/* Detach from per-device parms (being freed)
				 * and fall back to the table defaults. */
				n->parms = &tbl->parms;
				skb_queue_purge(&n->arp_queue);
				n->output = neigh_blackhole;
				if (n->nud_state&NUD_VALID)
					n->nud_state = NUD_NOARP;
				else
					n->nud_state = NUD_NONE;
				NEIGH_PRINTK2("neigh %p is stray.\n", n);
			}
			write_unlock(&n->lock);
			neigh_release(n);
		}
	}

	/* Drop proxy entries for this device too, still under tbl->lock. */
	pneigh_ifdown(tbl, dev);
	write_unlock_bh(&tbl->lock);

	del_timer_sync(&tbl->proxy_timer);
	pneigh_queue_purge(&tbl->proxy_queue);
	return 0;
}
251
 
252
/*
 * Allocate and minimally initialize a neighbour entry for tbl.
 * If the table is over gc_thresh3, or over gc_thresh2 with no flush in
 * the last 5 seconds, a forced GC pass is attempted first; allocation
 * fails (NULL) if GC frees nothing and we are still over gc_thresh3.
 * The new entry starts with refcnt 1 and dead = 1: it only becomes
 * "alive" once neigh_create() links it into the hash table.
 */
static struct neighbour *neigh_alloc(struct neigh_table *tbl)
{
	struct neighbour *n;
	unsigned long now = jiffies;

	if (tbl->entries > tbl->gc_thresh3 ||
	    (tbl->entries > tbl->gc_thresh2 &&
	     now - tbl->last_flush > 5*HZ)) {
		if (neigh_forced_gc(tbl) == 0 &&
		    tbl->entries > tbl->gc_thresh3)
			return NULL;
	}

	/* Atomic allocation: may be called from softirq context. */
	n = kmem_cache_alloc(tbl->kmem_cachep, SLAB_ATOMIC);
	if (n == NULL)
		return NULL;

	memset(n, 0, tbl->entry_size);

	skb_queue_head_init(&n->arp_queue);
	n->lock = RW_LOCK_UNLOCKED;
	n->updated = n->used = now;
	n->nud_state = NUD_NONE;
	/* Unresolved entries drop packets until a real output is set. */
	n->output = neigh_blackhole;
	n->parms = &tbl->parms;
	init_timer(&n->timer);
	n->timer.function = neigh_timer_handler;
	n->timer.data = (unsigned long)n;
	tbl->stats.allocs++;
	neigh_glbl_allocs++;
	tbl->entries++;
	n->tbl = tbl;
	atomic_set(&n->refcnt, 1);
	n->dead = 1;
	return n;
}
288
 
289
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
290
                               struct net_device *dev)
291
{
292
        struct neighbour *n;
293
        u32 hash_val;
294
        int key_len = tbl->key_len;
295
 
296
        hash_val = tbl->hash(pkey, dev);
297
 
298
        read_lock_bh(&tbl->lock);
299
        for (n = tbl->hash_buckets[hash_val]; n; n = n->next) {
300
                if (dev == n->dev &&
301
                    memcmp(n->primary_key, pkey, key_len) == 0) {
302
                        neigh_hold(n);
303
                        break;
304
                }
305
        }
306
        read_unlock_bh(&tbl->lock);
307
        return n;
308
}
309
 
310
/*
 * Create and insert a neighbour entry for (pkey, dev).
 * Runs the protocol constructor and device setup hook outside the table
 * lock, then re-checks the hash chain under the lock: if a concurrent
 * creator inserted an equivalent entry first, that entry is returned
 * (with a reference) and ours is discarded.
 * Returns the entry (referenced) or an ERR_PTR on failure.
 */
struct neighbour * neigh_create(struct neigh_table *tbl, const void *pkey,
                                struct net_device *dev)
{
	struct neighbour *n, *n1;
	u32 hash_val;
	int key_len = tbl->key_len;
	int error;

	n = neigh_alloc(tbl);
	if (n == NULL)
		return ERR_PTR(-ENOBUFS);

	memcpy(n->primary_key, pkey, key_len);
	n->dev = dev;
	dev_hold(dev);

	/* Protocol specific setup. */
	if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
		neigh_release(n);
		return ERR_PTR(error);
	}

	/* Device specific setup. */
	if (n->parms->neigh_setup &&
	    (error = n->parms->neigh_setup(n)) < 0) {
		neigh_release(n);
		return ERR_PTR(error);
	}

	/* Backdate confirmation so the entry is not treated as reachable. */
	n->confirmed = jiffies - (n->parms->base_reachable_time<<1);

	hash_val = tbl->hash(pkey, dev);

	write_lock_bh(&tbl->lock);
	/* Someone may have raced us here; prefer the already-linked entry. */
	for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
		if (dev == n1->dev &&
		    memcmp(n1->primary_key, pkey, key_len) == 0) {
			neigh_hold(n1);
			write_unlock_bh(&tbl->lock);
			neigh_release(n);
			return n1;
		}
	}

	n->next = tbl->hash_buckets[hash_val];
	tbl->hash_buckets[hash_val] = n;
	/* Now linked: clear the dead flag set by neigh_alloc(). */
	n->dead = 0;
	neigh_hold(n);
	write_unlock_bh(&tbl->lock);
	NEIGH_PRINTK2("neigh %p is created.\n", n);
	return n;
}
362
 
363
/*
 * Look up a proxy-neighbour entry by key; a wildcard entry (n->dev == NULL)
 * matches any device.  If absent and creat != 0, allocate, run the
 * protocol's pconstructor and link a new entry.
 * Uses GFP_KERNEL, so the create path may sleep (process context only).
 * NOTE(review): the lock is dropped between the lookup and the insert, so
 * two concurrent creators could insert duplicates — presumably serialized
 * by the caller (e.g. RTNL); confirm before relying on it.
 */
struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl, const void *pkey,
                                    struct net_device *dev, int creat)
{
	struct pneigh_entry *n;
	u32 hash_val;
	int key_len = tbl->key_len;

	/* Hash the last 4 bytes of the key, folded down to PNEIGH_HASHMASK. */
	hash_val = *(u32*)(pkey + key_len - 4);
	hash_val ^= (hash_val>>16);
	hash_val ^= hash_val>>8;
	hash_val ^= hash_val>>4;
	hash_val &= PNEIGH_HASHMASK;

	read_lock_bh(&tbl->lock);

	for (n = tbl->phash_buckets[hash_val]; n; n = n->next) {
		if (memcmp(n->key, pkey, key_len) == 0 &&
		    (n->dev == dev || !n->dev)) {
			read_unlock_bh(&tbl->lock);
			return n;
		}
	}
	read_unlock_bh(&tbl->lock);
	if (!creat)
		return NULL;

	/* Key storage is a flexible tail after the struct. */
	n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
	if (n == NULL)
		return NULL;

	memcpy(n->key, pkey, key_len);
	n->dev = dev;

	if (tbl->pconstructor && tbl->pconstructor(n)) {
		kfree(n);
		return NULL;
	}

	write_lock_bh(&tbl->lock);
	n->next = tbl->phash_buckets[hash_val];
	tbl->phash_buckets[hash_val] = n;
	write_unlock_bh(&tbl->lock);
	return n;
}
407
 
408
 
409
/*
 * Delete the proxy-neighbour entry exactly matching (pkey, dev).
 * Returns 0 on success, -ENOENT if no such entry exists.
 * NOTE(review): the chain is traversed without tbl->lock; only the unlink
 * itself is locked.  Presumably the caller serializes modifications
 * (e.g. via RTNL) — confirm against callers before changing this.
 */
int pneigh_delete(struct neigh_table *tbl, const void *pkey, struct net_device *dev)
{
	struct pneigh_entry *n, **np;
	u32 hash_val;
	int key_len = tbl->key_len;

	/* Same hash folding as pneigh_lookup(). */
	hash_val = *(u32*)(pkey + key_len - 4);
	hash_val ^= (hash_val>>16);
	hash_val ^= hash_val>>8;
	hash_val ^= hash_val>>4;
	hash_val &= PNEIGH_HASHMASK;

	for (np = &tbl->phash_buckets[hash_val]; (n=*np) != NULL; np = &n->next) {
		if (memcmp(n->key, pkey, key_len) == 0 && n->dev == dev) {
			write_lock_bh(&tbl->lock);
			*np = n->next;
			write_unlock_bh(&tbl->lock);
			if (tbl->pdestructor)
				tbl->pdestructor(n);
			kfree(n);
			return 0;
		}
	}
	return -ENOENT;
}
434
 
435
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
436
{
437
        struct pneigh_entry *n, **np;
438
        u32 h;
439
 
440
        for (h=0; h<=PNEIGH_HASHMASK; h++) {
441
                np = &tbl->phash_buckets[h];
442
                while ((n=*np) != NULL) {
443
                        if (n->dev == dev || dev == NULL) {
444
                                *np = n->next;
445
                                if (tbl->pdestructor)
446
                                        tbl->pdestructor(n);
447
                                kfree(n);
448
                                continue;
449
                        }
450
                        np = &n->next;
451
                }
452
        }
453
        return -ENOENT;
454
}
455
 
456
 
457
/*
458
 *      neighbour must already be out of the table;
459
 *
460
 */
461
/*
 * Final destruction of a neighbour entry whose last reference dropped.
 * The entry must already be dead (unlinked from the table); a live entry
 * is reported and leaked rather than corrupting the hash chain.
 * Detaches and blackholes all cached hard headers, runs the ops
 * destructor, purges queued packets, drops the device reference and
 * returns the object to its slab cache.
 */
void neigh_destroy(struct neighbour *neigh)
{
	struct hh_cache *hh;

	if (!neigh->dead) {
		printk("Destroying alive neighbour %p\n", neigh);
		dump_stack();
		return;
	}

	/* Timers should have been killed before the last release. */
	if (neigh_del_timer(neigh))
		printk("Impossible event.\n");

	while ((hh = neigh->hh) != NULL) {
		neigh->hh = hh->hh_next;
		hh->hh_next = NULL;
		/* Blackhole the cached header; it may outlive us via its
		 * own refcount. */
		write_lock_bh(&hh->hh_lock);
		hh->hh_output = neigh_blackhole;
		write_unlock_bh(&hh->hh_lock);
		if (atomic_dec_and_test(&hh->hh_refcnt))
			kfree(hh);
	}

	if (neigh->ops && neigh->ops->destructor)
		(neigh->ops->destructor)(neigh);

	skb_queue_purge(&neigh->arp_queue);

	dev_put(neigh->dev);

	NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);

	neigh_glbl_allocs--;
	neigh->tbl->entries--;
	kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
}
497
 
498
/* Neighbour state is suspicious;
499
   disable fast path.
500
 
501
   Called with write_locked neigh.
502
 */
503
static void neigh_suspect(struct neighbour *neigh)
504
{
505
        struct hh_cache *hh;
506
 
507
        NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
508
 
509
        neigh->output = neigh->ops->output;
510
 
511
        for (hh = neigh->hh; hh; hh = hh->hh_next)
512
                hh->hh_output = neigh->ops->output;
513
}
514
 
515
/* Neighbour state is OK;
516
   enable fast path.
517
 
518
   Called with write_locked neigh.
519
 */
520
static void neigh_connect(struct neighbour *neigh)
521
{
522
        struct hh_cache *hh;
523
 
524
        NEIGH_PRINTK2("neigh %p is connected.\n", neigh);
525
 
526
        neigh->output = neigh->ops->connected_output;
527
 
528
        for (hh = neigh->hh; hh; hh = hh->hh_next)
529
                hh->hh_output = neigh->ops->hh_output;
530
}
531
 
532
/*
533
   Transitions NUD_STALE <-> NUD_REACHABLE do not occur
534
   when fast path is built: we have no timers associated with
535
   these states, we do not have time to check state when sending.
536
   neigh_periodic_timer check periodically neigh->confirmed
537
   time and moves NUD_REACHABLE -> NUD_STALE.
538
 
539
   If a routine wants to know TRUE entry state, it calls
540
   neigh_sync before checking state.
541
 
542
   Called with write_locked neigh.
543
 */
544
 
545
/*
 * Bring the entry's NUD state up to date with its confirmation time
 * (see the long comment above): REACHABLE entries past reachable_time
 * drop to STALE; other VALID entries recently confirmed are promoted
 * back to REACHABLE.  NOARP/PERMANENT entries never change.
 * Called with neigh write-locked.
 */
static void neigh_sync(struct neighbour *n)
{
	unsigned long now = jiffies;
	u8 state = n->nud_state;

	if (state&(NUD_NOARP|NUD_PERMANENT))
		return;
	if (state&NUD_REACHABLE) {
		if (now - n->confirmed > n->parms->reachable_time) {
			n->nud_state = NUD_STALE;
			neigh_suspect(n);
		}
	} else if (state&NUD_VALID) {
		if (now - n->confirmed < n->parms->reachable_time) {
			/* Promotion cancels any pending probe timer. */
			neigh_del_timer(n);
			n->nud_state = NUD_REACHABLE;
			neigh_connect(n);
		}
	}
}
565
 
566
/*
 * Periodic garbage-collection pass over the whole table (runs from the
 * gc timer; on SMP, deferred to a tasklet — see the wrapper below).
 * Re-randomizes reachable_time every 300s, reaps unreferenced
 * FAILED/stale entries, and demotes aged REACHABLE entries to STALE.
 */
static void SMP_TIMER_NAME(neigh_periodic_timer)(unsigned long arg)
{
	struct neigh_table *tbl = (struct neigh_table*)arg;
	unsigned long now = jiffies;
	int i;


	write_lock(&tbl->lock);

	/*
	 *	periodicly recompute ReachableTime from random function
	 */

	if (now - tbl->last_rand > 300*HZ) {
		struct neigh_parms *p;
		tbl->last_rand = now;
		for (p=&tbl->parms; p; p = p->next)
			p->reachable_time = neigh_rand_reach_time(p->base_reachable_time);
	}

	for (i=0; i <= NEIGH_HASHMASK; i++) {
		struct neighbour *n, **np;

		np = &tbl->hash_buckets[i];
		while ((n = *np) != NULL) {
			unsigned state;

			write_lock(&n->lock);

			state = n->nud_state;
			/* Entries with an active timer (or permanent ones)
			 * are managed elsewhere; skip them. */
			if (state&(NUD_PERMANENT|NUD_IN_TIMER)) {
				write_unlock(&n->lock);
				goto next_elt;
			}

			/* used must never lag behind confirmed. */
			if ((long)(n->used - n->confirmed) < 0)
				n->used = n->confirmed;

			/* Reap unreferenced entries that failed or idled
			 * past gc_staletime. */
			if (atomic_read(&n->refcnt) == 1 &&
			    (state == NUD_FAILED || now - n->used > n->parms->gc_staletime)) {
				*np = n->next;
				n->dead = 1;
				write_unlock(&n->lock);
				neigh_release(n);
				continue;
			}

			if (n->nud_state&NUD_REACHABLE &&
			    now - n->confirmed > n->parms->reachable_time) {
				n->nud_state = NUD_STALE;
				neigh_suspect(n);
			}
			write_unlock(&n->lock);

next_elt:
			np = &n->next;
		}
	}

	/* Re-arm for the next GC interval. */
	mod_timer(&tbl->gc_timer, now + tbl->gc_interval);
	write_unlock(&tbl->lock);
}
628
 
629
#ifdef CONFIG_SMP
630
static void neigh_periodic_timer(unsigned long arg)
631
{
632
        struct neigh_table *tbl = (struct neigh_table*)arg;
633
 
634
        tasklet_schedule(&tbl->gc_task);
635
}
636
#endif
637
 
638
static __inline__ int neigh_max_probes(struct neighbour *n)
639
{
640
        struct neigh_parms *p = n->parms;
641
        return p->ucast_probes + p->app_probes + p->mcast_probes;
642
}
643
 
644
 
645
/* Called when a timer expires for a neighbour entry. */
646
 
647
/* Called when a timer expires for a neighbour entry.
 *
 * Drives the INCOMPLETE/DELAY/PROBE state machine: confirm the entry as
 * REACHABLE if recently confirmed, escalate DELAY to PROBE, fail the
 * entry once the probe budget is exhausted (flushing its queue with
 * error reports), or otherwise re-arm the timer and send another
 * solicitation.  The timer holds a reference, released on every exit
 * path that does not re-arm.
 */
static void neigh_timer_handler(unsigned long arg)
{
	unsigned long now = jiffies;
	struct neighbour *neigh = (struct neighbour*)arg;
	unsigned state;
	int notify = 0;

	write_lock(&neigh->lock);

	state = neigh->nud_state;

	if (!(state&NUD_IN_TIMER)) {
#ifndef CONFIG_SMP
		printk("neigh: timer & !nud_in_timer\n");
#endif
		goto out;
	}

	if ((state&NUD_VALID) &&
	    now - neigh->confirmed < neigh->parms->reachable_time) {
		neigh->nud_state = NUD_REACHABLE;
		NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
		neigh_connect(neigh);
		goto out;
	}
	if (state == NUD_DELAY) {
		NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
		neigh->nud_state = NUD_PROBE;
		atomic_set(&neigh->probes, 0);
	}

	if (atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
		struct sk_buff *skb;

		neigh->nud_state = NUD_FAILED;
		notify = 1;
		neigh->tbl->stats.res_failed++;
		NEIGH_PRINTK2("neigh %p is failed.\n", neigh);

		/* It is very thin place. report_unreachable is very complicated
		   routine. Particularly, it can hit the same neighbour entry!

		   So that, we try to be accurate and avoid dead loop. --ANK
		 */
		while(neigh->nud_state==NUD_FAILED && (skb=__skb_dequeue(&neigh->arp_queue)) != NULL) {
			/* Lock dropped around the callback; state is
			 * re-checked each iteration. */
			write_unlock(&neigh->lock);
			neigh->ops->error_report(neigh, skb);
			write_lock(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
		goto out;
	}

	/* Re-arm and send another solicitation; the timer keeps its
	 * reference, so do NOT release on this path. */
	neigh->timer.expires = now + neigh->parms->retrans_time;
	add_timer(&neigh->timer);
	write_unlock(&neigh->lock);

	neigh->ops->solicit(neigh, skb_peek(&neigh->arp_queue));
	atomic_inc(&neigh->probes);
	return;

out:
	write_unlock(&neigh->lock);
#ifdef CONFIG_ARPD
	if (notify && neigh->parms->app_probes)
		neigh_app_notify(neigh);
#endif
	neigh_release(neigh);
}
716
 
717
/*
 * Slow path of neigh_event_send(): start or continue resolution for an
 * entry that is not CONNECTED/DELAY/PROBE.
 * Returns 1 if the packet was consumed (queued for later transmission,
 * or freed because resolution cannot even start), 0 if the caller may
 * transmit immediately.
 */
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
	write_lock_bh(&neigh->lock);
	if (!(neigh->nud_state&(NUD_CONNECTED|NUD_DELAY|NUD_PROBE))) {
		if (!(neigh->nud_state&(NUD_STALE|NUD_INCOMPLETE))) {
			if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
				/* Begin resolution: unicast probes are
				 * "pre-spent" so only mcast/app probes count
				 * toward neigh_max_probes(). */
				atomic_set(&neigh->probes, neigh->parms->ucast_probes);
				neigh->nud_state = NUD_INCOMPLETE;
				/* Reference for the pending timer. */
				neigh_hold(neigh);
				neigh->timer.expires = jiffies + neigh->parms->retrans_time;
				add_timer(&neigh->timer);
				/* Drop the lock around the solicit callback. */
				write_unlock_bh(&neigh->lock);
				neigh->ops->solicit(neigh, skb);
				atomic_inc(&neigh->probes);
				write_lock_bh(&neigh->lock);
			} else {
				/* No way to probe at all: fail immediately. */
				neigh->nud_state = NUD_FAILED;
				write_unlock_bh(&neigh->lock);

				if (skb)
					kfree_skb(skb);
				return 1;
			}
		}
		if (neigh->nud_state == NUD_INCOMPLETE) {
			if (skb) {
				/* Queue full: drop the oldest packet to make
				 * room for the new one. */
				if (skb_queue_len(&neigh->arp_queue) >= neigh->parms->queue_len) {
					struct sk_buff *buff;
					buff = neigh->arp_queue.next;
					__skb_unlink(buff, &neigh->arp_queue);
					kfree_skb(buff);
				}
				__skb_queue_tail(&neigh->arp_queue, skb);
			}
			write_unlock_bh(&neigh->lock);
			return 1;
		}
		if (neigh->nud_state == NUD_STALE) {
			NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
			/* Reference for the delay-probe timer. */
			neigh_hold(neigh);
			neigh->nud_state = NUD_DELAY;
			neigh->timer.expires = jiffies + neigh->parms->delay_probe_time;
			add_timer(&neigh->timer);
		}
	}
	write_unlock_bh(&neigh->lock);
	return 0;
}
765
 
766
static __inline__ void neigh_update_hhs(struct neighbour *neigh)
767
{
768
        struct hh_cache *hh;
769
        void (*update)(struct hh_cache*, struct net_device*, unsigned char*) =
770
                neigh->dev->header_cache_update;
771
 
772
        if (update) {
773
                for (hh=neigh->hh; hh; hh=hh->hh_next) {
774
                        write_lock_bh(&hh->hh_lock);
775
                        update(hh, neigh->dev, neigh->ha);
776
                        write_unlock_bh(&hh->hh_lock);
777
                }
778
        }
779
}
780
 
781
 
782
 
783
/* Generic update routine.
784
   -- lladdr is new lladdr or NULL, if it is not supplied.
785
   -- new    is new state.
786
   -- override==1 allows to override existing lladdr, if it is different.
787
   -- arp==0 means that the change is administrative.
788
 
789
   Caller MUST hold reference count on the entry.
790
 */
791
 
792
/*
 * Apply a new link-layer address and/or NUD state to an entry (contract
 * described in the comment above).  Returns 0 on success, -EPERM when an
 * ARP-sourced update hits a NOARP/PERMANENT entry, -EINVAL when no
 * address is supplied and none is cached.
 */
int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, int override, int arp)
{
	u8 old;
	int err;
	int notify = 0;
	struct net_device *dev = neigh->dev;

	write_lock_bh(&neigh->lock);
	old = neigh->nud_state;

	err = -EPERM;
	/* ARP may not touch administratively fixed entries. */
	if (arp && (old&(NUD_NOARP|NUD_PERMANENT)))
		goto out;

	/* Transition to an invalid state: just switch state, suspend the
	 * fast path, and notify if the entry used to be valid. */
	if (!(new&NUD_VALID)) {
		neigh_del_timer(neigh);
		if (old&NUD_CONNECTED)
			neigh_suspect(neigh);
		neigh->nud_state = new;
		err = 0;
		notify = old&NUD_VALID;
		goto out;
	}

	/* Compare new lladdr with cached one */
	if (dev->addr_len == 0) {
		/* First case: device needs no address. */
		lladdr = neigh->ha;
	} else if (lladdr) {
		/* The second case: if something is already cached
		   and a new address is proposed:
		   - compare new & old
		   - if they are different, check override flag
		 */
		if (old&NUD_VALID) {
			if (memcmp(lladdr, neigh->ha, dev->addr_len) == 0)
				lladdr = neigh->ha;
			else if (!override)
				goto out;
		}
	} else {
		/* No address is supplied; if we know something,
		   use it, otherwise discard the request.
		 */
		err = -EINVAL;
		if (!(old&NUD_VALID))
			goto out;
		lladdr = neigh->ha;
	}

	/* Refresh the entry's view of its own state before comparing. */
	neigh_sync(neigh);
	old = neigh->nud_state;
	if (new&NUD_CONNECTED)
		neigh->confirmed = jiffies;
	neigh->updated = jiffies;

	/* If entry was valid and address is not changed,
	   do not change entry state, if new one is STALE.
	 */
	err = 0;
	if (old&NUD_VALID) {
		if (lladdr == neigh->ha)
			if (new == old || (new == NUD_STALE && (old&NUD_CONNECTED)))
				goto out;
	}
	neigh_del_timer(neigh);
	neigh->nud_state = new;
	if (lladdr != neigh->ha) {
		memcpy(&neigh->ha, lladdr, dev->addr_len);
		/* Propagate into cached hard headers. */
		neigh_update_hhs(neigh);
		/* Not confirmed: backdate so it goes suspect quickly. */
		if (!(new&NUD_CONNECTED))
			neigh->confirmed = jiffies - (neigh->parms->base_reachable_time<<1);
#ifdef CONFIG_ARPD
		notify = 1;
#endif
	}
	if (new == old)
		goto out;
	if (new&NUD_CONNECTED)
		neigh_connect(neigh);
	else
		neigh_suspect(neigh);
	/* Entry just became valid: flush packets queued while resolving. */
	if (!(old&NUD_VALID)) {
		struct sk_buff *skb;

		/* Again: avoid dead loop if something went wrong */

		while (neigh->nud_state&NUD_VALID &&
		       (skb=__skb_dequeue(&neigh->arp_queue)) != NULL) {
			struct neighbour *n1 = neigh;
			write_unlock_bh(&neigh->lock);
			/* On shaper/eql skb->dst->neighbour != neigh :( */
			if (skb->dst && skb->dst->neighbour)
				n1 = skb->dst->neighbour;
			n1->output(skb);
			write_lock_bh(&neigh->lock);
		}
		skb_queue_purge(&neigh->arp_queue);
	}
out:
	write_unlock_bh(&neigh->lock);
#ifdef CONFIG_ARPD
	if (notify && neigh->parms->app_probes)
		neigh_app_notify(neigh);
#endif
	return err;
}
899
 
900
struct neighbour * neigh_event_ns(struct neigh_table *tbl,
901
                                  u8 *lladdr, void *saddr,
902
                                  struct net_device *dev)
903
{
904
        struct neighbour *neigh;
905
 
906
        neigh = __neigh_lookup(tbl, saddr, dev, lladdr || !dev->addr_len);
907
        if (neigh)
908
                neigh_update(neigh, lladdr, NUD_STALE, 1, 1);
909
        return neigh;
910
}
911
 
912
/* Attach a cached hardware header (hh_cache) for the given protocol to dst.
 * Reuses an existing entry on the neighbour's hh list when one matches;
 * otherwise allocates a new one and asks the device to fill it in.
 * Caller must hold n->lock for writing (see neigh_resolve_output).
 */
static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst, u16 protocol)
{
        struct hh_cache *hh = NULL;
        struct net_device *dev = dst->dev;

        /* First look for an already-cached header of this protocol. */
        for (hh=n->hh; hh; hh = hh->hh_next)
                if (hh->hh_type == protocol)
                        break;

        if (!hh && (hh = kmalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
                memset(hh, 0, sizeof(struct hh_cache));
                hh->hh_lock = RW_LOCK_UNLOCKED;
                hh->hh_type = protocol;
                atomic_set(&hh->hh_refcnt, 0);
                hh->hh_next = NULL;
                /* Device fills the cached header; non-zero means it cannot. */
                if (dev->hard_header_cache(n, hh)) {
                        kfree(hh);
                        hh = NULL;
                } else {
                        /* One reference for the neighbour's hh list. */
                        atomic_inc(&hh->hh_refcnt);
                        hh->hh_next = n->hh;
                        n->hh = hh;
                        /* Pick fast path only while the neighbour is reachable. */
                        if (n->nud_state&NUD_CONNECTED)
                                hh->hh_output = n->ops->hh_output;
                        else
                                hh->hh_output = n->ops->output;
                }
        }
        if (hh) {
                /* Second reference for the dst entry itself. */
                atomic_inc(&hh->hh_refcnt);
                dst->hh = hh;
        }
}
945
 
946
/* This function can be used in contexts, where only old dev_queue_xmit
947
   worked, f.e. if you want to override normal output path (eql, shaper),
948
   but resolution is not made yet.
949
 */
950
 
951
int neigh_compat_output(struct sk_buff *skb)
952
{
953
        struct net_device *dev = skb->dev;
954
 
955
        __skb_pull(skb, skb->nh.raw - skb->data);
956
 
957
        if (dev->hard_header &&
958
            dev->hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL, skb->len) < 0 &&
959
            dev->rebuild_header(skb))
960
                return 0;
961
 
962
        return dev_queue_xmit(skb);
963
}
964
 
965
/* Slow and careful. */
966
 
967
/* Slow and careful. */

/* Output path used while the neighbour may still need resolution.
 * neigh_event_send() returning 0 means the entry is usable now; otherwise
 * the skb has been queued (or dropped) pending resolution and we return 0.
 * Returns 0 on success/queued, -EINVAL on error (skb freed).
 */
int neigh_resolve_output(struct sk_buff *skb)
{
        struct dst_entry *dst = skb->dst;
        struct neighbour *neigh;

        if (!dst || !(neigh = dst->neighbour))
                goto discard;

        __skb_pull(skb, skb->nh.raw - skb->data);

        if (neigh_event_send(neigh, skb) == 0) {
                int err;
                struct net_device *dev = neigh->dev;
                if (dev->hard_header_cache && dst->hh == NULL) {
                        /* Take the write lock so neigh_hh_init can link a new
                           hh_cache; re-check dst->hh under the lock to avoid
                           a duplicate init racing with another CPU. */
                        write_lock_bh(&neigh->lock);
                        if (dst->hh == NULL)
                                neigh_hh_init(neigh, dst, dst->ops->protocol);
                        err = dev->hard_header(skb, dev, ntohs(skb->protocol), neigh->ha, NULL, skb->len);
                        write_unlock_bh(&neigh->lock);
                } else {
                        /* Read lock suffices: we only read neigh->ha here. */
                        read_lock_bh(&neigh->lock);
                        err = dev->hard_header(skb, dev, ntohs(skb->protocol), neigh->ha, NULL, skb->len);
                        read_unlock_bh(&neigh->lock);
                }
                if (err >= 0)
                        return neigh->ops->queue_xmit(skb);
                kfree_skb(skb);
                return -EINVAL;
        }
        return 0;

discard:
        NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n", dst, dst ? dst->neighbour : NULL);
        kfree_skb(skb);
        return -EINVAL;
}
1003
 
1004
/* As fast as possible without hh cache */
1005
 
1006
int neigh_connected_output(struct sk_buff *skb)
1007
{
1008
        int err;
1009
        struct dst_entry *dst = skb->dst;
1010
        struct neighbour *neigh = dst->neighbour;
1011
        struct net_device *dev = neigh->dev;
1012
 
1013
        __skb_pull(skb, skb->nh.raw - skb->data);
1014
 
1015
        read_lock_bh(&neigh->lock);
1016
        err = dev->hard_header(skb, dev, ntohs(skb->protocol), neigh->ha, NULL, skb->len);
1017
        read_unlock_bh(&neigh->lock);
1018
        if (err >= 0)
1019
                return neigh->ops->queue_xmit(skb);
1020
        kfree_skb(skb);
1021
        return -EINVAL;
1022
}
1023
 
1024
/* Proxy-delay timer handler: walk tbl->proxy_queue, replay every skb whose
 * deadline has passed via tbl->proxy_redo, and re-arm the timer for the
 * earliest remaining deadline.  NOTE: skb->stamp.tv_usec is (ab)used by
 * pneigh_enqueue() to store the absolute jiffies deadline.
 */
static void neigh_proxy_process(unsigned long arg)
{
        struct neigh_table *tbl = (struct neigh_table *)arg;
        long sched_next = 0;
        unsigned long now = jiffies;
        struct sk_buff *skb;

        spin_lock(&tbl->proxy_queue.lock);

        skb = tbl->proxy_queue.next;

        while (skb != (struct sk_buff*)&tbl->proxy_queue) {
                struct sk_buff *back = skb;
                /* tv_usec holds the deadline in jiffies; <= 0 means due. */
                long tdif = back->stamp.tv_usec - now;

                /* Advance before unlinking 'back' from the queue. */
                skb = skb->next;
                if (tdif <= 0) {
                        struct net_device *dev = back->dev;
                        __skb_unlink(back, &tbl->proxy_queue);
                        if (tbl->proxy_redo && netif_running(dev))
                                tbl->proxy_redo(back);
                        else
                                kfree_skb(back);

                        /* Drop the reference taken in pneigh_enqueue(). */
                        dev_put(dev);
                } else if (!sched_next || tdif < sched_next)
                        sched_next = tdif;
        }
        del_timer(&tbl->proxy_timer);
        if (sched_next)
                mod_timer(&tbl->proxy_timer, jiffies + sched_next);
        spin_unlock(&tbl->proxy_queue.lock);
}
1057
 
1058
/* Queue a proxy-ARP/ND request for delayed handling.  A random delay up to
 * p->proxy_delay is applied; the absolute deadline (jiffies) is stashed in
 * skb->stamp.tv_usec for neigh_proxy_process() to read.  Drops the skb when
 * the proxy queue is already over p->proxy_qlen.
 */
void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
                    struct sk_buff *skb)
{
        unsigned long now = jiffies;
        long sched_next = net_random()%p->proxy_delay;

        if (tbl->proxy_queue.qlen > p->proxy_qlen) {
                kfree_skb(skb);
                return;
        }
        /* tv_sec = 0 flags the stamp as a deadline, not a real timestamp. */
        skb->stamp.tv_sec = 0;
        skb->stamp.tv_usec = now + sched_next;

        spin_lock(&tbl->proxy_queue.lock);
        /* If the timer is already pending sooner, keep the earlier expiry. */
        if (del_timer(&tbl->proxy_timer)) {
                long tval = tbl->proxy_timer.expires - now;
                if (tval < sched_next)
                        sched_next = tval;
        }
        dst_release(skb->dst);
        skb->dst = NULL;
        /* Hold the device; released by neigh_proxy_process(). */
        dev_hold(skb->dev);
        __skb_queue_tail(&tbl->proxy_queue, skb);
        mod_timer(&tbl->proxy_timer, now + sched_next);
        spin_unlock(&tbl->proxy_queue.lock);
}
1084
 
1085
 
1086
struct neigh_parms *neigh_parms_alloc(struct net_device *dev, struct neigh_table *tbl)
1087
{
1088
        struct neigh_parms *p;
1089
        p = kmalloc(sizeof(*p), GFP_KERNEL);
1090
        if (p) {
1091
                memcpy(p, &tbl->parms, sizeof(*p));
1092
                p->tbl = tbl;
1093
                p->reachable_time = neigh_rand_reach_time(p->base_reachable_time);
1094
                if (dev && dev->neigh_setup) {
1095
                        if (dev->neigh_setup(dev, p)) {
1096
                                kfree(p);
1097
                                return NULL;
1098
                        }
1099
                }
1100
                p->sysctl_table = NULL;
1101
                write_lock_bh(&tbl->lock);
1102
                p->next = tbl->parms.next;
1103
                tbl->parms.next = p;
1104
                write_unlock_bh(&tbl->lock);
1105
        }
1106
        return p;
1107
}
1108
 
1109
void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1110
{
1111
        struct neigh_parms **p;
1112
 
1113
        if (parms == NULL || parms == &tbl->parms)
1114
                return;
1115
        write_lock_bh(&tbl->lock);
1116
        for (p = &tbl->parms.next; *p; p = &(*p)->next) {
1117
                if (*p == parms) {
1118
                        *p = parms->next;
1119
                        write_unlock_bh(&tbl->lock);
1120
#ifdef CONFIG_SYSCTL
1121
                        neigh_sysctl_unregister(parms);
1122
#endif
1123
                        kfree(parms);
1124
                        return;
1125
                }
1126
        }
1127
        write_unlock_bh(&tbl->lock);
1128
        NEIGH_PRINTK1("neigh_parms_release: not found\n");
1129
}
1130
 
1131
 
1132
/* Initialise a protocol's neighbour table: slab cache for entries, the
 * periodic GC timer (plus SMP tasklet), the proxy timer/queue, and link the
 * table onto the global neigh_tables list.
 */
void neigh_table_init(struct neigh_table *tbl)
{
        unsigned long now = jiffies;

        tbl->parms.reachable_time = neigh_rand_reach_time(tbl->parms.base_reachable_time);

        /* Entry size rounded up to a 16-byte multiple for cache alignment. */
        if (tbl->kmem_cachep == NULL)
                tbl->kmem_cachep = kmem_cache_create(tbl->id,
                                                     (tbl->entry_size+15)&~15,
                                                     0, SLAB_HWCACHE_ALIGN,
                                                     NULL, NULL);

#ifdef CONFIG_SMP
        tasklet_init(&tbl->gc_task, SMP_TIMER_NAME(neigh_periodic_timer), (unsigned long)tbl);
#endif
        init_timer(&tbl->gc_timer);
        tbl->lock = RW_LOCK_UNLOCKED;
        tbl->gc_timer.data = (unsigned long)tbl;
        tbl->gc_timer.function = neigh_periodic_timer;
        tbl->gc_timer.expires = now + tbl->gc_interval + tbl->parms.reachable_time;
        add_timer(&tbl->gc_timer);

        /* Proxy timer is armed on demand by pneigh_enqueue(). */
        init_timer(&tbl->proxy_timer);
        tbl->proxy_timer.data = (unsigned long)tbl;
        tbl->proxy_timer.function = neigh_proxy_process;
        skb_queue_head_init(&tbl->proxy_queue);

        tbl->last_flush = now;
        tbl->last_rand = now + tbl->parms.reachable_time*20;
        write_lock(&neigh_tbl_lock);
        tbl->next = neigh_tables;
        neigh_tables = tbl;
        write_unlock(&neigh_tbl_lock);
}
1166
 
1167
/* Tear down a neighbour table on protocol unload: stop timers/tasklet,
 * flush the proxy queue and all entries, then unlink the table from the
 * global list.  Always returns 0.
 */
int neigh_table_clear(struct neigh_table *tbl)
{
        struct neigh_table **tp;

        /* It is not clean... Fix it to unload IPv6 module safely */
        del_timer_sync(&tbl->gc_timer);
        tasklet_kill(&tbl->gc_task);
        del_timer_sync(&tbl->proxy_timer);
        pneigh_queue_purge(&tbl->proxy_queue);
        neigh_ifdown(tbl, NULL);
        /* Entries still alive here mean someone leaked a reference. */
        if (tbl->entries)
                printk(KERN_CRIT "neighbour leakage\n");
        write_lock(&neigh_tbl_lock);
        for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
                if (*tp == tbl) {
                        *tp = tbl->next;
                        break;
                }
        }
        write_unlock(&neigh_tbl_lock);
#ifdef CONFIG_SYSCTL
        neigh_sysctl_unregister(&tbl->parms);
#endif
        return 0;
}
1192
 
1193
/* RTM_DELNEIGH handler: find the table matching the request's family, then
 * delete either a proxy entry (NTF_PROXY) or a normal neighbour (by forcing
 * it to NUD_FAILED).  Note: once a family match is found the global table
 * lock is dropped and the function returns from inside the loop.
 */
int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
        struct ndmsg *ndm = NLMSG_DATA(nlh);
        struct rtattr **nda = arg;
        struct neigh_table *tbl;
        struct net_device *dev = NULL;
        int err = 0;

        if (ndm->ndm_ifindex) {
                /* Takes a device reference; dropped on every exit path. */
                if ((dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL)
                        return -ENODEV;
        }

        read_lock(&neigh_tbl_lock);
        for (tbl=neigh_tables; tbl; tbl = tbl->next) {
                struct neighbour *n;

                if (tbl->family != ndm->ndm_family)
                        continue;
                read_unlock(&neigh_tbl_lock);

                err = -EINVAL;
                if (nda[NDA_DST-1] == NULL ||
                    nda[NDA_DST-1]->rta_len != RTA_LENGTH(tbl->key_len))
                        goto out;

                if (ndm->ndm_flags&NTF_PROXY) {
                        err = pneigh_delete(tbl, RTA_DATA(nda[NDA_DST-1]), dev);
                        goto out;
                }

                /* dev is NULL here, so there is no reference to drop. */
                if (dev == NULL)
                        return -EINVAL;

                n = neigh_lookup(tbl, RTA_DATA(nda[NDA_DST-1]), dev);
                if (n) {
                        err = neigh_update(n, NULL, NUD_FAILED, 1, 0);
                        neigh_release(n);
                }
out:
                if (dev)
                        dev_put(dev);
                return err;
        }
        read_unlock(&neigh_tbl_lock);

        if (dev)
                dev_put(dev);

        return -EADDRNOTAVAIL;
}
1244
 
1245
/* RTM_NEWNEIGH handler: create or update a neighbour (or proxy) entry from
 * a netlink request, honouring NLM_F_EXCL / NLM_F_REPLACE / NLM_F_CREATE.
 * As in neigh_delete(), the loop returns from inside once the family
 * matches and the global table lock has been dropped.
 */
int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
        struct ndmsg *ndm = NLMSG_DATA(nlh);
        struct rtattr **nda = arg;
        struct neigh_table *tbl;
        struct net_device *dev = NULL;

        if (ndm->ndm_ifindex) {
                /* Takes a device reference; dropped on every exit path. */
                if ((dev = dev_get_by_index(ndm->ndm_ifindex)) == NULL)
                        return -ENODEV;
        }

        read_lock(&neigh_tbl_lock);
        for (tbl=neigh_tables; tbl; tbl = tbl->next) {
                int err = 0;
                int override = 1;
                struct neighbour *n;

                if (tbl->family != ndm->ndm_family)
                        continue;
                read_unlock(&neigh_tbl_lock);

                /* Destination attribute must be present and key-sized. */
                err = -EINVAL;
                if (nda[NDA_DST-1] == NULL ||
                    nda[NDA_DST-1]->rta_len != RTA_LENGTH(tbl->key_len))
                        goto out;
                if (ndm->ndm_flags&NTF_PROXY) {
                        err = -ENOBUFS;
                        if (pneigh_lookup(tbl, RTA_DATA(nda[NDA_DST-1]), dev, 1))
                                err = 0;
                        goto out;
                }
                /* dev is NULL here, so there is no reference to drop. */
                if (dev == NULL)
                        return -EINVAL;
                err = -EINVAL;
                if (nda[NDA_LLADDR-1] != NULL &&
                    nda[NDA_LLADDR-1]->rta_len != RTA_LENGTH(dev->addr_len))
                        goto out;
                err = 0;
                n = neigh_lookup(tbl, RTA_DATA(nda[NDA_DST-1]), dev);
                if (n) {
                        if (nlh->nlmsg_flags&NLM_F_EXCL)
                                err = -EEXIST;
                        /* Existing entry: only replace lladdr on F_REPLACE. */
                        override = nlh->nlmsg_flags&NLM_F_REPLACE;
                } else if (!(nlh->nlmsg_flags&NLM_F_CREATE))
                        err = -ENOENT;
                else {
                        n = __neigh_lookup_errno(tbl, RTA_DATA(nda[NDA_DST-1]), dev);
                        if (IS_ERR(n)) {
                                err = PTR_ERR(n);
                                n = NULL;
                        }
                }
                /* n is guaranteed non-NULL whenever err == 0 here. */
                if (err == 0) {
                        err = neigh_update(n, nda[NDA_LLADDR-1] ? RTA_DATA(nda[NDA_LLADDR-1]) : NULL,
                                           ndm->ndm_state,
                                           override, 0);
                }
                if (n)
                        neigh_release(n);
out:
                if (dev)
                        dev_put(dev);
                return err;
        }
        read_unlock(&neigh_tbl_lock);

        if (dev)
                dev_put(dev);
        return -EADDRNOTAVAIL;
}
1316
 
1317
 
1318
/* Serialise one neighbour entry into a netlink message on skb.
 * Returns skb->len on success, -1 if the skb ran out of room (partial data
 * is trimmed off).  NLMSG_PUT/RTA_PUT jump to the failure labels on
 * overflow, so 'locked' tracks whether n->lock must still be released.
 */
static int neigh_fill_info(struct sk_buff *skb, struct neighbour *n,
                           u32 pid, u32 seq, int event)
{
        unsigned long now = jiffies;
        struct ndmsg *ndm;
        struct nlmsghdr  *nlh;
        unsigned char    *b = skb->tail;   /* rollback point on failure */
        struct nda_cacheinfo ci;
        int locked = 0;

        nlh = NLMSG_PUT(skb, pid, seq, event, sizeof(*ndm));
        ndm = NLMSG_DATA(nlh);
        ndm->ndm_family = n->ops->family;
        ndm->ndm_flags = n->flags;
        ndm->ndm_type = n->type;
        ndm->ndm_ifindex = n->dev->ifindex;
        RTA_PUT(skb, NDA_DST, n->tbl->key_len, n->primary_key);
        read_lock_bh(&n->lock);
        locked=1;
        ndm->ndm_state = n->nud_state;
        if (n->nud_state&NUD_VALID)
                RTA_PUT(skb, NDA_LLADDR, n->dev->addr_len, n->ha);
        ci.ndm_used = now - n->used;
        ci.ndm_confirmed = now - n->confirmed;
        ci.ndm_updated = now - n->updated;
        ci.ndm_refcnt = atomic_read(&n->refcnt) - 1;
        read_unlock_bh(&n->lock);
        locked=0;
        RTA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
        nlh->nlmsg_len = skb->tail - b;
        return skb->len;

nlmsg_failure:
rtattr_failure:
        /* A RTA_PUT inside the locked region may jump here. */
        if (locked)
                read_unlock_bh(&n->lock);
        skb_trim(skb, b - skb->data);
        return -1;
}
1357
 
1358
 
1359
/* Dump one table's neighbours into a netlink skb, resumable across calls:
 * cb->args[1] is the hash bucket and cb->args[2] the index within it where
 * the previous dump stopped.  Returns skb->len, or -1 when the skb filled
 * up (cursor saved for the next call).
 */
static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb, struct netlink_callback *cb)
{
        struct neighbour *n;
        int h, s_h;
        int idx, s_idx;

        s_h = cb->args[1];
        s_idx = idx = cb->args[2];
        for (h=0; h <= NEIGH_HASHMASK; h++) {
                if (h < s_h) continue;
                /* Only the resume bucket keeps its saved index offset. */
                if (h > s_h)
                        s_idx = 0;
                read_lock_bh(&tbl->lock);
                for (n = tbl->hash_buckets[h], idx = 0; n;
                     n = n->next, idx++) {
                        if (idx < s_idx)
                                continue;
                        if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
                                            cb->nlh->nlmsg_seq, RTM_NEWNEIGH) <= 0) {
                                read_unlock_bh(&tbl->lock);
                                cb->args[1] = h;
                                cb->args[2] = idx;
                                return -1;
                        }
                }
                read_unlock_bh(&tbl->lock);
        }

        cb->args[1] = h;
        cb->args[2] = idx;
        return skb->len;
}
1391
 
1392
/* Top-level RTM_GETNEIGH dump: iterate all tables (optionally filtered by
 * address family) and delegate to neigh_dump_table().  cb->args[0] records
 * the table index so a multi-part dump resumes at the right table.
 */
int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
{
        int t;
        int s_t;
        struct neigh_table *tbl;
        int family = ((struct rtgenmsg*)NLMSG_DATA(cb->nlh))->rtgen_family;

        s_t = cb->args[0];

        read_lock(&neigh_tbl_lock);
        for (tbl=neigh_tables, t=0; tbl; tbl = tbl->next, t++) {
                if (t < s_t) continue;
                if (family && tbl->family != family)
                        continue;
                /* Moving on to a new table: reset the per-table cursor. */
                if (t > s_t)
                        memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
                if (neigh_dump_table(tbl, skb, cb) < 0)
                        break;
        }
        read_unlock(&neigh_tbl_lock);

        cb->args[0] = t;

        return skb->len;
}
1417
 
1418
#ifdef CONFIG_ARPD
1419
void neigh_app_ns(struct neighbour *n)
1420
{
1421
        struct sk_buff *skb;
1422
        struct nlmsghdr  *nlh;
1423
        int size = NLMSG_SPACE(sizeof(struct ndmsg)+256);
1424
 
1425
        skb = alloc_skb(size, GFP_ATOMIC);
1426
        if (!skb)
1427
                return;
1428
 
1429
        if (neigh_fill_info(skb, n, 0, 0, RTM_GETNEIGH) < 0) {
1430
                kfree_skb(skb);
1431
                return;
1432
        }
1433
        nlh = (struct nlmsghdr*)skb->data;
1434
        nlh->nlmsg_flags = NLM_F_REQUEST;
1435
        NETLINK_CB(skb).dst_groups = RTMGRP_NEIGH;
1436
        netlink_broadcast(rtnl, skb, 0, RTMGRP_NEIGH, GFP_ATOMIC);
1437
}
1438
 
1439
static void neigh_app_notify(struct neighbour *n)
1440
{
1441
        struct sk_buff *skb;
1442
        struct nlmsghdr  *nlh;
1443
        int size = NLMSG_SPACE(sizeof(struct ndmsg)+256);
1444
 
1445
        skb = alloc_skb(size, GFP_ATOMIC);
1446
        if (!skb)
1447
                return;
1448
 
1449
        if (neigh_fill_info(skb, n, 0, 0, RTM_NEWNEIGH) < 0) {
1450
                kfree_skb(skb);
1451
                return;
1452
        }
1453
        nlh = (struct nlmsghdr*)skb->data;
1454
        NETLINK_CB(skb).dst_groups = RTMGRP_NEIGH;
1455
        netlink_broadcast(rtnl, skb, 0, RTMGRP_NEIGH, GFP_ATOMIC);
1456
}
1457
 
1458
#endif /* CONFIG_ARPD */
1459
 
1460
#ifdef CONFIG_SYSCTL
1461
 
1462
/* Template for the per-protocol/per-device neighbour sysctl tree:
 * net/<proto>/neigh/<dev>/<var>.  neigh_sysctl_register() copies this
 * struct, patches in data pointers and names, and registers the copy.
 * NOTE: the code in neigh_sysctl_register() indexes neigh_vars[] by
 * position, so entry order here must not change.
 */
struct neigh_sysctl_table
{
        struct ctl_table_header *sysctl_header;
        ctl_table neigh_vars[17];       /* 16 variables + terminator */
        ctl_table neigh_dev[2];         /* device (or "default") dir */
        ctl_table neigh_neigh_dir[2];   /* "neigh" dir */
        ctl_table neigh_proto_dir[2];   /* protocol dir, e.g. "ipv4" */
        ctl_table neigh_root_dir[2];    /* "net" root */
} neigh_sysctl_template = {
        NULL,
        {{NET_NEIGH_MCAST_SOLICIT, "mcast_solicit",
         NULL, sizeof(int), 0644, NULL,
         &proc_dointvec},
        {NET_NEIGH_UCAST_SOLICIT, "ucast_solicit",
         NULL, sizeof(int), 0644, NULL,
         &proc_dointvec},
        {NET_NEIGH_APP_SOLICIT, "app_solicit",
         NULL, sizeof(int), 0644, NULL,
         &proc_dointvec},
        {NET_NEIGH_RETRANS_TIME, "retrans_time",
         NULL, sizeof(int), 0644, NULL,
         &proc_dointvec},
        {NET_NEIGH_REACHABLE_TIME, "base_reachable_time",
         NULL, sizeof(int), 0644, NULL,
         &proc_dointvec_jiffies},
        {NET_NEIGH_DELAY_PROBE_TIME, "delay_first_probe_time",
         NULL, sizeof(int), 0644, NULL,
         &proc_dointvec_jiffies},
        {NET_NEIGH_GC_STALE_TIME, "gc_stale_time",
         NULL, sizeof(int), 0644, NULL,
         &proc_dointvec_jiffies},
        {NET_NEIGH_UNRES_QLEN, "unres_qlen",
         NULL, sizeof(int), 0644, NULL,
         &proc_dointvec},
        {NET_NEIGH_PROXY_QLEN, "proxy_qlen",
         NULL, sizeof(int), 0644, NULL,
         &proc_dointvec},
        {NET_NEIGH_ANYCAST_DELAY, "anycast_delay",
         NULL, sizeof(int), 0644, NULL,
         &proc_dointvec},
        {NET_NEIGH_PROXY_DELAY, "proxy_delay",
         NULL, sizeof(int), 0644, NULL,
         &proc_dointvec},
        {NET_NEIGH_LOCKTIME, "locktime",
         NULL, sizeof(int), 0644, NULL,
         &proc_dointvec},
        /* The last four are table-wide and only used for the "default" dir. */
        {NET_NEIGH_GC_INTERVAL, "gc_interval",
         NULL, sizeof(int), 0644, NULL,
         &proc_dointvec_jiffies},
        {NET_NEIGH_GC_THRESH1, "gc_thresh1",
         NULL, sizeof(int), 0644, NULL,
         &proc_dointvec},
        {NET_NEIGH_GC_THRESH2, "gc_thresh2",
         NULL, sizeof(int), 0644, NULL,
         &proc_dointvec},
        {NET_NEIGH_GC_THRESH3, "gc_thresh3",
         NULL, sizeof(int), 0644, NULL,
         &proc_dointvec},
         {0}},

        {{NET_PROTO_CONF_DEFAULT, "default", NULL, 0, 0555, NULL},{0}},
        {{0, "neigh", NULL, 0, 0555, NULL},{0}},
        {{0, NULL, NULL, 0, 0555, NULL},{0}},
        {{CTL_NET, "net", NULL, 0, 0555, NULL},{0}}
};
1527
 
1528
/* Register the sysctl tree for one neigh_parms instance.  Copies
 * neigh_sysctl_template, points each neigh_vars[] slot at the matching
 * field of p (indices must stay in sync with the template), names the
 * directories after the device/protocol, and registers the result.
 * Returns 0 on success, -ENOBUFS on allocation/registration failure.
 */
int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
                          int p_id, int pdev_id, char *p_name)
{
        struct neigh_sysctl_table *t;

        t = kmalloc(sizeof(*t), GFP_KERNEL);
        if (t == NULL)
                return -ENOBUFS;
        memcpy(t, &neigh_sysctl_template, sizeof(*t));
        t->neigh_vars[0].data = &p->mcast_probes;
        t->neigh_vars[1].data = &p->ucast_probes;
        t->neigh_vars[2].data = &p->app_probes;
        t->neigh_vars[3].data = &p->retrans_time;
        t->neigh_vars[4].data = &p->base_reachable_time;
        t->neigh_vars[5].data = &p->delay_probe_time;
        t->neigh_vars[6].data = &p->gc_staletime;
        t->neigh_vars[7].data = &p->queue_len;
        t->neigh_vars[8].data = &p->proxy_qlen;
        t->neigh_vars[9].data = &p->anycast_delay;
        t->neigh_vars[10].data = &p->proxy_delay;
        t->neigh_vars[11].data = &p->locktime;
        if (dev) {
                /* Per-device dir: name/id from the device; the table-wide
                   GC variables are terminated off the table here. */
                t->neigh_dev[0].procname = dev->name;
                t->neigh_dev[0].ctl_name = dev->ifindex;
                memset(&t->neigh_vars[12], 0, sizeof(ctl_table));
        } else {
                /* "default" dir: the four GC variables live in the ints
                   that follow the neigh_parms struct in memory — NOTE this
                   relies on the caller's allocation layout. */
                t->neigh_vars[12].data = (int*)(p+1);
                t->neigh_vars[13].data = (int*)(p+1) + 1;
                t->neigh_vars[14].data = (int*)(p+1) + 2;
                t->neigh_vars[15].data = (int*)(p+1) + 3;
        }
        t->neigh_neigh_dir[0].ctl_name = pdev_id;

        t->neigh_proto_dir[0].procname = p_name;
        t->neigh_proto_dir[0].ctl_name = p_id;

        /* Wire the directory hierarchy: net -> proto -> neigh -> dev. */
        t->neigh_dev[0].child = t->neigh_vars;
        t->neigh_neigh_dir[0].child = t->neigh_dev;
        t->neigh_proto_dir[0].child = t->neigh_neigh_dir;
        t->neigh_root_dir[0].child = t->neigh_proto_dir;

        t->sysctl_header = register_sysctl_table(t->neigh_root_dir, 0);
        if (t->sysctl_header == NULL) {
                kfree(t);
                return -ENOBUFS;
        }
        p->sysctl_table = t;
        return 0;
}
1577
 
1578
void neigh_sysctl_unregister(struct neigh_parms *p)
1579
{
1580
        if (p->sysctl_table) {
1581
                struct neigh_sysctl_table *t = p->sysctl_table;
1582
                p->sysctl_table = NULL;
1583
                unregister_sysctl_table(t->sysctl_header);
1584
                kfree(t);
1585
        }
1586
}
1587
 
1588
#endif  /* CONFIG_SYSCTL */

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.