OpenCores
URL https://opencores.org/ocsvn/or1k_old/or1k_old/trunk

Subversion Repositories or1k_old

[/] [or1k_old/] [trunk/] [uclinux/] [uClinux-2.0.x/] [net/] [ipv4/] [arp.c] - Blame information for rev 1765

Go to most recent revision | Details | Compare with Previous | View Log

Line No. Rev Author Line
1 199 simons
/* linux/net/inet/arp.c
2
 *
3
 * Copyright (C) 1994 by Florian  La Roche
4
 *
5
 * This module implements the Address Resolution Protocol ARP (RFC 826),
6
 * which is used to convert IP addresses (or in the future maybe other
7
 * high-level addresses) into a low-level hardware address (like an Ethernet
8
 * address).
9
 *
10
 * FIXME:
11
 *      Experiment with better retransmit timers
12
 *      Clean up the timer deletions
13
 *      If you create a proxy entry, set your interface address to the address
14
 *      and then delete it, proxies may get out of sync with reality -
15
 *      check this.
16
 *
17
 * This program is free software; you can redistribute it and/or
18
 * modify it under the terms of the GNU General Public License
19
 * as published by the Free Software Foundation; either version
20
 * 2 of the License, or (at your option) any later version.
21
 *
22
 * Fixes:
23
 *              Alan Cox        :       Removed the ethernet assumptions in
24
 *                                      Florian's code
25
 *              Alan Cox        :       Fixed some small errors in the ARP
26
 *                                      logic
27
 *              Alan Cox        :       Allow >4K in /proc
28
 *              Alan Cox        :       Make ARP add its own protocol entry
29
 *              Ross Martin     :       Rewrote arp_rcv() and arp_get_info()
30
 *              Stephen Henson  :       Add AX25 support to arp_get_info()
31
 *              Alan Cox        :       Drop data when a device is downed.
32
 *              Alan Cox        :       Use init_timer().
33
 *              Alan Cox        :       Double lock fixes.
34
 *              Martin Seine    :       Move the arphdr structure
35
 *                                      to if_arp.h for compatibility.
36
 *                                      with BSD based programs.
37
 *              Andrew Tridgell :       Added ARP netmask code and
38
 *                                      re-arranged proxy handling.
39
 *              Alan Cox        :       Changed to use notifiers.
40
 *              Niibe Yutaka    :       Reply for this device or proxies only.
41
 *              Alan Cox        :       Don't proxy across hardware types!
42
 *              Jonathan Naylor :       Added support for NET/ROM.
43
 *              Mike Shaver     :       RFC1122 checks.
44
 *              Jonathan Naylor :       Only lookup the hardware address for
45
 *                                      the correct hardware type.
46
 *              Germano Caronni :       Assorted subtle races.
47
 *              Craig Schlenter :       Don't modify permanent entry
48
 *                                      during arp_rcv.
49
 *              Russ Nelson     :       Tidied up a few bits.
50
 *              Alexey Kuznetsov:       Major changes to caching and behaviour,
51
 *                                      eg intelligent arp probing and
52
 *                                      generation
53
 *                                      of host down events.
54
 *              Alan Cox        :       Missing unlock in device events.
55
 *              Eckes           :       ARP ioctl control errors.
56
 *              Alexey Kuznetsov:       Arp free fix.
57
 *              Manuel Rodriguez:       Gratuitous ARP.
58
 *              Jonathan Layes  :       Added arpd support through kerneld
59
 *                                      message queue (960314)
60
 *              Mike Shaver     :       /proc/sys/net/ipv4/arp_* support
61
 *              Stuart Cheshire :       Metricom and grat arp fixes
62
 *                                      *** FOR 2.1 clean this up ***
63
 *              Lawrence V. Stefani: (08/12/96) Added FDDI support.
64
 *              David S. Miller :       Fix skb leakage in arp_find.
65
 */
66
 
67
/* RFC1122 Status:
68
   2.3.2.1 (ARP Cache Validation):
69
     MUST provide mechanism to flush stale cache entries (OK)
70
     SHOULD be able to configure cache timeout (OK)
71
     MUST throttle ARP retransmits (OK)
72
   2.3.2.2 (ARP Packet Queue):
73
     SHOULD save at least one packet from each "conversation" with an
74
       unresolved IP address.  (OK)
75
   950727 -- MS
76
*/
77
 
78
#include <linux/types.h>
79
#include <linux/string.h>
80
#include <linux/kernel.h>
81
#include <linux/sched.h>
82
#include <linux/config.h>
83
#include <linux/socket.h>
84
#include <linux/sockios.h>
85
#include <linux/errno.h>
86
#include <linux/in.h>
87
#include <linux/mm.h>
88
#include <linux/inet.h>
89
#include <linux/netdevice.h>
90
#include <linux/etherdevice.h>
91
#include <linux/fddidevice.h>
92
#include <linux/if_arp.h>
93
#include <linux/trdevice.h>
94
#include <linux/skbuff.h>
95
#include <linux/proc_fs.h>
96
#include <linux/stat.h>
97
 
98
#include <net/ip.h>
99
#include <net/icmp.h>
100
#include <net/route.h>
101
#include <net/protocol.h>
102
#include <net/tcp.h>
103
#include <net/sock.h>
104
#include <net/arp.h>
105
#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
106
#include <net/ax25.h>
107
#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE)
108
#include <net/netrom.h>
109
#endif
110
#endif
111
#ifdef CONFIG_NET_ALIAS
112
#include <linux/net_alias.h>
113
#endif
114
#ifdef CONFIG_ARPD
115
#include <net/netlink.h>
116
#endif
117
 
118
#include <asm/system.h>
119
#include <asm/segment.h>
120
 
121
#include <stdarg.h>
122
 
123
/*
124
 *      Configurable Parameters
125
 */
126
 
127
/*
128
 *      After that time, an unused entry is deleted from the arp table.
129
 *      RFC1122 recommends set it to 60*HZ, if your site uses proxy arp
130
 *      and dynamic routing.
131
 */
132
 
133
#define ARP_TIMEOUT             (60*HZ)
134
 
135
int sysctl_arp_timeout = ARP_TIMEOUT;
136
 
137
/*
138
 *      How often is ARP cache checked for expire.
139
 *      It is useless to set ARP_CHECK_INTERVAL > ARP_TIMEOUT
140
 */
141
 
142
#define ARP_CHECK_INTERVAL      (60*HZ)
143
 
144
int sysctl_arp_check_interval = ARP_CHECK_INTERVAL;
145
 
146
/*
147
 *      Soft limit on ARP cache size.
148
 *      Note that this number should be greater than
149
 *      number of simultaneously opened sockets, or else
150
 *      hardware header cache will not be efficient.
151
 */
152
 
153
#if RT_CACHE_DEBUG >= 2
154
#define ARP_MAXSIZE     4
155
#else
156
#ifdef CONFIG_ARPD
157
#define ARP_MAXSIZE     64
158
#else
159
#define ARP_MAXSIZE     256
160
#endif /* CONFIG_ARPD */
161
#endif
162
 
163
/*
164
 *      If an arp request is send, ARP_RES_TIME is the timeout value until the
165
 *      next request is send.
166
 *      RFC1122: OK.  Throttles ARPing, as per 2.3.2.1. (MUST)
167
 *      The recommended minimum timeout is 1 second per destination.
168
 *
169
 */
170
 
171
#define ARP_RES_TIME            (5*HZ)
172
 
173
int sysctl_arp_res_time = ARP_RES_TIME;
174
 
175
/*
176
 *      The number of times an broadcast arp request is send, until
177
 *      the host is considered temporarily unreachable.
178
 */
179
 
180
#define ARP_MAX_TRIES           3
181
 
182
int sysctl_arp_max_tries = ARP_MAX_TRIES;
183
 
184
/*
185
 *      The entry is reconfirmed by sending point-to-point ARP
186
 *      request after ARP_CONFIRM_INTERVAL.
187
 *      RFC1122 recommends 60*HZ.
188
 *
189
 *      Warning: there exist nodes, that answer only broadcast
190
 *      ARP requests (Cisco-4000 in hot standby mode?)
191
 *      Now arp code should work with such nodes, but
192
 *      it still will generate redundant broadcast requests, so that
193
 *      this interval should be enough long.
194
 */
195
 
196
#define ARP_CONFIRM_INTERVAL    (300*HZ)
197
 
198
int sysctl_arp_confirm_interval = ARP_CONFIRM_INTERVAL;
199
 
200
/*
201
 *      We wait for answer to unicast request for ARP_CONFIRM_TIMEOUT.
202
 */
203
 
204
#define ARP_CONFIRM_TIMEOUT     ARP_RES_TIME
205
 
206
int sysctl_arp_confirm_timeout = ARP_CONFIRM_TIMEOUT;
207
 
208
/*
209
 *      The number of times an unicast arp request is retried, until
210
 *      the cache entry is considered suspicious.
211
 *      Value 0 means that no unicast pings will be sent.
212
 *      RFC1122 recommends 2.
213
 */
214
 
215
#define ARP_MAX_PINGS           1
216
 
217
int sysctl_arp_max_pings = ARP_MAX_PINGS;
218
 
219
/*
220
 *      When a host is dead, but someone tries to connect it,
221
 *      we do not remove corresponding cache entry (it would
222
 *      be useless, it will be created again immediately)
223
 *      Instead we prolongate interval between broadcasts
224
 *      to ARP_DEAD_RES_TIME.
225
 *      This interval should be not very long.
226
 *      (When the host will be up again, we will notice it only
227
 *      when ARP_DEAD_RES_TIME expires, or when the host will arp us.
228
 */
229
 
230
#define ARP_DEAD_RES_TIME       (60*HZ)
231
 
232
int sysctl_arp_dead_res_time = ARP_DEAD_RES_TIME;
233
 
234
/*
235
 *      This structure defines the ARP mapping cache.
236
 */
237
 
238
struct arp_table
239
{
240
        struct arp_table                *next;                  /* Linked entry list            */
241
        unsigned long                   last_used;              /* For expiry                   */
242
        unsigned long                   last_updated;           /* For expiry                   */
243
        unsigned int                    flags;                  /* Control status               */
244
        u32                             ip;                     /* ip address of entry          */
245
        u32                             mask;                   /* netmask - used for generalised proxy arps (tridge)           */
246
        unsigned char                   ha[MAX_ADDR_LEN];       /* Hardware address             */
247
        struct device                   *dev;                   /* Device the entry is tied to  */
248
        struct hh_cache                 *hh;                    /* Hardware headers chain       */
249
 
250
        /*
251
         *      The following entries are only used for unresolved hw addresses.
252
         */
253
 
254
        struct timer_list               timer;                  /* expire timer                 */
255
        int                             retries;                /* remaining retries            */
256
        struct sk_buff_head             skb;                    /* list of queued packets       */
257
};
258
 
259
 
260
static atomic_t arp_size = 0;
261
 
262
#ifdef CONFIG_ARPD
263
static int arpd_not_running;
264
static int arpd_stamp;
265
#endif
266
 
267
static unsigned int arp_bh_mask;
268
 
269
#define ARP_BH_BACKLOG  1
270
 
271
/*
272
 *      Backlog for ARP updates.
273
 */
274
static struct arp_table *arp_backlog;
275
 
276
/*
277
 *      Backlog for incomplete entries.
278
 */
279
static struct arp_table *arp_req_backlog;
280
 
281
 
282
static void arp_run_bh(void);
283
static void arp_check_expire (unsigned long);
284
static int  arp_update (u32 sip, char *sha, struct device * dev,
285
            unsigned long updated, struct arp_table *ientry, int grat);
286
 
287
static struct timer_list arp_timer =
288
        { NULL, NULL, ARP_CHECK_INTERVAL, 0L, &arp_check_expire };
289
 
290
/*
291
 * The default arp netmask is just 255.255.255.255 which means it's
292
 * a single machine entry. Only proxy entries can have other netmasks
293
 */
294
 
295
#define DEF_ARP_NETMASK (~0)
296
 
297
/*
298
 *      The size of the hash table. Must be a power of two.
299
 */
300
 
301
#define ARP_TABLE_SIZE          16
302
#define FULL_ARP_TABLE_SIZE     (ARP_TABLE_SIZE+1)
303
 
304
struct arp_table *arp_tables[FULL_ARP_TABLE_SIZE] =
305
{
306
        NULL,
307
};
308
 
309
#define arp_proxy_list arp_tables[ARP_TABLE_SIZE]
310
 
311
/*
312
 *      The last bits in the IP address are used for the cache lookup.
313
 *      A special entry is used for proxy arp entries
314
 */
315
 
316
#define HASH(paddr)             (htonl(paddr) & (ARP_TABLE_SIZE - 1))
317
 
318
/*
319
 *      ARP cache semaphore.
320
 *
321
 *      Every time when someone wants to traverse arp table,
322
 *      he MUST call arp_fast_lock.
323
 *      It will guarantee that arp cache list will not change
324
 *      by interrupts and the entry that you found will not
325
 *      disappear unexpectedly.
326
 *
327
 *      If you want to modify arp cache lists, you MUST
328
 *      call arp_fast_lock, and check that you are the only
329
 *      owner of semaphore (arp_lock == 1). If it is not the case
330
 *      you can defer your operation or forgot it,
331
 *      but DO NOT TOUCH lists.
332
 *
333
 *      However, you are allowed to change arp entry contents.
334
 *
335
 *      Assumptions:
336
 *           -- interrupt code MUST have lock/unlock balanced,
337
 *              you cannot lock cache on interrupt and defer unlocking
338
 *              to callback.
339
 *              In particular, it means that lock/unlock are allowed
340
 *              to be non-atomic. They are made atomic, but it was not
341
 *              necessary.
342
 *           -- nobody is allowed to sleep while
343
 *              it keeps arp locked. (route cache has similar locking
344
 *              scheme, but allows sleeping)
345
 *
346
 */
347
 
348
static atomic_t arp_lock;
349
 
350
#define ARP_LOCKED() (arp_lock != 1)
351
 
352
static __inline__ void arp_fast_lock(void)
353
{
354
        atomic_inc(&arp_lock);
355
}
356
 
357
static __inline__ void arp_unlock(void)
358
{
359
        if (atomic_dec_and_test(&arp_lock) && arp_bh_mask)
360
                arp_run_bh();
361
}
362
 
363
/*
364
 * Enqueue to FIFO list.
365
 */
366
 
367
static void arp_enqueue(struct arp_table **q, struct arp_table *entry)
368
{
369
        unsigned long flags;
370
        struct arp_table * tail;
371
 
372
        save_flags(flags);
373
        cli();
374
        tail = *q;
375
        if (!tail)
376
                entry->next = entry;
377
        else
378
        {
379
                entry->next = tail->next;
380
                tail->next = entry;
381
        }
382
        *q = entry;
383
        restore_flags(flags);
384
        return;
385
}
386
 
387
/*
388
 * Dequeue from FIFO list,
389
 * caller should mask interrupts.
390
 */
391
 
392
static struct arp_table * arp_dequeue(struct arp_table **q)
393
{
394
        struct arp_table * entry;
395
 
396
        if (*q)
397
        {
398
                entry = (*q)->next;
399
                (*q)->next = entry->next;
400
                if (entry->next == entry)
401
                        *q = NULL;
402
                entry->next = NULL;
403
                return entry;
404
        }
405
        return NULL;
406
}
407
 
408
/*
409
 * Purge all linked skb's of the entry.
410
 */
411
 
412
static void arp_purge_send_q(struct arp_table *entry)
413
{
414
        struct sk_buff *skb;
415
        unsigned long flags;
416
 
417
        save_flags(flags);
418
        cli();
419
        /* Release the list of `skb' pointers. */
420
        while ((skb = skb_dequeue(&entry->skb)) != NULL)
421
        {
422
                skb_device_lock(skb);
423
                restore_flags(flags);
424
                dev_kfree_skb(skb, FREE_WRITE);
425
                cli();
426
        }
427
        restore_flags(flags);
428
        return;
429
}
430
 
431
/*
432
 *      Release the entry and all resources linked to it: skb's, hh's, timer
433
 *      and certainly memory.
434
 *      The entry should be already removed from lists.
435
 */
436
 
437
static void arp_free_entry(struct arp_table *entry)
438
{
439
        unsigned long flags;
440
        struct hh_cache *hh, *next;
441
 
442
        del_timer(&entry->timer);
443
        arp_purge_send_q(entry);
444
 
445
        save_flags(flags);
446
        cli();
447
        hh = entry->hh;
448
        entry->hh = NULL;
449
        restore_flags(flags);
450
 
451
        for ( ; hh; hh = next)
452
        {
453
                next = hh->hh_next;
454
                hh->hh_uptodate = 0;
455
                hh->hh_next = NULL;
456
                hh->hh_arp = NULL;
457
                if (atomic_dec_and_test(&hh->hh_refcnt))
458
                        kfree_s(hh, sizeof(struct(struct hh_cache)));
459
        }
460
 
461
        kfree_s(entry, sizeof(struct arp_table));
462
        atomic_dec(&arp_size);
463
        return;
464
}
465
 
466
/*
467
 *      Hardware header cache.
468
 *
469
 *      BEWARE! Hardware header cache has no locking, so that
470
 *      it requires especially careful handling.
471
 *      It is the only part of arp+route, where a list
472
 *      should be traversed with masked interrupts.
473
 *      Luckily, this list contains one element 8), as rule.
474
 */
475
 
476
/*
477
 *      How many users has this entry?
478
 *      The answer is reliable only when interrupts are masked.
479
 */
480
 
481
static __inline__ int arp_count_hhs(struct arp_table * entry)
482
{
483
        struct hh_cache *hh;
484
        int count = 0;
485
 
486
        for (hh = entry->hh; hh; hh = hh->hh_next)
487
                count += hh->hh_refcnt-1;
488
 
489
        return count;
490
}
491
 
492
/*
493
 * Signal to device layer, that hardware address may be changed.
494
 */
495
 
496
static __inline__ void arp_update_hhs(struct arp_table * entry)
497
{
498
        struct hh_cache *hh;
499
 
500
        for (hh=entry->hh; hh; hh=hh->hh_next)
501
                entry->dev->header_cache_update(hh, entry->dev, entry->ha);
502
}
503
 
504
/*
505
 *      Invalidate all hh's, so that higher level will not try to use it.
506
 */
507
 
508
static __inline__ void arp_invalidate_hhs(struct arp_table * entry)
509
{
510
        struct hh_cache *hh;
511
 
512
        for (hh=entry->hh; hh; hh=hh->hh_next)
513
                hh->hh_uptodate = 0;
514
}
515
 
516
/*
517
 *      Atomic attaching new hh entry.
518
 *      Return 1, if entry has been freed, rather than attached.
519
 */
520
 
521
static int arp_set_hh(struct hh_cache **hhp, struct hh_cache *hh)
522
{
523
        unsigned long flags;
524
        struct hh_cache *hh1;
525
        struct arp_table *entry;
526
 
527
        atomic_inc(&hh->hh_refcnt);
528
 
529
        save_flags(flags);
530
        cli();
531
        if ((hh1 = *hhp) == NULL)
532
        {
533
                *hhp = hh;
534
                restore_flags(flags);
535
                return 0;
536
        }
537
 
538
        entry = (struct arp_table*)hh->hh_arp;
539
 
540
        /*
541
         *      An hh1 entry is already attached to this point.
542
         *      Is it not linked to arp entry? Link it!
543
         */
544
        if (!hh1->hh_arp && entry)
545
        {
546
                atomic_inc(&hh1->hh_refcnt);
547
                hh1->hh_next = entry->hh;
548
                entry->hh = hh1;
549
                hh1->hh_arp = (void*)entry;
550
                restore_flags(flags);
551
 
552
                if (entry->flags & ATF_COM)
553
                        entry->dev->header_cache_update(hh1, entry->dev, entry->ha);
554
#if RT_CACHE_DEBUG >= 1
555
                printk("arp_set_hh: %08x is reattached. Good!\n", entry->ip);
556
#endif
557
        }
558
#if RT_CACHE_DEBUG >= 1
559
        else if (entry)
560
                printk("arp_set_hh: %08x rr1 ok!\n", entry->ip);
561
#endif
562
        restore_flags(flags);
563
        if (atomic_dec_and_test(&hh->hh_refcnt))
564
                kfree_s(hh, sizeof(struct hh_cache));
565
        return 1;
566
}
567
 
568
static __inline__ struct hh_cache * arp_alloc_hh(int htype)
569
{
570
        struct hh_cache *hh;
571
        hh = kmalloc(sizeof(struct hh_cache), GFP_ATOMIC);
572
        if (hh)
573
        {
574
                memset(hh, 0, sizeof(struct hh_cache));
575
                hh->hh_type = htype;
576
        }
577
        return hh;
578
}
579
 
580
/*
 * Test whether a hardware address is all zero.
 * Returns 1 for an all-zero (or zero-length) address, 0 otherwise.
 */

static __inline__ int empty(unsigned char * addr, int len)
{
        int i;

        for (i = 0; i < len; i++)
        {
                if (addr[i] != 0)
                        return 0;
        }
        return 1;
}
595
 
596
 
597
#ifdef CONFIG_ARPD
598
 
599
/*
600
 *      Send ARPD message.
601
 */
602
static void arpd_send(int req, u32 addr, struct device * dev, char *ha,
603
                      unsigned long updated)
604
{
605
        int retval;
606
        struct sk_buff *skb;
607
        struct arpd_request *arpreq;
608
 
609
        if (arpd_not_running)
610
                return;
611
 
612
        skb = alloc_skb(sizeof(struct arpd_request), GFP_ATOMIC);
613
        if (skb == NULL)
614
                return;
615
 
616
        skb->free=1;
617
        arpreq=(struct arpd_request *)skb_put(skb, sizeof(struct arpd_request));
618
        arpreq->req = req;
619
        arpreq->ip  = addr;
620
        arpreq->dev = (unsigned long)dev;
621
        arpreq->stamp = arpd_stamp;
622
        arpreq->updated = updated;
623
        if (ha)
624
                memcpy(arpreq->ha, ha, sizeof(arpreq->ha));
625
 
626
        retval = netlink_post(NETLINK_ARPD, skb);
627
        if (retval)
628
        {
629
                kfree_skb(skb, FREE_WRITE);
630
                if (retval == -EUNATCH)
631
                        arpd_not_running = 1;
632
        }
633
}
634
 
635
/*
636
 *      Send ARPD update message.
637
 */
638
 
639
static __inline__ void arpd_update(struct arp_table * entry)
640
{
641
        if (arpd_not_running)
642
                return;
643
        arpd_send(ARPD_UPDATE, entry->ip, entry->dev, entry->ha,
644
                  entry->last_updated);
645
}
646
 
647
/*
648
 *      Send ARPD lookup request.
649
 */
650
 
651
static __inline__ void arpd_lookup(u32 addr, struct device * dev)
652
{
653
        if (arpd_not_running)
654
                return;
655
        arpd_send(ARPD_LOOKUP, addr, dev, NULL, 0);
656
}
657
 
658
/*
659
 *      Send ARPD flush message.
660
 */
661
 
662
static __inline__ void arpd_flush(struct device * dev)
663
{
664
        if (arpd_not_running)
665
                return;
666
        arpd_send(ARPD_FLUSH, 0, dev, NULL, 0);
667
}
668
 
669
 
670
static int arpd_callback(struct sk_buff *skb)
671
{
672
        struct device * dev;
673
        struct arpd_request *retreq;
674
 
675
        arpd_not_running = 0;
676
 
677
        if (skb->len != sizeof(struct arpd_request))
678
        {
679
                kfree_skb(skb, FREE_READ);
680
                return -EINVAL;
681
        }
682
 
683
        retreq = (struct arpd_request *)skb->data;
684
        dev = (struct device*)retreq->dev;
685
 
686
        if (retreq->stamp != arpd_stamp || !dev)
687
        {
688
                kfree_skb(skb, FREE_READ);
689
                return -EINVAL;
690
        }
691
 
692
        if (!retreq->updated || empty(retreq->ha, sizeof(retreq->ha)))
693
        {
694
/*
695
 *      Invalid mapping: drop it and send ARP broadcast.
696
 */
697
                arp_send(ARPOP_REQUEST, ETH_P_ARP, retreq->ip, dev, dev->pa_addr, NULL,
698
                         dev->dev_addr, NULL);
699
        }
700
        else
701
        {
702
                arp_fast_lock();
703
                arp_update(retreq->ip, retreq->ha, dev, retreq->updated, NULL, 0);
704
                arp_unlock();
705
        }
706
 
707
        kfree_skb(skb, FREE_READ);
708
        return sizeof(struct arpd_request);
709
}
710
 
711
#else
712
 
713
static __inline__ void arpd_update(struct arp_table * entry)
{
        /* arpd support not compiled in: nothing to do. */
        return;
}
717
 
718
#endif /* CONFIG_ARPD */
719
 
720
 
721
 
722
 
723
/*
724
 *      ARP expiration routines.
725
 */
726
 
727
/*
728
 *      Force the expiry of an entry in the internal cache so the memory
729
 *      can be used for a new request.
730
 */
731
 
732
static int arp_force_expire(void)
733
{
734
        int i;
735
        struct arp_table *entry, **pentry;
736
        struct arp_table **oldest_entry = NULL;
737
        unsigned long oldest_used = ~0;
738
        unsigned long flags;
739
        unsigned long now = jiffies;
740
        int result = 0;
741
 
742
        static int last_index;
743
 
744
        if (ARP_LOCKED())
745
                return 0;
746
 
747
        save_flags(flags);
748
 
749
        if (last_index >= ARP_TABLE_SIZE)
750
                last_index = 0;
751
 
752
        for (i = 0; i < ARP_TABLE_SIZE; i++, last_index++)
753
        {
754
                pentry = &arp_tables[last_index & (ARP_TABLE_SIZE-1)];
755
 
756
                while ((entry = *pentry) != NULL)
757
                {
758
                        if (!(entry->flags & ATF_PERM))
759
                        {
760
                                int users;
761
                                cli();
762
                                users = arp_count_hhs(entry);
763
 
764
                                if (!users && now - entry->last_used > sysctl_arp_timeout)
765
                                {
766
                                        *pentry = entry->next;
767
                                        restore_flags(flags);
768
#if RT_CACHE_DEBUG >= 2
769
                                        printk("arp_force_expire: %08x expired\n", entry->ip);
770
#endif
771
                                        arp_free_entry(entry);
772
                                        result++;
773
                                        if (arp_size < ARP_MAXSIZE)
774
                                                goto done;
775
                                        continue;
776
                                }
777
                                restore_flags(flags);
778
                                if (!users && entry->last_used < oldest_used)
779
                                {
780
                                        oldest_entry = pentry;
781
                                        oldest_used = entry->last_used;
782
                                }
783
                        }
784
                        pentry = &entry->next;
785
                }
786
        }
787
 
788
done:
789
        if (result || !oldest_entry)
790
                return result;
791
 
792
        entry = *oldest_entry;
793
        *oldest_entry = entry->next;
794
#if RT_CACHE_DEBUG >= 2
795
        printk("arp_force_expire: expiring %08x\n", entry->ip);
796
#endif
797
        arp_free_entry(entry);
798
        return 1;
799
}
800
 
801
/*
802
 *      Check if there are entries that are too old and remove them. If the
803
 *      ATF_PERM flag is set, they are always left in the arp cache (permanent
804
 *      entries). If an entry was not confirmed for ARP_CONFIRM_INTERVAL,
805
 *      send point-to-point ARP request.
806
 *      If it will not be confirmed for ARP_CONFIRM_TIMEOUT,
807
 *      give it to shred by arp_expire_entry.
808
 */
809
 
810
static void arp_check_expire(unsigned long dummy)
811
{
812
        int i;
813
        unsigned long now = jiffies;
814
 
815
        del_timer(&arp_timer);
816
 
817
#ifdef CONFIG_ARPD
818
        arpd_not_running = 0;
819
#endif
820
 
821
        ip_rt_check_expire();
822
 
823
        arp_fast_lock();
824
 
825
        if (!ARP_LOCKED())
826
        {
827
 
828
                for (i = 0; i < ARP_TABLE_SIZE; i++)
829
                {
830
                        struct arp_table *entry, **pentry;
831
 
832
                        pentry = &arp_tables[i];
833
 
834
                        while ((entry = *pentry) != NULL)
835
                        {
836
                                if (entry->flags & ATF_PERM)
837
                                {
838
                                        pentry = &entry->next;
839
                                        continue;
840
                                }
841
 
842
                                cli();
843
                                if (now - entry->last_used > sysctl_arp_timeout
844
                                    && !arp_count_hhs(entry))
845
                                {
846
                                        *pentry = entry->next;
847
                                        sti();
848
#if RT_CACHE_DEBUG >= 2
849
                                        printk("arp_expire: %08x expired\n", entry->ip);
850
#endif
851
                                        arp_free_entry(entry);
852
                                        continue;
853
                                }
854
                                sti();
855
                                if (entry->last_updated
856
                                    && now - entry->last_updated > sysctl_arp_confirm_interval
857
                                    && !(entry->flags & ATF_PERM))
858
                                {
859
                                        struct device * dev = entry->dev;
860
                                        entry->retries = sysctl_arp_max_tries+sysctl_arp_max_pings;
861
                                        del_timer(&entry->timer);
862
                                        entry->timer.expires = jiffies + ARP_CONFIRM_TIMEOUT;
863
                                        add_timer(&entry->timer);
864
                                        arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip,
865
                                                 dev, dev->pa_addr, entry->ha,
866
                                                 dev->dev_addr, NULL);
867
#if RT_CACHE_DEBUG >= 2
868
                                        printk("arp_expire: %08x requires confirmation\n", entry->ip);
869
#endif
870
                                }
871
                                pentry = &entry->next;  /* go to next entry */
872
                        }
873
                }
874
        }
875
 
876
        arp_unlock();
877
 
878
        /*
879
         *      Set the timer again.
880
         */
881
 
882
        arp_timer.expires = jiffies + sysctl_arp_check_interval;
883
        add_timer(&arp_timer);
884
}
885
 
886
/*
887
 *      This function is called, if an entry is not resolved in ARP_RES_TIME.
888
 *      When more than MAX_ARP_TRIES retries was done, release queued skb's,
889
 *      but not discard entry itself if  it is in use.
890
 */
891
 
892
/*
 *      Timer callback for a cache entry (installed by arp_alloc_entry).
 *      Runs when a resolution request or confirmation interval times out:
 *      either retransmits the ARP request, marks the entry dead, or
 *      unlinks and frees it.  'arg' is the entry itself, cast to the
 *      timer's data word.
 */
static void arp_expire_request (unsigned long arg)
{
        struct arp_table *entry = (struct arp_table *) arg;
        struct arp_table **pentry;
        unsigned long hash;
        unsigned long flags;

        arp_fast_lock();

        /* Stop the timer with interrupts off so it cannot rearm under us. */
        save_flags(flags);
        cli();
        del_timer(&entry->timer);

        /*
         *      If arp table is locked, defer expire processing.
         */
        if (ARP_LOCKED())
        {
#if RT_CACHE_DEBUG >= 1
                printk(KERN_DEBUG "arp_expire_request: %08x deferred\n", entry->ip);
#endif
                /* Retry shortly (HZ/10) rather than dropping the event. */
                entry->timer.expires = jiffies + HZ/10;
                add_timer(&entry->timer);
                restore_flags(flags);
                arp_unlock();
                return;
        }

        /*
         *      Since all timeouts are handled with interrupts enabled, there is a
         *      small chance, that this entry has just been resolved by an incoming
         *      packet. This is the only race condition, but it is handled...
         *
         *      One exception: if entry is COMPLETE but old,
         *      it means that point-to-point ARP ping has been failed
         *      (It really occurs with Cisco 4000 routers)
         *      We should reconfirm it.
         */

        if ((entry->flags & ATF_COM) && entry->last_updated
            && jiffies - entry->last_updated <= sysctl_arp_confirm_interval)
        {
                /* Entry is complete and recently confirmed: nothing to do. */
                restore_flags(flags);
                arp_unlock();
                return;
        }

        restore_flags(flags);

        if (entry->last_updated && --entry->retries > 0)
        {
                struct device *dev = entry->dev;

#if RT_CACHE_DEBUG >= 2
                printk("arp_expire_request: %08x timed out\n", entry->ip);
#endif
                /* Set new timer. */
                entry->timer.expires = jiffies + sysctl_arp_res_time;
                add_timer(&entry->timer);
                /*
                 *      While retries is still above the normal maximum we are
                 *      reconfirming a known address, so unicast to the cached
                 *      hardware address; otherwise broadcast (NULL dest).
                 */
                arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr,
                         entry->retries > sysctl_arp_max_tries ? entry->ha : NULL,
                         dev->dev_addr, NULL);
                arp_unlock();
                return;
        }

        /*
         *      The host is really dead.
         */

        arp_purge_send_q(entry);

        cli();
        if (arp_count_hhs(entry))
        {
                /*
                 *      The host is dead, but someone refers to it.
                 *      It is useless to drop this entry just now,
                 *      it will be born again, so that
                 *      we keep it, but slow down retransmitting
                 *      to ARP_DEAD_RES_TIME.
                 */

                struct device *dev = entry->dev;
#if RT_CACHE_DEBUG >= 2
                printk("arp_expire_request: %08x is dead\n", entry->ip);
#endif
                entry->retries = sysctl_arp_max_tries;
                entry->flags &= ~ATF_COM;
                arp_invalidate_hhs(entry);
                restore_flags(flags);

                /*
                 *      Declare the entry dead.
                 */
                entry->last_updated = 0;
                arpd_update(entry);

                /* Keep probing, but at the slow dead-host interval. */
                entry->timer.expires = jiffies + sysctl_arp_dead_res_time;
                add_timer(&entry->timer);
                arp_send(ARPOP_REQUEST, ETH_P_ARP, entry->ip, dev, dev->pa_addr,
                         NULL, dev->dev_addr, NULL);
                arp_unlock();
                return;
        }
        restore_flags(flags);

        /* No references remain: mark dead and unlink from the hash chain. */
        entry->last_updated = 0;
        arpd_update(entry);

        hash = HASH(entry->ip);

        pentry = &arp_tables[hash];

        while (*pentry != NULL)
        {
                if (*pentry != entry)
                {
                        pentry = &(*pentry)->next;
                        continue;
                }
                *pentry = entry->next;
#if RT_CACHE_DEBUG >= 2
                printk("arp_expire_request: %08x is killed\n", entry->ip);
#endif
                /* NOTE(review): the scan continues after the free; the loop
                 * condition then reads the already-saved next pointer, but any
                 * further compare against 'entry' uses a freed pointer value —
                 * harmless here since an entry appears at most once per chain. */
                arp_free_entry(entry);
        }
        arp_unlock();
}
1021
 
1022
 
1023
/*
1024
 * Allocate memory for a new entry.  If we are at the maximum limit
1025
 * of the internal ARP cache, arp_force_expire() an entry.  NOTE:
1026
 * arp_force_expire() needs the cache to be locked, so therefore
1027
 * arp_alloc_entry() should only be called with the cache locked too!
1028
 */
1029
 
1030
/*
 *      Allocate and zero-initialize a new cache entry (GFP_ATOMIC, so
 *      callable from bottom-half context).  Evicts an old entry first
 *      when the table is full; per the comment above, the caller must
 *      hold the cache lock.  Returns NULL on allocation failure.
 */
static struct arp_table * arp_alloc_entry(void)
{
        struct arp_table * entry;


        if (arp_size >= ARP_MAXSIZE)
                arp_force_expire();

        entry = (struct arp_table *)
                kmalloc(sizeof(struct arp_table),GFP_ATOMIC);

        if (entry != NULL)
        {
                atomic_inc(&arp_size);
                memset(entry, 0, sizeof(struct arp_table));

                entry->mask = DEF_ARP_NETMASK;
                /* Wire up the expiry timer; it is armed later by the caller
                 * (e.g. arp_new_entry), not here. */
                init_timer(&entry->timer);
                entry->timer.function = arp_expire_request;
                entry->timer.data = (unsigned long)entry;
                entry->last_updated = entry->last_used = jiffies;
                skb_queue_head_init(&entry->skb);
        }
        return entry;
}
1055
 
1056
 
1057
 
1058
/*
1059
 *      Purge a device from the ARP queue
1060
 */
1061
 
1062
/*
 *      Netdevice notifier: on NETDEV_DOWN, flush every cache entry that
 *      references the departing device from all hash chains.  Other
 *      events are ignored.  Always returns NOTIFY_DONE.
 */
int arp_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
        struct device *dev=ptr;
        int i;

        if (event != NETDEV_DOWN)
                return NOTIFY_DONE;

#ifdef  CONFIG_ARPD
        /* Tell the user-space arp daemon and invalidate its view. */
        arpd_flush(dev);
        arpd_stamp++;
#endif

        arp_fast_lock();
#if RT_CACHE_DEBUG >= 1  
        if (ARP_LOCKED())
                printk("arp_device_event: impossible\n");
#endif

        for (i = 0; i < FULL_ARP_TABLE_SIZE; i++)
        {
                struct arp_table *entry;
                struct arp_table **pentry = &arp_tables[i];

                /* Unlink-while-iterating: pentry tracks the link to patch. */
                while ((entry = *pentry) != NULL)
                {
                        if (entry->dev == dev)
                        {
                                *pentry = entry->next;  /* remove from list */
                                arp_free_entry(entry);
                        }
                        else
                                pentry = &entry->next;  /* go to next entry */
                }
        }
        arp_unlock();
        return NOTIFY_DONE;
}
1100
 
1101
 
1102
 
1103
/*
1104
 *      This will try to retransmit everything on the queue.
1105
 */
1106
 
1107
/*
 *      Transmit every packet queued on a newly-completed entry.  The
 *      entry must be ATF_COM; an incomplete entry is logged and left
 *      untouched (RFC 1122 requires keeping at least one queued skb).
 *      Interrupts are disabled only around each dequeue, not around the
 *      actual transmit.
 */
static void arp_send_q(struct arp_table *entry)
{
        struct sk_buff *skb;

        unsigned long flags;

        /*
         *      Empty the entire queue, building its data up ready to send
         */

        if(!(entry->flags&ATF_COM))
        {
                printk(KERN_ERR "arp_send_q: incomplete entry for %s\n",
                                in_ntoa(entry->ip));
                /* Can't flush the skb, because RFC1122 says to hang on to */
                /* at least one from any unresolved entry.  --MS */
                /* What's happened is that someone has 'unresolved' the entry
                   as we got to use it - this 'can't happen' -- AC */
                return;
        }

        save_flags(flags);

        cli();
        while((skb = skb_dequeue(&entry->skb)) != NULL)
        {
                IS_SKB(skb);
                skb_device_lock(skb);
                /* Re-enable interrupts for the (potentially slow) rebuild
                 * and transmit; re-disable before the next dequeue. */
                restore_flags(flags);
                if(!skb->dev->rebuild_header(skb->data,skb->dev,skb->raddr,skb))
                {
                        skb->arp  = 1;
                        if(skb->sk==NULL)
                                dev_queue_xmit(skb, skb->dev, 0);
                        else
                                dev_queue_xmit(skb,skb->dev,skb->sk->priority);
                }
                cli();
        }
        restore_flags(flags);
}
1148
 
1149
 
1150
/*
 *      Record a (sip -> sha) mapping learned from a received ARP packet.
 *
 *      updated == 0 means "freshly learned now": the timestamp becomes
 *      jiffies and the change is propagated to arpd.  ientry, if
 *      non-NULL, is a preallocated entry to install when no match is
 *      found (used when replaying the backlog).  grat != 0 restricts the
 *      call to updating existing entries only — a gratuitous ARP must
 *      not create new ones.
 *
 *      Returns 1 if an existing entry was found (ientry is then unused
 *      and the caller must free it), 0 if ientry/a new entry was
 *      consumed or nothing was done.
 */
static int
arp_update (u32 sip, char *sha, struct device * dev,
            unsigned long updated, struct arp_table *ientry, int grat)
{
        struct arp_table * entry;
        unsigned long hash;
        int do_arpd = 0;

        if (updated == 0)
        {
                updated = jiffies;
                do_arpd = 1;
        }

        hash = HASH(sip);

        for (entry=arp_tables[hash]; entry; entry = entry->next)
                if (entry->ip == sip && entry->dev == dev)
                        break;

        if (entry)
        {
/*
 *      Entry found; update it only if it is not a permanent entry.
 */
                if (!(entry->flags & ATF_PERM))
                {
                        del_timer(&entry->timer);
                        entry->last_updated = updated;
                        /* Only touch the hardware header caches when the
                         * hardware address actually changed. */
                        if (memcmp(entry->ha, sha, dev->addr_len)!=0)
                        {
                                memcpy(entry->ha, sha, dev->addr_len);
                                if (entry->flags & ATF_COM)
                                        arp_update_hhs(entry);
                        }
                        if (do_arpd)
                                arpd_update(entry);
                }

                if (!(entry->flags & ATF_COM))
                {
/*
 *      This entry was incomplete.  Delete the retransmit timer
 *      and switch to complete status.
 */
                        entry->flags |= ATF_COM;
                        arp_update_hhs(entry);
/*
 *      Send out waiting packets. We might have problems, if someone is
 *      manually removing entries right now -- entry might become invalid
 *      underneath us.
 */
                        arp_send_q(entry);
                }
                return 1;
        }

/*
 *      No entry found.  Need to add a new entry to the arp table.
 */
        entry = ientry;

        if (grat && !entry)
                return 0;

        if (!entry)
        {
                entry = arp_alloc_entry();
                if (!entry)
                        return 0;

                entry->ip = sip;
                entry->flags = ATF_COM;
                memcpy(entry->ha, sha, dev->addr_len);
                entry->dev = dev;
        }

        entry->last_updated = updated;
        entry->last_used = jiffies;
        if (do_arpd)
                arpd_update(entry);

        if (!ARP_LOCKED())
        {
                /* Safe to splice into the hash chain directly. */
                entry->next = arp_tables[hash];
                arp_tables[hash] = entry;
                return 0;
        }
#if RT_CACHE_DEBUG >= 2
        printk("arp_update: %08x backlogged\n", entry->ip);
#endif
        /* Table is locked: park the entry on the backlog for arp_run_bh. */
        arp_enqueue(&arp_backlog, entry);
        arp_bh_mask |= ARP_BH_BACKLOG;
        return 0;
}
1245
 
1246
 
1247
 
1248
/*
 *      Scan one hash chain for the entry matching paddr.  A NULL dev
 *      acts as a wildcard and matches any device.  Returns the entry,
 *      or NULL when no match exists.
 */
static __inline__ struct arp_table *arp_lookup(u32 paddr, struct device * dev)
{
        struct arp_table *cur = arp_tables[HASH(paddr)];

        while (cur != NULL)
        {
                if (cur->ip == paddr && (dev == NULL || cur->dev == dev))
                        break;
                cur = cur->next;
        }
        return cur;
}
1257
 
1258
/*
1259
 *      Find an arp mapping in the cache. If not found, return false.
1260
 */
1261
 
1262
/*
 *      Look up a completed mapping and copy its hardware address into
 *      haddr.  Returns 1 on success, 0 when the entry is missing or
 *      still unresolved.  Refreshes last_used on any hit.
 *      NOTE(review): dev is dereferenced for addr_len on a completed
 *      hit, so callers must pass a non-NULL dev even though arp_lookup
 *      itself would accept NULL — verify against callers.
 */
int arp_query(unsigned char *haddr, u32 paddr, struct device * dev)
{
        struct arp_table *entry;

        arp_fast_lock();

        entry = arp_lookup(paddr, dev);

        if (entry != NULL)
        {
                entry->last_used = jiffies;
                if (entry->flags & ATF_COM)
                {
                        memcpy(haddr, entry->ha, dev->addr_len);
                        arp_unlock();
                        return 1;
                }
        }
        arp_unlock();
        return 0;
}
1283
 
1284
 
1285
/*
 *      Resolve addresses that never need an ARP exchange: our own
 *      address, IP multicast, and broadcast.  addr_hint is the
 *      ip_chk_addr() classification of paddr.  On success the hardware
 *      address is written to haddr and 1 is returned; 0 means a real
 *      ARP lookup is required.
 */
static int arp_set_predefined(int addr_hint, unsigned char * haddr, u32 paddr, struct device * dev)
{
        switch (addr_hint)
        {
                case IS_MYADDR:
                        printk(KERN_DEBUG "ARP: arp called for own IP address\n");
                        memcpy(haddr, dev->dev_addr, dev->addr_len);
                        return 1;
#ifdef CONFIG_IP_MULTICAST
                case IS_MULTICAST:
                        if(dev->type==ARPHRD_ETHER || dev->type==ARPHRD_IEEE802
                                || dev->type==ARPHRD_FDDI)
                        {
                                /*
                                 *      Map the group address into the
                                 *      01:00:5e Ethernet multicast prefix:
                                 *      low 23 bits of the IP fill the low
                                 *      23 bits of the MAC.
                                 */
                                u32 taddr;
                                haddr[0]=0x01;
                                haddr[1]=0x00;
                                haddr[2]=0x5e;
                                taddr=ntohl(paddr);
                                haddr[5]=taddr&0xff;
                                taddr=taddr>>8;
                                haddr[4]=taddr&0xff;
                                taddr=taddr>>8;
                                haddr[3]=taddr&0x7f;
                                return 1;
                        }
                /*
                 *      If a device does not support multicast broadcast the stuff (eg AX.25 for now)
                 */
#endif
                /* Deliberate fallthrough from IS_MULTICAST on devices
                 * without a multicast mapping: treat as broadcast. */

                case IS_BROADCAST:
                        memcpy(haddr, dev->broadcast, dev->addr_len);
                        return 1;
        }
        return 0;
}
1321
 
1322
/*
1323
 *      Create a new unresolved entry.
1324
 */
1325
 
1326
/*
 *      Create a new unresolved cache entry for paddr on dev, optionally
 *      adopting a hardware-header cache record (hh) and queueing a
 *      pending skb on it.  If the table is unlocked the entry is
 *      inserted immediately and the first ARP request is issued
 *      (delegated to arpd when it is running); otherwise the entry is
 *      parked on the request backlog for arp_run_bh.  Returns the new
 *      entry, or NULL on allocation failure (skb ownership then stays
 *      with the caller).
 */
struct arp_table * arp_new_entry(u32 paddr, struct device *dev, struct hh_cache *hh, struct sk_buff *skb)
{
        struct arp_table *entry;

        entry = arp_alloc_entry();

        if (entry != NULL)
        {
                entry->ip = paddr;
                entry->dev = dev;
                if (hh)
                {
                        /* Bind the hh record both ways and take a reference. */
                        entry->hh = hh;
                        atomic_inc(&hh->hh_refcnt);
                        hh->hh_arp = (void*)entry;
                }
                entry->timer.expires = jiffies + sysctl_arp_res_time;

                if (skb != NULL)
                {
                        skb_queue_tail(&entry->skb, skb);
                        skb_device_unlock(skb);
                }

                if (!ARP_LOCKED())
                {
                        unsigned long hash = HASH(paddr);
                        entry->next = arp_tables[hash];
                        arp_tables[hash] = entry;
                        add_timer(&entry->timer);
                        entry->retries = sysctl_arp_max_tries;
#ifdef CONFIG_ARPD
                        if (!arpd_not_running)
                                arpd_lookup(paddr, dev);
                        else
#endif
                                arp_send(ARPOP_REQUEST, ETH_P_ARP, paddr, dev, dev->pa_addr, NULL,
                                         dev->dev_addr, NULL);
                }
                else
                {
#if RT_CACHE_DEBUG >= 2
                        printk("arp_new_entry: %08x backlogged\n", entry->ip);
#endif
                        /* Timer is NOT armed here; arp_run_bh does it when
                         * the entry is finally installed. */
                        arp_enqueue(&arp_req_backlog, entry);
                        arp_bh_mask |= ARP_BH_BACKLOG;
                }
        }
        return entry;
}
1376
 
1377
 
1378
/*
1379
 *      Find an arp mapping in the cache. If not found, post a request.
1380
 */
1381
 
1382
/*
 *      Main resolution entry point used by the output path.
 *
 *      Returns 0 when haddr has been filled in (resolved, or a
 *      predefined address) and the skb may be transmitted now; returns
 *      1 when resolution is pending — ownership of skb has then passed
 *      to the cache (queued for later) or the skb has been dropped with
 *      an ICMP host-unreachable notification.  saddr is accepted for
 *      interface compatibility but unused here.
 */
int arp_find(unsigned char *haddr, u32 paddr, struct device *dev,
             u32 saddr, struct sk_buff *skb)
{
        struct arp_table *entry;
        unsigned long hash;

        /* Own / multicast / broadcast addresses need no ARP exchange. */
        if (arp_set_predefined(ip_chk_addr(paddr), haddr, paddr, dev))
        {
                if (skb)
                        skb->arp = 1;
                return 0;
        }

        hash = HASH(paddr);
        arp_fast_lock();

        /*
         *      Find an entry
         */
        entry = arp_lookup(paddr, dev);

        if (entry != NULL)      /* It exists */
        {
                if (entry->flags & ATF_COM)
                {
                        entry->last_used = jiffies;
                        memcpy(haddr, entry->ha, dev->addr_len);
                        if (skb)
                                skb->arp = 1;
                        arp_unlock();
                        return 0;
                }

                /*
                 *      A request was already sent, but no reply yet. Thus
                 *      queue the packet with the previous attempt
                 */

                if (skb != NULL)
                {
                        if (entry->last_updated)
                        {
                                skb_queue_tail(&entry->skb, skb);
                                skb_device_unlock(skb);
                        }
                        /*
                         * If last_updated==0 host is dead, so
                         * drop skb's and set socket error.
                         */
                        else
                        {
                                icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, dev);
                                skb_device_unlock(skb); /* else it is lost forever */
                                dev_kfree_skb(skb, FREE_WRITE);
                        }
                }
                arp_unlock();
                return 1;
        }

        /* No entry at all: create one; the skb (if any) is queued on it. */
        entry = arp_new_entry(paddr, dev, NULL, skb);

        if (skb != NULL && !entry) {
                /* Allocation failed — the skb was never queued, drop it. */
                skb_device_unlock(skb); /* else it is lost forever */
                dev_kfree_skb(skb, FREE_WRITE);
        }

        arp_unlock();
        return 1;
}
1452
 
1453
/*
1454
 *      Binding hardware header cache entry.
1455
 *      It is the only really complicated part of arp code.
1456
 *      We have no locking for hh records, so that
1457
 *      all possible race conditions should be resolved by
1458
 *      cli()/sti() pairs.
1459
 *
1460
 *      Important note: hhs never disappear from lists, if ARP_LOCKED,
1461
 *      this fact allows to scan hh lists with enabled interrupts,
1462
 *      but results in generating duplicate hh entries.
1463
 *      It is harmless. (and I've never seen such event)
1464
 *
1465
 *      Returns 0, if hh has been just created, so that
1466
 *      caller should fill it.
1467
 */
1468
 
1469
/*
 *      Bind a hardware-header cache record for (paddr, htype) to *hhp.
 *
 *      Per the block comment above: hh records have no locking of their
 *      own, so list manipulation is guarded by cli()/sti() pairs, and
 *      hhs never leave their lists while the table is locked.
 *
 *      Returns 0 when a fresh hh was created and the caller should fill
 *      it; returns 1 when an existing (or predefined) hh was bound, or
 *      when nothing could be done (allocation failure).
 */
int arp_bind_cache(struct hh_cache ** hhp, struct device *dev, unsigned short htype, u32 paddr)
{
        struct arp_table *entry;
        struct hh_cache *hh;
        int addr_hint;
        unsigned long flags;

        save_flags(flags);

        /* Predefined destinations (own/multicast/broadcast) get an hh
         * built directly from arp_set_predefined, no cache entry needed. */
        if ((addr_hint = ip_chk_addr(paddr)) != 0)
        {
                unsigned char haddr[MAX_ADDR_LEN];
                if (*hhp)
                        return 1;
                hh = arp_alloc_hh(htype);
                if (!hh)
                        return 1;
                arp_set_predefined(addr_hint, haddr, paddr, dev);
                dev->header_cache_update(hh, dev, haddr);
                return arp_set_hh(hhp, hh);
        }

        arp_fast_lock();

        entry = arp_lookup(paddr, dev);

        if (entry)
        {
                /* Reuse an existing hh of the right type if one is bound. */
                for (hh = entry->hh; hh; hh=hh->hh_next)
                        if (hh->hh_type == htype)
                                break;

                if (hh)
                {
                        arp_set_hh(hhp, hh);
                        arp_unlock();
                        return 1;
                }
        }

        hh = arp_alloc_hh(htype);
        if (!hh)
        {
                arp_unlock();
                return 1;
        }

        if (entry)
        {

                /* Splice the new hh onto the entry with interrupts off. */
                cli();
                hh->hh_arp = (void*)entry;
                hh->hh_next = entry->hh;
                entry->hh = hh;
                atomic_inc(&hh->hh_refcnt);
                restore_flags(flags);

                if (entry->flags & ATF_COM)
                        dev->header_cache_update(hh, dev, entry->ha);

                if (arp_set_hh(hhp, hh))
                {
                        arp_unlock();
                        return 0;
                }

                entry->last_used = jiffies;
                arp_unlock();
                return 0;
        }

        /* No entry yet: create one that adopts the hh. */
        entry = arp_new_entry(paddr, dev, hh, NULL);
        if (entry == NULL)
        {
                kfree_s(hh, sizeof(struct hh_cache));
                arp_unlock();
                return 1;
        }

        if (!arp_set_hh(hhp, hh))
        {
                arp_unlock();
                return 0;
        }
        arp_unlock();
        return 1;
}
1556
 
1557
/*
 *      Bottom-half worker: drain the work deferred while the table was
 *      locked.  Two queues are processed — arp_backlog (updates to
 *      replay through arp_update) and arp_req_backlog (unresolved
 *      entries waiting to be installed).  If a duplicate entry appeared
 *      in the meantime, the backlogged one is discarded after migrating
 *      its hh records and queued skb's onto the survivor.
 */
static void arp_run_bh()
{
        unsigned long flags;
        struct arp_table *entry, *entry1;
        struct device  * dev;
        unsigned long hash;
        struct hh_cache *hh;
        u32 sip;

        save_flags(flags);
        cli();
        arp_fast_lock();

        while (arp_bh_mask)
        {
                arp_bh_mask  &= ~ARP_BH_BACKLOG;

                while ((entry = arp_dequeue(&arp_backlog)) != NULL)
                {
                        restore_flags(flags);
                        /* arp_update returns 1 when an existing entry
                         * absorbed the data, so 'entry' must be freed. */
                        if (arp_update(entry->ip, entry->ha, entry->dev, 0, entry, 0))
                                arp_free_entry(entry);
                        cli();
                }

                cli();
                while ((entry = arp_dequeue(&arp_req_backlog)) != NULL)
                {
                        restore_flags(flags);

                        dev = entry->dev;
                        sip = entry->ip;
                        hash = HASH(sip);

                        /* Has a matching entry been created meanwhile? */
                        for (entry1 = arp_tables[hash]; entry1; entry1 = entry1->next)
                                if (entry1->ip == sip && entry1->dev == dev)
                                        break;

                        if (!entry1)
                        {
                                /* No duplicate: install the deferred entry
                                 * and (re)start resolution if incomplete. */
                                cli();
                                entry->next = arp_tables[hash];
                                arp_tables[hash] = entry;
                                restore_flags(flags);
                                entry->timer.expires = jiffies + sysctl_arp_res_time;
                                entry->retries = sysctl_arp_max_tries;
                                entry->last_used = jiffies;
                                if (!(entry->flags & ATF_COM))
                                {
                                        add_timer(&entry->timer);
#ifdef CONFIG_ARPD
                                        if (!arpd_not_running)
                                                arpd_lookup(sip, dev);
                                        else
#endif
                                                arp_send(ARPOP_REQUEST, ETH_P_ARP, sip, dev, dev->pa_addr, NULL, dev->dev_addr, NULL);
                                }
#if RT_CACHE_DEBUG >= 1
                                printk(KERN_DEBUG "arp_run_bh: %08x reinstalled\n", sip);
#endif
                        }
                        else
                        {
                                struct sk_buff * skb;
                                struct hh_cache * next;

                                /* Discard entry, but preserve its hh's and
                                 * skb's.
                                 */
                                cli();
                                /* Re-point every hh at the surviving entry. */
                                for (hh=entry->hh; hh; hh=next)
                                {
                                        next = hh->hh_next;
                                        hh->hh_next = entry1->hh;
                                        entry1->hh = hh;
                                        hh->hh_arp = (void*)entry1;
                                }
                                entry->hh = NULL;

                                /* Prune skb list from entry
                                 * and graft it to entry1.
                                 */
                                while ((skb = skb_dequeue(&entry->skb)) != NULL)
                                {
                                        skb_device_lock(skb);
                                        restore_flags(flags);
                                        skb_queue_tail(&entry1->skb, skb);
                                        skb_device_unlock(skb);
                                        cli();
                                }
                                restore_flags(flags);

                                arp_free_entry(entry);

                                /* If the survivor is already resolved, push
                                 * the grafted packets out immediately. */
                                if (entry1->flags & ATF_COM)
                                {
                                        arp_update_hhs(entry1);
                                        arp_send_q(entry1);
                                }
                        }
                        cli();
                }
                cli();
        }
        arp_unlock();
        restore_flags(flags);
}
1664
 
1665
 
1666
/*
1667
 *      Interface to link layer: send routine and receive handler.
1668
 */
1669
 
1670
/*
1671
 *      Create and send an arp packet. If (dest_hw == NULL), we create a broadcast
1672
 *      message.
1673
 */
1674
 
1675
/*
 *      Build and transmit one ARP packet.
 *
 *      type     - ARP opcode (ARPOP_REQUEST/ARPOP_REPLY...)
 *      ptype    - link-layer protocol id for the frame (ETH_P_ARP)
 *      dest_hw  - destination hardware address; NULL means broadcast
 *      src_hw   - source hardware address placed in the ARP body
 *      target_hw- target hardware address field; NULL writes zeros
 *
 *      NOTE(review): the memcpy of src_hw below requires src_hw to be
 *      non-NULL; every caller in this file passes dev->dev_addr.
 */
void arp_send(int type, int ptype, u32 dest_ip,
              struct device *dev, u32 src_ip,
              unsigned char *dest_hw, unsigned char *src_hw,
              unsigned char *target_hw)
{
        struct sk_buff *skb;
        struct arphdr *arp;
        unsigned char *arp_ptr;

        /*
         *      No arp on this interface.
         */

        if (dev->flags&IFF_NOARP)
                return;

        /*
         *      Allocate a buffer
         */

        skb = alloc_skb(sizeof(struct arphdr)+ 2*(dev->addr_len+4)
                                + dev->hard_header_len, GFP_ATOMIC);
        if (skb == NULL)
        {
                printk(KERN_DEBUG "ARP: no memory to send an arp packet\n");
                return;
        }
        skb_reserve(skb, dev->hard_header_len);
        arp = (struct arphdr *) skb_put(skb,sizeof(struct arphdr) + 2*(dev->addr_len+4));
        skb->arp = 1;
        skb->dev = dev;
        skb->free = 1;
        skb->protocol = htons (ETH_P_ARP);

        /*
         *      Fill the device header for the ARP frame
         */
        dev->hard_header(skb,dev,ptype,dest_hw?dest_hw:dev->broadcast,src_hw?src_hw:NULL,skb->len);

        /*
         * Fill out the arp protocol part.
         *
         * The arp hardware type should match the device type, except for FDDI,
         * which (according to RFC 1390) should always equal 1 (Ethernet).
         */
#ifdef CONFIG_FDDI
        arp->ar_hrd = (dev->type == ARPHRD_FDDI) ? htons(ARPHRD_ETHER) : htons(dev->type);
#else
        arp->ar_hrd = htons(dev->type);
#endif
        /*
         *      Exceptions everywhere. AX.25 uses the AX.25 PID value not the
         *      DIX code for the protocol. Make these device structure fields.
         */
#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE)
        arp->ar_pro = (dev->type == ARPHRD_AX25 || dev->type == ARPHRD_NETROM) ? htons(AX25_P_IP) : htons(ETH_P_IP);
#else
        arp->ar_pro = (dev->type != ARPHRD_AX25) ? htons(ETH_P_IP) : htons(AX25_P_IP);
#endif
#else
        arp->ar_pro = htons(ETH_P_IP);
#endif
        arp->ar_hln = dev->addr_len;
        arp->ar_pln = 4;
        arp->ar_op = htons(type);

        /* Variable-length body follows the fixed header:
         * sender hw, sender ip, target hw, target ip. */
        arp_ptr=(unsigned char *)(arp+1);

        memcpy(arp_ptr, src_hw, dev->addr_len);
        arp_ptr+=dev->addr_len;
        memcpy(arp_ptr, &src_ip,4);
        arp_ptr+=4;
        if (target_hw != NULL)
                memcpy(arp_ptr, target_hw, dev->addr_len);
        else
                memset(arp_ptr, 0, dev->addr_len);
        arp_ptr+=dev->addr_len;
        memcpy(arp_ptr, &dest_ip, 4);

        dev_queue_xmit(skb, dev, 0);
}
1757
 
1758
 
1759
/*
1760
 *      Receive an arp request by the device layer.
1761
 */
1762
 
1763
int arp_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
1764
{
1765
/*
1766
 *      We shouldn't use this type conversion. Check later.
1767
 */
1768
 
1769
        struct arphdr *arp = (struct arphdr *)skb->h.raw;
1770
        unsigned char *arp_ptr= (unsigned char *)(arp+1);
1771
        unsigned char *sha,*tha;
1772
        u32 sip,tip;
1773
 
1774
        if(skb->pkt_type == PACKET_OTHERHOST)
1775
        {
1776
                kfree_skb(skb, FREE_READ);
1777
                return 0;
1778
        }
1779
 
1780
/*
1781
 *      The hardware length of the packet should match the hardware length
1782
 *      of the device.  Similarly, the hardware types should match.  The
1783
 *      device should be ARP-able.  Also, if pln is not 4, then the lookup
1784
 *      is not from an IP number.  We can't currently handle this, so toss
1785
 *      it.
1786
 */
1787
#if defined(CONFIG_NET_ETHERNET) || defined(CONFIG_FDDI)
1788
        if (dev->type == ARPHRD_ETHER || dev->type == ARPHRD_FDDI)
1789
        {
1790
                /*
1791
                 * According to RFC 1390, FDDI devices should accept ARP hardware types
1792
                 * of 1 (Ethernet).  However, to be more robust, we'll accept hardware
1793
                 * types of either 1 (Ethernet) or 6 (IEEE 802.2).
1794
                 *
1795
                 * ETHERNET devices will accept both hardware types, too. (RFC 1042)
1796
                 */
1797
                if (arp->ar_hln != dev->addr_len    ||
1798
                        ((ntohs(arp->ar_hrd) != ARPHRD_ETHER) && (ntohs(arp->ar_hrd) != ARPHRD_IEEE802)) ||
1799
                        dev->flags & IFF_NOARP          ||
1800
                        arp->ar_pln != 4)
1801
                {
1802
                        kfree_skb(skb, FREE_READ);
1803
                        return 0;
1804
                }
1805
        }
1806
        else
1807
        {
1808
                if (arp->ar_hln != dev->addr_len    ||
1809
                        dev->type != ntohs(arp->ar_hrd) ||
1810
                        dev->flags & IFF_NOARP          ||
1811
                        arp->ar_pln != 4)
1812
                {
1813
                        kfree_skb(skb, FREE_READ);
1814
                        return 0;
1815
                }
1816
        }
1817
#else
1818
        if (arp->ar_hln != dev->addr_len    ||
1819
#if CONFIG_AP1000
1820
            /*
1821
             * ARP from cafe-f was found to use ARPHDR_IEEE802 instead of
1822
             * the expected ARPHDR_ETHER.
1823
             */
1824
            (strcmp(dev->name,"fddi") == 0 &&
1825
             arp->ar_hrd != ARPHRD_ETHER && arp->ar_hrd != ARPHRD_IEEE802) ||
1826
            (strcmp(dev->name,"fddi") != 0 &&
1827
             dev->type != ntohs(arp->ar_hrd)) ||
1828
#else
1829
                dev->type != ntohs(arp->ar_hrd) ||
1830
#endif
1831
                dev->flags & IFF_NOARP          ||
1832
                arp->ar_pln != 4)
1833
        {
1834
                kfree_skb(skb, FREE_READ);
1835
                return 0;
1836
        }
1837
#endif
1838
 
1839
/*
1840
 *      Another test.
1841
 *      The logic here is that the protocol being looked up by arp should
1842
 *      match the protocol the device speaks.  If it doesn't, there is a
1843
 *      problem, so toss the packet.
1844
 */
1845
 
1846
        switch (dev->type)
1847
        {
1848
#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
1849
                case ARPHRD_AX25:
1850
                        if(arp->ar_pro != htons(AX25_P_IP))
1851
                        {
1852
                                kfree_skb(skb, FREE_READ);
1853
                                return 0;
1854
                        }
1855
                        break;
1856
#endif
1857
#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE)
1858
                case ARPHRD_NETROM:
1859
                        if(arp->ar_pro != htons(AX25_P_IP))
1860
                        {
1861
                                kfree_skb(skb, FREE_READ);
1862
                                return 0;
1863
                        }
1864
                        break;
1865
#endif
1866
                case ARPHRD_ETHER:
1867
                case ARPHRD_ARCNET:
1868
                case ARPHRD_METRICOM:
1869
                case ARPHRD_IEEE802:
1870
                case ARPHRD_FDDI:
1871
                        if(arp->ar_pro != htons(ETH_P_IP))
1872
                        {
1873
                                kfree_skb(skb, FREE_READ);
1874
                                return 0;
1875
                        }
1876
                        break;
1877
 
1878
                default:
1879
                        printk(KERN_ERR "ARP: dev->type mangled!\n");
1880
                        kfree_skb(skb, FREE_READ);
1881
                        return 0;
1882
        }
1883
 
1884
/*
1885
 *      Extract fields
1886
 */
1887
 
1888
        sha=arp_ptr;
1889
        arp_ptr += dev->addr_len;
1890
        memcpy(&sip, arp_ptr, 4);
1891
        arp_ptr += 4;
1892
        tha=arp_ptr;
1893
        arp_ptr += dev->addr_len;
1894
        memcpy(&tip, arp_ptr, 4);
1895
 
1896
/*
1897
 *      Check for bad requests for 127.x.x.x and requests for multicast
1898
 *      addresses.  If this is one such, delete it.
1899
 */
1900
        if (LOOPBACK(tip) || MULTICAST(tip))
1901
        {
1902
                kfree_skb(skb, FREE_READ);
1903
                return 0;
1904
        }
1905
 
1906
/*
1907
 *  Process entry.  The idea here is we want to send a reply if it is a
1908
 *  request for us or if it is a request for someone else that we hold
1909
 *  a proxy for.  We want to add an entry to our cache if it is a reply
1910
 *  to us or if it is a request for our address.
1911
 *  (The assumption for this last is that if someone is requesting our
1912
 *  address, they are probably intending to talk to us, so it saves time
1913
 *  if we cache their address.  Their address is also probably not in
1914
 *  our cache, since ours is not in their cache.)
1915
 *
1916
 *  Putting this another way, we only care about replies if they are to
1917
 *  us, in which case we add them to the cache.  For requests, we care
1918
 *  about those for us and those for our proxies.  We reply to both,
1919
 *  and in the case of requests for us we add the requester to the arp
1920
 *  cache.
1921
 */
1922
 
1923
/*
1924
 *      try to switch to alias device whose addr is tip or closest to sip.
1925
 */
1926
 
1927
#ifdef CONFIG_NET_ALIAS
1928
        if (tip != dev->pa_addr && net_alias_has(skb->dev))
1929
        {
1930
                /*
1931
                 *      net_alias_dev_rx32 returns main dev if it fails to found other.
1932
                 *      if successful, also incr. alias rx count.
1933
                 */
1934
                dev = net_alias_dev_rx32(dev, AF_INET, sip, tip);
1935
 
1936
                if (dev->type != ntohs(arp->ar_hrd) || dev->flags & IFF_NOARP)
1937
                {
1938
                        kfree_skb(skb, FREE_READ);
1939
                        return 0;
1940
                }
1941
        }
1942
#endif
1943
 
1944
        if (arp->ar_op == htons(ARPOP_REQUEST))
1945
        {
1946
 
1947
/*
1948
 * Only reply for the real device address or when it's in our proxy tables
1949
 */
1950
                if (tip != dev->pa_addr)
1951
                {
1952
                        struct arp_table *proxy_entry;
1953
 
1954
/*
1955
 *      To get in here, it is a request for someone else.  We need to
1956
 *      check if that someone else is one of our proxies.  If it isn't,
1957
 *      we can toss it.
1958
 *
1959
 *      Make "longest match" lookup, a la routing.
1960
 */
1961
 
1962
                        arp_fast_lock();
1963
 
1964
                        for (proxy_entry = arp_proxy_list; proxy_entry;
1965
                             proxy_entry = proxy_entry->next)
1966
                        {
1967
                                if (proxy_entry->dev == dev &&
1968
                                    !((proxy_entry->ip^tip)&proxy_entry->mask))
1969
                                        break;
1970
                        }
1971
 
1972
                        if (proxy_entry && (proxy_entry->mask || ((dev->pa_addr^tip)&dev->pa_mask)))
1973
                        {
1974
                                char ha[MAX_ADDR_LEN];
1975
                                struct rtable * rt;
1976
 
1977
                                /* Unlock arp tables to make life for
1978
                                 * ip_rt_route easy. Note, that we are obliged
1979
                                 * to make local copy of hardware address.
1980
                                 */
1981
 
1982
                                memcpy(ha, proxy_entry->ha, dev->addr_len);
1983
                                arp_unlock();
1984
 
1985
                                rt = ip_rt_route(tip, 0, NULL);
1986
                                if (rt  && rt->rt_dev != dev)
1987
                                        arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,ha,sha);
1988
                                ip_rt_put(rt);
1989
 
1990
                        }
1991
                        else
1992
                                arp_unlock();
1993
                }
1994
                else
1995
                        arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha);
1996
 
1997
        }
1998
 
1999
        arp_fast_lock();
2000
        arp_update(sip, sha, dev, 0, NULL, ip_chk_addr(tip) != IS_MYADDR && dev->type != ARPHRD_METRICOM);
2001
        arp_unlock();
2002
        kfree_skb(skb, FREE_READ);
2003
        return 0;
2004
}
2005
 
2006
 
2007
 
2008
/*
2009
 *      User level interface (ioctl, /proc)
2010
 */
2011
 
2012
/*
2013
 *      Set (create) an ARP cache entry.
2014
 */
2015
 
2016
/*
 *      Create (or replace) an ARP cache entry from a user-space request
 *      (SIOCSARP path).
 *
 *      r   - the request: destination IP in arp_pa, hardware address in
 *            arp_ha, optional netmask in arp_netmask (only honoured with
 *            ATF_NETMASK set).
 *      dev - device to bind the entry to; may be NULL, in which case a
 *            device is chosen here (by hardware type for proxy entries,
 *            by routing lookup otherwise).
 *
 *      Returns 0 on success or a negative errno.
 */
static int arp_req_set(struct arpreq *r, struct device * dev)
{
        struct arp_table *entry, **entryp;
        struct sockaddr_in *si;
        unsigned char *ha;
        u32 ip;
        u32 mask = DEF_ARP_NETMASK;
        unsigned long flags;

        /*
         *      Extract netmask (if supplied).
         */

        if (r->arp_flags&ATF_NETMASK)
        {
                si = (struct sockaddr_in *) &r->arp_netmask;
                mask = si->sin_addr.s_addr;
        }

        /*
         *      Extract destination.
         */

        si = (struct sockaddr_in *) &r->arp_pa;
        ip = si->sin_addr.s_addr;


        if (r->arp_flags&ATF_PUBL)
        {
                /* Proxy entry: a zero mask is only legal together with a
                 * zero (wildcard) address.  Without an explicit device,
                 * pick one matching the requested hardware type.
                 */
                if (!mask && ip)
                        return -EINVAL;
                if (!dev) {
                        dev = dev_getbytype(r->arp_ha.sa_family);
                        if (!dev)
                                return -ENODEV;
                }
        }
        else
        {
                /* Normal entry: resolve the output device via the routing
                 * table when the caller did not name one.
                 */
                if (!dev)
                {
                        struct rtable * rt;
                        rt = ip_rt_route(ip, 0, NULL);
                        if (!rt)
                                return -ENETUNREACH;
                        dev = rt->rt_dev;
                        ip_rt_put(rt);
                        if (!dev)
                                return -ENODEV;
                }
                /* Refuse entries for our own/local addresses, except on
                 * Metricom devices (see the matching exception in arp_rcv).
                 */
                if (dev->type != ARPHRD_METRICOM && ip_chk_addr(ip))
                        return -EINVAL;
        }
        /* No ARP on loopback or NOARP interfaces. */
        if (dev->flags & (IFF_LOOPBACK | IFF_NOARP))
                return -ENODEV;

        /* Supplied hardware address family must match the device type. */
        if (r->arp_ha.sa_family != dev->type)
                return -EINVAL;

        arp_fast_lock();
#if RT_CACHE_DEBUG >= 1
        if (ARP_LOCKED())
                printk("arp_req_set: bug\n");
#endif

        /* Proxy entries live on a separate (mask-ordered) list. */
        if (!(r->arp_flags & ATF_PUBL))
                entryp = &arp_tables[HASH(ip)];
        else
                entryp = &arp_proxy_list;

        /* Remove any existing duplicate, and find the insertion point
         * that keeps the list ordered by decreasing mask specificity.
         */
        while ((entry = *entryp) != NULL)
        {
                /* User supplied arp entries are definitive - RHP 960603 */

                if (entry->ip == ip && entry->mask == mask && entry->dev == dev) {
                        *entryp=entry->next;
                        arp_free_entry(entry);
                        continue;
                }
                if ((entry->mask & mask) != mask)
                        break;
                entryp = &entry->next;
        }

        entry = arp_alloc_entry();
        if (entry == NULL)
        {
                arp_unlock();
                return -ENOMEM;
        }
        entry->ip = ip;
        entry->dev = dev;
        entry->mask = mask;
        entry->flags = r->arp_flags;

        entry->next = *entryp;
        *entryp = entry;

        /* An all-zero hardware address means "use the device's own". */
        ha = r->arp_ha.sa_data;
        if (empty(ha, dev->addr_len))
                ha = dev->dev_addr;

        /* Fill in the resolved state with interrupts off, so a concurrent
         * ARP receive cannot observe a half-written hardware address.
         */
        save_flags(flags);
        cli();
        memcpy(entry->ha, ha, dev->addr_len);
        entry->last_updated = entry->last_used = jiffies;
        entry->flags |= ATF_COM;
        restore_flags(flags);
        arpd_update(entry);
        arp_update_hhs(entry);
        arp_unlock();
        return 0;
}
2129
 
2130
 
2131
 
2132
/*
2133
 *      Get an ARP cache entry.
2134
 */
2135
 
2136
static int arp_req_get(struct arpreq *r, struct device *dev)
2137
{
2138
        struct arp_table *entry;
2139
        struct sockaddr_in *si;
2140
        u32 mask = DEF_ARP_NETMASK;
2141
 
2142
        if (r->arp_flags&ATF_NETMASK)
2143
        {
2144
                si = (struct sockaddr_in *) &r->arp_netmask;
2145
                mask = si->sin_addr.s_addr;
2146
        }
2147
 
2148
        si = (struct sockaddr_in *) &r->arp_pa;
2149
 
2150
        arp_fast_lock();
2151
#if RT_CACHE_DEBUG >= 1
2152
        if (ARP_LOCKED())
2153
                printk("arp_req_set: impossible\n");
2154
#endif
2155
 
2156
        if (!(r->arp_flags & ATF_PUBL))
2157
                entry = arp_tables[HASH(si->sin_addr.s_addr)];
2158
        else
2159
                entry = arp_proxy_list;
2160
 
2161
        for ( ; entry ;entry = entry->next)
2162
        {
2163
                if (entry->ip == si->sin_addr.s_addr
2164
                    && (!dev || entry->dev == dev)
2165
                    && (!(r->arp_flags&ATF_NETMASK) || entry->mask == mask))
2166
                {
2167
                        memcpy(r->arp_ha.sa_data, entry->ha, entry->dev->addr_len);
2168
                        r->arp_ha.sa_family = entry->dev->type;
2169
                        r->arp_flags = entry->flags;
2170
                        strncpy(r->arp_dev, entry->dev->name, sizeof(r->arp_dev));
2171
                        arp_unlock();
2172
                        return 0;
2173
                }
2174
        }
2175
 
2176
        arp_unlock();
2177
        return -ENXIO;
2178
}
2179
 
2180
static int arp_req_delete(struct arpreq *r, struct device * dev)
2181
{
2182
        struct sockaddr_in      *si;
2183
        struct arp_table        *entry, **entryp;
2184
        int     retval = -ENXIO;
2185
        u32     mask = DEF_ARP_NETMASK;
2186
 
2187
        if (r->arp_flags&ATF_NETMASK)
2188
        {
2189
                si = (struct sockaddr_in *) &r->arp_netmask;
2190
                mask = si->sin_addr.s_addr;
2191
        }
2192
 
2193
        si = (struct sockaddr_in *) &r->arp_pa;
2194
 
2195
        arp_fast_lock();
2196
#if RT_CACHE_DEBUG >= 1
2197
        if (ARP_LOCKED())
2198
                printk("arp_req_delete: impossible\n");
2199
#endif
2200
 
2201
        if (!(r->arp_flags & ATF_PUBL))
2202
                entryp = &arp_tables[HASH(si->sin_addr.s_addr)];
2203
        else
2204
                entryp = &arp_proxy_list;
2205
 
2206
        while ((entry = *entryp) != NULL)
2207
        {
2208
                if (entry->ip == si->sin_addr.s_addr
2209
                    && (!dev || entry->dev == dev)
2210
                    && (!(r->arp_flags&ATF_NETMASK) || entry->mask == mask))
2211
                {
2212
                        *entryp = entry->next;
2213
                        arp_free_entry(entry);
2214
                        retval = 0;
2215
                        continue;
2216
                }
2217
                entryp = &entry->next;
2218
        }
2219
 
2220
        arp_unlock();
2221
        return retval;
2222
}
2223
 
2224
/*
2225
 *      Handle an ARP layer I/O control request.
2226
 */
2227
 
2228
/*
 *      Handle an ARP layer ioctl: copy the request in from user space,
 *      normalise it, then dispatch to arp_req_set/get/delete.  Supports
 *      both the current struct arpreq ioctls and the OLD_* variants that
 *      use the shorter struct arpreq_old (no device name).
 *
 *      Returns 0 on success or a negative errno.
 */
int arp_ioctl(unsigned int cmd, void *arg)
{
        int err;
        struct arpreq r;

        struct device * dev = NULL;

        /* First pass: privilege check and copy-in, sized per variant. */
        switch(cmd)
        {
                case SIOCDARP:
                case SIOCSARP:
                        if (!suser())
                                return -EPERM;
                        /* fall through - modifiers share GARP's copy-in */
                case SIOCGARP:
                        err = verify_area(VERIFY_READ, arg, sizeof(struct arpreq));
                        if (err)
                                return err;
                        memcpy_fromfs(&r, arg, sizeof(struct arpreq));
                        break;
                case OLD_SIOCDARP:
                case OLD_SIOCSARP:
                        if (!suser())
                                return -EPERM;
                        /* fall through - modifiers share GARP's copy-in */
                case OLD_SIOCGARP:
                        err = verify_area(VERIFY_READ, arg, sizeof(struct arpreq_old));
                        if (err)
                                return err;
                        memcpy_fromfs(&r, arg, sizeof(struct arpreq_old));
                        /* arpreq_old has no arp_dev field; clear ours. */
                        memset(&r.arp_dev, 0, sizeof(r.arp_dev));
                        break;
                default:
                        return -EINVAL;
        }

        if (r.arp_pa.sa_family != AF_INET)
                return -EPFNOSUPPORT;

        /* A netmask only makes sense on proxy (ATF_PUBL) entries;
         * default it otherwise.
         */
        if (!(r.arp_flags & ATF_PUBL))
                r.arp_flags &= ~ATF_NETMASK;
        if (!(r.arp_flags & ATF_NETMASK))
                ((struct sockaddr_in *)&r.arp_netmask)->sin_addr.s_addr=DEF_ARP_NETMASK;

        /* Resolve an explicitly named device and sanity-check the
         * hardware address family against it.
         */
        if (r.arp_dev[0])
        {
                if ((dev = dev_get(r.arp_dev)) == NULL)
                        return -ENODEV;

                if (!r.arp_ha.sa_family)
                        r.arp_ha.sa_family = dev->type;
                else if (r.arp_ha.sa_family != dev->type)
                        return -EINVAL;
        }

        /* Second pass: perform the operation. */
        switch(cmd)
        {
                case SIOCDARP:
                        return arp_req_delete(&r, dev);
                case SIOCSARP:
                        return arp_req_set(&r, dev);
                case OLD_SIOCDARP:
                        /* old  SIOCDARP destroys both
                         * normal and proxy mappings
                         */
                        r.arp_flags &= ~ATF_PUBL;
                        err = arp_req_delete(&r, dev);
                        r.arp_flags |= ATF_PUBL;
                        if (!err)
                                arp_req_delete(&r, dev);
                        else
                                err = arp_req_delete(&r, dev);
                        return err;
                case OLD_SIOCSARP:
                        err = arp_req_set(&r, dev);
                        /* old SIOCSARP works so funny,
                         * that its behaviour can be emulated
                         * only approximately 8).
                         * It should work. --ANK
                         */
                        if (r.arp_flags & ATF_PUBL)
                        {
                                r.arp_flags &= ~ATF_PUBL;
                                arp_req_delete(&r, dev);
                        }
                        return err;
                case SIOCGARP:
                        err = verify_area(VERIFY_WRITE, arg, sizeof(struct arpreq));
                        if (err)
                                return err;
                        err = arp_req_get(&r, dev);
                        if (!err)
                                memcpy_tofs(arg, &r, sizeof(r));
                        return err;
                case OLD_SIOCGARP:
                        err = verify_area(VERIFY_WRITE, arg, sizeof(struct arpreq_old));
                        if (err)
                                return err;
                        /* Try a normal entry first, then fall back to proxy. */
                        r.arp_flags &= ~ATF_PUBL;
                        err = arp_req_get(&r, dev);
                        if (err < 0)
                        {
                                r.arp_flags |= ATF_PUBL;
                                err = arp_req_get(&r, dev);
                        }
                        if (!err)
                                memcpy_tofs(arg, &r, sizeof(struct arpreq_old));
                        return err;
        }
        /*NOTREACHED*/
        return 0;
}
2338
 
2339
/*
2340
 *      Write the contents of the ARP cache to a PROCfs file.
2341
 */
2342
 
2343
#define HBUFFERLEN 30
2344
 
2345
/*
 *      /proc/net/arp read handler: format the ARP cache (and proxy list,
 *      which shares arp_tables[] indexing via FULL_ARP_TABLE_SIZE) into
 *      `buffer`, honouring the usual procfs offset/length windowing.
 *
 *      NOTE(review): the `else {` opened inside the CONFIG_AX25 #if block
 *      below is closed by the `}` in the matching #if further down - the
 *      brace structure spans preprocessor branches; edit with care.
 */
int arp_get_info(char *buffer, char **start, off_t offset, int length, int dummy)
{
        int len=0;
        off_t pos=0;
        int size;
        struct arp_table *entry;
        char hbuffer[HBUFFERLEN];
        int i,j,k;
        const char hexbuf[] =  "0123456789ABCDEF";

        size = sprintf(buffer,"IP address       HW type     Flags       HW address            Mask     Device\n");

        pos+=size;
        len+=size;

        arp_fast_lock();

        for(i=0; i<FULL_ARP_TABLE_SIZE; i++)
        {
                for(entry=arp_tables[i]; entry!=NULL; entry=entry->next)
                {
/*
 *      Convert hardware address to XX:XX:XX:XX ... form.
 */
#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE)
                        /* AX.25/NET/ROM addresses print as callsigns. */
                        if (entry->dev->type == ARPHRD_AX25 || entry->dev->type == ARPHRD_NETROM)
                             strcpy(hbuffer,ax2asc((ax25_address *)entry->ha));
                        else {
#else
                        if(entry->dev->type==ARPHRD_AX25)
                             strcpy(hbuffer,ax2asc((ax25_address *)entry->ha));
                        else {
#endif
#endif

                        /* Hex-dump the hardware address, colon separated;
                         * k<HBUFFERLEN-3 leaves room for "XX:" plus NUL.
                         */
                        for(k=0,j=0;k<HBUFFERLEN-3 && j<entry->dev->addr_len;j++)
                        {
                                hbuffer[k++]=hexbuf[ (entry->ha[j]>>4)&15 ];
                                hbuffer[k++]=hexbuf[  entry->ha[j]&15     ];
                                hbuffer[k++]=':';
                        }
                        hbuffer[--k]=0;        /* overwrite trailing ':' */

#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
                        }
#endif
                        size = sprintf(buffer+len,
                                "%-17s0x%-10x0x%-10x%s",
                                in_ntoa(entry->ip),
                                (unsigned int)entry->dev->type,
                                entry->flags,
                                hbuffer);
#if RT_CACHE_DEBUG < 2
                        size += sprintf(buffer+len+size,
                                 "     %-17s %s\n",
                                 entry->mask==DEF_ARP_NETMASK ?
                                 "*" : in_ntoa(entry->mask), entry->dev->name);
#else
                        /* Debug build: also show hh cache refcount/state. */
                        size += sprintf(buffer+len+size,
                                 "     %-17s %s\t%d\t%1d\n",
                                 entry->mask==DEF_ARP_NETMASK ?
                                 "*" : in_ntoa(entry->mask), entry->dev->name,
                                 entry->hh ? entry->hh->hh_refcnt : -1,
                                 entry->hh ? entry->hh->hh_uptodate : 0);
#endif

                        len += size;
                        pos += size;

                        /* Standard procfs windowing: discard output before
                         * `offset`, stop once the window is filled.
                         */
                        if (pos <= offset)
                                len=0;
                        if (pos >= offset+length)
                                goto done;
                }
        }
done:
        arp_unlock();

        *start = buffer+len-(pos-offset);       /* Start of wanted data */
        len = pos-offset;                       /* Start slop */
        if (len>length)
                len = length;                   /* Ending slop */
        return len;
}
2430
 
2431
 
2432
 
2433
/*
2434
 *      Called once on startup.
2435
 */
2436
 
2437
/*
 *      Packet handler for ARP frames; the protocol type field is filled
 *      in at runtime by arp_init() with htons(ETH_P_ARP).
 */
static struct packet_type arp_packet_type =
{
        0,       /* Should be: __constant_htons(ETH_P_ARP) - but this _doesn't_ come out constant! */
        NULL,           /* All devices */
        arp_rcv,        /* receive handler */
        NULL,
        NULL
};
2445
 
2446
/*
 *      Network-device event notifier, registered by arp_init() so ARP
 *      state can react to device events via arp_device_event().
 */
static struct notifier_block arp_dev_notifier={
        arp_device_event,
        NULL,

};
2451
 
2452
#ifdef CONFIG_PROC_FS
2453
static struct proc_dir_entry arp_proc_dir_entry = {
2454
                PROC_NET_ARP, 3, "arp",
2455
                S_IFREG | S_IRUGO, 1, 0, 0,
2456
                0, &proc_net_inode_operations,
2457
                arp_get_info
2458
        };
2459
#endif
2460
 
2461
 
2462
/*
 *      One-time ARP subsystem initialisation: hook the ARP packet type
 *      into the receive path, start the cache-expiry timer, and register
 *      the device notifier plus optional /proc and arpd interfaces.
 */
void arp_init (void)
{
        /* Register the packet type */
        arp_packet_type.type=htons(ETH_P_ARP);
        dev_add_pack(&arp_packet_type);
        /* Start with the regular checks for expired arp entries. */
        add_timer(&arp_timer);
        /* Register for device down reports */
        register_netdevice_notifier(&arp_dev_notifier);

#ifdef CONFIG_PROC_FS
        /* Export the cache as /proc/net/arp. */
        proc_net_register(&arp_proc_dir_entry);
#endif

#ifdef CONFIG_ARPD
        /* Attach the user-space arp daemon netlink channel. */
        netlink_attach(NETLINK_ARPD, arpd_callback);
#endif
}
2480
 
2481
#ifdef CONFIG_AX25_MODULE

/*
 *      ax25 -> ascii conversion
 *
 *      Render an AX.25 callsign as "CALL-SSID" into a static buffer.
 *      Returns "*" for an all-blank callsign.  Not reentrant: the
 *      result is overwritten by the next call.
 */
char *ax2asc(ax25_address *a)
{
        static char buf[11];
        char *p = buf;
        int i;
        int ssid;

        /* Callsign bytes are stored shifted left one bit; blanks
         * are simply skipped in the output.
         */
        for (i = 0; i < 6; i++) {
                char ch = (a->ax25_call[i] >> 1) & 0x7F;

                if (ch != ' ')
                        *p++ = ch;
        }

        *p++ = '-';

        /* The SSID (0..15) occupies bits 1-4 of the seventh byte;
         * values above 9 need a leading '1'.
         */
        ssid = (a->ax25_call[6] >> 1) & 0x0F;
        if (ssid > 9) {
                *p++ = '1';
                ssid -= 10;
        }

        *p++ = ssid + '0';
        *p++ = '\0';

        /* Empty callsign: report the wildcard instead. */
        if (buf[0] == '\0' || buf[0] == '-')
                return "*";

        return buf;

}

#endif

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.