linux-2.6.24/net/ipv6/ip6_tunnel.c
/*
 *      IPv6 tunneling device
 *      Linux INET6 implementation
 *
 *      Authors:
 *      Ville Nuorvala          <vnuorval@tcs.hut.fi>
 *      Yasuyuki Kozakai        <kozakai@linux-ipv6.org>
 *
 *      $Id$
 *
 *      Based on:
 *      linux/net/ipv6/sit.c and linux/net/ipv4/ipip.c
 *
 *      RFC 2473
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 */

#include <linux/module.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sockios.h>
#include <linux/icmp.h>
#include <linux/if.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/if_tunnel.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/route.h>
#include <linux/rtnetlink.h>
#include <linux/netfilter_ipv6.h>

#include <asm/uaccess.h>
#include <asm/atomic.h>

#include <net/icmp.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/ip6_tunnel.h>
#include <net/xfrm.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>

MODULE_AUTHOR("Ville Nuorvala");
MODULE_DESCRIPTION("IPv6 tunneling device");
MODULE_LICENSE("GPL");

#define IPV6_TLV_TEL_DST_SIZE 8

#ifdef IP6_TNL_DEBUG
#define IP6_TNL_TRACE(x...) printk(KERN_DEBUG "%s:" x "\n", __FUNCTION__)
#else
#define IP6_TNL_TRACE(x...) do {;} while(0)
#endif

#define IPV6_TCLASS_MASK (IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK)
#define IPV6_TCLASS_SHIFT 20

#define HASH_SIZE  32

#define HASH(addr) ((__force u32)((addr)->s6_addr32[0] ^ (addr)->s6_addr32[1] ^ \
                     (addr)->s6_addr32[2] ^ (addr)->s6_addr32[3]) & \
                    (HASH_SIZE - 1))
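/*
 * Note: HASH() XOR-folds the four 32-bit words of an IPv6 address and
 * masks the result with HASH_SIZE - 1, so only the low five bits pick
 * one of the 32 buckets in tnls_r_l[]; ip6_tnl_lookup() below combines
 * the remote and local hashes with another XOR to choose the bucket.
 */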

static int ip6_fb_tnl_dev_init(struct net_device *dev);
static int ip6_tnl_dev_init(struct net_device *dev);
static void ip6_tnl_dev_setup(struct net_device *dev);

/* the IPv6 tunnel fallback device */
static struct net_device *ip6_fb_tnl_dev;


/* lists for storing tunnels in use */
static struct ip6_tnl *tnls_r_l[HASH_SIZE];
static struct ip6_tnl *tnls_wc[1];
static struct ip6_tnl **tnls[2] = { tnls_wc, tnls_r_l };

/* lock for the tunnel lists */
static DEFINE_RWLOCK(ip6_tnl_lock);

static inline struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t)
{
        struct dst_entry *dst = t->dst_cache;

        if (dst && dst->obsolete &&
            dst->ops->check(dst, t->dst_cookie) == NULL) {
                t->dst_cache = NULL;
                dst_release(dst);
                return NULL;
        }

        return dst;
}

static inline void ip6_tnl_dst_reset(struct ip6_tnl *t)
{
        dst_release(t->dst_cache);
        t->dst_cache = NULL;
}

static inline void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst)
{
        struct rt6_info *rt = (struct rt6_info *) dst;
        t->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
        dst_release(t->dst_cache);
        t->dst_cache = dst;
}

/**
 * ip6_tnl_lookup - fetch tunnel matching the end-point addresses
 *   @remote: the address of the tunnel exit-point
 *   @local: the address of the tunnel entry-point
 *
 * Return:
 *   tunnel matching given end-points if found,
 *   else fallback tunnel if its device is up,
 *   else %NULL
 **/

static struct ip6_tnl *
ip6_tnl_lookup(struct in6_addr *remote, struct in6_addr *local)
{
        unsigned h0 = HASH(remote);
        unsigned h1 = HASH(local);
        struct ip6_tnl *t;

        for (t = tnls_r_l[h0 ^ h1]; t; t = t->next) {
                if (ipv6_addr_equal(local, &t->parms.laddr) &&
                    ipv6_addr_equal(remote, &t->parms.raddr) &&
                    (t->dev->flags & IFF_UP))
                        return t;
        }
        if ((t = tnls_wc[0]) != NULL && (t->dev->flags & IFF_UP))
                return t;

        return NULL;
}

/**
 * ip6_tnl_bucket - get head of list matching given tunnel parameters
 *   @p: parameters containing tunnel end-points
 *
 * Description:
 *   ip6_tnl_bucket() returns the head of the list matching the
 *   &struct in6_addr entries laddr and raddr in @p.
 *
 * Return: head of IPv6 tunnel list
 **/

static struct ip6_tnl **
ip6_tnl_bucket(struct ip6_tnl_parm *p)
{
        struct in6_addr *remote = &p->raddr;
        struct in6_addr *local = &p->laddr;
        unsigned h = 0;
        int prio = 0;

        if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
                prio = 1;
                h = HASH(remote) ^ HASH(local);
        }
        return &tnls[prio][h];
}

/**
 * ip6_tnl_link - add tunnel to hash table
 *   @t: tunnel to be added
 **/

static void
ip6_tnl_link(struct ip6_tnl *t)
{
        struct ip6_tnl **tp = ip6_tnl_bucket(&t->parms);

        t->next = *tp;
        write_lock_bh(&ip6_tnl_lock);
        *tp = t;
        write_unlock_bh(&ip6_tnl_lock);
}

/**
 * ip6_tnl_unlink - remove tunnel from hash table
 *   @t: tunnel to be removed
 **/

static void
ip6_tnl_unlink(struct ip6_tnl *t)
{
        struct ip6_tnl **tp;

        for (tp = ip6_tnl_bucket(&t->parms); *tp; tp = &(*tp)->next) {
                if (t == *tp) {
                        write_lock_bh(&ip6_tnl_lock);
                        *tp = t->next;
                        write_unlock_bh(&ip6_tnl_lock);
                        break;
                }
        }
}

/**
 * ip6_tnl_create() - create a new tunnel
 *   @p: tunnel parameters
 *
 * Description:
 *   Create tunnel matching given parameters.
 *
 * Return:
 *   created tunnel or NULL
 **/

static struct ip6_tnl *ip6_tnl_create(struct ip6_tnl_parm *p)
{
        struct net_device *dev;
        struct ip6_tnl *t;
        char name[IFNAMSIZ];
        int err;

        if (p->name[0]) {
                strlcpy(name, p->name, IFNAMSIZ);
        } else {
                int i;
                for (i = 1; i < IP6_TNL_MAX; i++) {
                        sprintf(name, "ip6tnl%d", i);
                        if (__dev_get_by_name(&init_net, name) == NULL)
                                break;
                }
                if (i == IP6_TNL_MAX)
                        goto failed;
        }
        dev = alloc_netdev(sizeof (*t), name, ip6_tnl_dev_setup);
        if (dev == NULL)
                goto failed;

        t = netdev_priv(dev);
        dev->init = ip6_tnl_dev_init;
        t->parms = *p;

        if ((err = register_netdevice(dev)) < 0) {
                free_netdev(dev);
                goto failed;
        }
        dev_hold(dev);
        ip6_tnl_link(t);
        return t;
failed:
        return NULL;
}

/**
 * ip6_tnl_locate - find or create tunnel matching given parameters
 *   @p: tunnel parameters
 *   @create: != 0 if allowed to create new tunnel if no match found
 *
 * Description:
 *   ip6_tnl_locate() first tries to locate an existing tunnel
 *   based on @p. If this is unsuccessful, but @create is set, a new
 *   tunnel device is created and registered for use.
 *
 * Return:
 *   matching tunnel or NULL
 **/

static struct ip6_tnl *ip6_tnl_locate(struct ip6_tnl_parm *p, int create)
{
        struct in6_addr *remote = &p->raddr;
        struct in6_addr *local = &p->laddr;
        struct ip6_tnl *t;

        for (t = *ip6_tnl_bucket(p); t; t = t->next) {
                if (ipv6_addr_equal(local, &t->parms.laddr) &&
                    ipv6_addr_equal(remote, &t->parms.raddr))
                        return t;
        }
        if (!create)
                return NULL;
        return ip6_tnl_create(p);
}

/**
 * ip6_tnl_dev_uninit - tunnel device uninitializer
 *   @dev: the device to be destroyed
 *
 * Description:
 *   ip6_tnl_dev_uninit() removes tunnel from its list
 **/

static void
ip6_tnl_dev_uninit(struct net_device *dev)
{
        struct ip6_tnl *t = netdev_priv(dev);

        if (dev == ip6_fb_tnl_dev) {
                write_lock_bh(&ip6_tnl_lock);
                tnls_wc[0] = NULL;
                write_unlock_bh(&ip6_tnl_lock);
        } else {
                ip6_tnl_unlink(t);
        }
        ip6_tnl_dst_reset(t);
        dev_put(dev);
}

/**
 * parse_tlv_tnl_enc_lim - handle encapsulation limit option
 *   @skb: received socket buffer
 *
 * Return:
 *   0 if none was found,
 *   else index to encapsulation limit
 **/

static __u16
parse_tlv_tnl_enc_lim(struct sk_buff *skb, __u8 * raw)
{
        struct ipv6hdr *ipv6h = (struct ipv6hdr *) raw;
        __u8 nexthdr = ipv6h->nexthdr;
        __u16 off = sizeof (*ipv6h);

        while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
                __u16 optlen = 0;
                struct ipv6_opt_hdr *hdr;
                if (raw + off + sizeof (*hdr) > skb->data &&
                    !pskb_may_pull(skb, raw - skb->data + off + sizeof (*hdr)))
                        break;

                hdr = (struct ipv6_opt_hdr *) (raw + off);
                if (nexthdr == NEXTHDR_FRAGMENT) {
                        struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr;
                        if (frag_hdr->frag_off)
                                break;
                        optlen = 8;
                } else if (nexthdr == NEXTHDR_AUTH) {
                        optlen = (hdr->hdrlen + 2) << 2;
                } else {
                        optlen = ipv6_optlen(hdr);
                }
                if (nexthdr == NEXTHDR_DEST) {
                        __u16 i = off + 2;
                        while (1) {
                                struct ipv6_tlv_tnl_enc_lim *tel;

                                /* No more room for encapsulation limit */
                                if (i + sizeof (*tel) > off + optlen)
                                        break;

                                tel = (struct ipv6_tlv_tnl_enc_lim *) &raw[i];
                                /* return index of option if found and valid */
                                if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
                                    tel->length == 1)
                                        return i;
                                /* else jump to next option */
                                if (tel->type)
                                        i += tel->length + 2;
                                else
                                        i++;
                        }
                }
                nexthdr = hdr->nexthdr;
                off += optlen;
        }
        return 0;
}
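/*
 * For reference (RFC 2473): the Tunnel Encapsulation Limit option that
 * parse_tlv_tnl_enc_lim() searches for lives in a Destination Options
 * header and is a three-byte TLV: one byte of option type
 * (IPV6_TLV_TNL_ENCAP_LIMIT), one byte of option data length (always 1)
 * and one byte holding the remaining limit.  The returned index points
 * at the type byte, so raw[i + 2] is the limit value itself.
 */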

/**
 * ip6_tnl_err - tunnel error handler
 *
 * Description:
 *   ip6_tnl_err() should handle errors in the tunnel according
 *   to the specifications in RFC 2473.
 **/

static int
ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
            int *type, int *code, int *msg, __u32 *info, int offset)
{
        struct ipv6hdr *ipv6h = (struct ipv6hdr *) skb->data;
        struct ip6_tnl *t;
        int rel_msg = 0;
        int rel_type = ICMPV6_DEST_UNREACH;
        int rel_code = ICMPV6_ADDR_UNREACH;
        __u32 rel_info = 0;
        __u16 len;
        int err = -ENOENT;

        /* If the packet doesn't contain the original IPv6 header we are
           in trouble since we might need the source address for further
           processing of the error. */

        read_lock(&ip6_tnl_lock);
        if ((t = ip6_tnl_lookup(&ipv6h->daddr, &ipv6h->saddr)) == NULL)
                goto out;

        if (t->parms.proto != ipproto && t->parms.proto != 0)
                goto out;

        err = 0;

        switch (*type) {
                __u32 teli;
                struct ipv6_tlv_tnl_enc_lim *tel;
                __u32 mtu;
        case ICMPV6_DEST_UNREACH:
                if (net_ratelimit())
                        printk(KERN_WARNING
                               "%s: Path to destination invalid "
                               "or inactive!\n", t->parms.name);
                rel_msg = 1;
                break;
        case ICMPV6_TIME_EXCEED:
                if ((*code) == ICMPV6_EXC_HOPLIMIT) {
                        if (net_ratelimit())
                                printk(KERN_WARNING
                                       "%s: Too small hop limit or "
                                       "routing loop in tunnel!\n",
                                       t->parms.name);
                        rel_msg = 1;
                }
                break;
        case ICMPV6_PARAMPROB:
                teli = 0;
                if ((*code) == ICMPV6_HDR_FIELD)
                        teli = parse_tlv_tnl_enc_lim(skb, skb->data);

                if (teli && teli == *info - 2) {
                        tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
                        if (tel->encap_limit == 0) {
                                if (net_ratelimit())
                                        printk(KERN_WARNING
                                               "%s: Too small encapsulation "
                                               "limit or routing loop in "
                                               "tunnel!\n", t->parms.name);
                                rel_msg = 1;
                        }
                } else if (net_ratelimit()) {
                        printk(KERN_WARNING
                               "%s: Recipient unable to parse tunneled "
                               "packet!\n ", t->parms.name);
                }
                break;
        case ICMPV6_PKT_TOOBIG:
                mtu = *info - offset;
                if (mtu < IPV6_MIN_MTU)
                        mtu = IPV6_MIN_MTU;
                t->dev->mtu = mtu;

                if ((len = sizeof (*ipv6h) + ntohs(ipv6h->payload_len)) > mtu) {
                        rel_type = ICMPV6_PKT_TOOBIG;
                        rel_code = 0;
                        rel_info = mtu;
                        rel_msg = 1;
                }
                break;
        }

        *type = rel_type;
        *code = rel_code;
        *info = rel_info;
        *msg = rel_msg;

out:
        read_unlock(&ip6_tnl_lock);
        return err;
}

static int
ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
           int type, int code, int offset, __be32 info)
{
        int rel_msg = 0;
        int rel_type = type;
        int rel_code = code;
        __u32 rel_info = ntohl(info);
        int err;
        struct sk_buff *skb2;
        struct iphdr *eiph;
        struct flowi fl;
        struct rtable *rt;

        err = ip6_tnl_err(skb, IPPROTO_IPIP, opt, &rel_type, &rel_code,
                          &rel_msg, &rel_info, offset);
        if (err < 0)
                return err;

        if (rel_msg == 0)
                return 0;

        switch (rel_type) {
        case ICMPV6_DEST_UNREACH:
                if (rel_code != ICMPV6_ADDR_UNREACH)
                        return 0;
                rel_type = ICMP_DEST_UNREACH;
                rel_code = ICMP_HOST_UNREACH;
                break;
        case ICMPV6_PKT_TOOBIG:
                if (rel_code != 0)
                        return 0;
                rel_type = ICMP_DEST_UNREACH;
                rel_code = ICMP_FRAG_NEEDED;
                break;
        default:
                return 0;
        }

        if (!pskb_may_pull(skb, offset + sizeof(struct iphdr)))
                return 0;

        skb2 = skb_clone(skb, GFP_ATOMIC);
        if (!skb2)
                return 0;

        dst_release(skb2->dst);
        skb2->dst = NULL;
        skb_pull(skb2, offset);
        skb_reset_network_header(skb2);
        eiph = ip_hdr(skb2);

        /* Try to guess incoming interface */
        memset(&fl, 0, sizeof(fl));
        fl.fl4_dst = eiph->saddr;
        fl.fl4_tos = RT_TOS(eiph->tos);
        fl.proto = IPPROTO_IPIP;
        if (ip_route_output_key(&rt, &fl))
                goto out;

        skb2->dev = rt->u.dst.dev;

        /* route "incoming" packet */
        if (rt->rt_flags & RTCF_LOCAL) {
                ip_rt_put(rt);
                rt = NULL;
                fl.fl4_dst = eiph->daddr;
                fl.fl4_src = eiph->saddr;
                fl.fl4_tos = eiph->tos;
                if (ip_route_output_key(&rt, &fl) ||
                    rt->u.dst.dev->type != ARPHRD_TUNNEL) {
                        ip_rt_put(rt);
                        goto out;
                }
        } else {
                ip_rt_put(rt);
                if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
                                   skb2->dev) ||
                    skb2->dst->dev->type != ARPHRD_TUNNEL)
                        goto out;
        }

        /* change mtu on this route */
        if (rel_type == ICMP_DEST_UNREACH && rel_code == ICMP_FRAG_NEEDED) {
                if (rel_info > dst_mtu(skb2->dst))
                        goto out;

                skb2->dst->ops->update_pmtu(skb2->dst, rel_info);
        }

        icmp_send(skb2, rel_type, rel_code, htonl(rel_info));

out:
        kfree_skb(skb2);
        return 0;
}

static int
ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
           int type, int code, int offset, __be32 info)
{
        int rel_msg = 0;
        int rel_type = type;
        int rel_code = code;
        __u32 rel_info = ntohl(info);
        int err;

        err = ip6_tnl_err(skb, IPPROTO_IPV6, opt, &rel_type, &rel_code,
                          &rel_msg, &rel_info, offset);
        if (err < 0)
                return err;

        if (rel_msg && pskb_may_pull(skb, offset + sizeof(struct ipv6hdr))) {
                struct rt6_info *rt;
                struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

                if (!skb2)
                        return 0;

                dst_release(skb2->dst);
                skb2->dst = NULL;
                skb_pull(skb2, offset);
                skb_reset_network_header(skb2);

                /* Try to guess incoming interface */
                rt = rt6_lookup(&ipv6_hdr(skb2)->saddr, NULL, 0, 0);

                if (rt && rt->rt6i_dev)
                        skb2->dev = rt->rt6i_dev;

                icmpv6_send(skb2, rel_type, rel_code, rel_info, skb2->dev);

                if (rt)
                        dst_release(&rt->u.dst);

                kfree_skb(skb2);
        }

        return 0;
}

static void ip4ip6_dscp_ecn_decapsulate(struct ip6_tnl *t,
                                        struct ipv6hdr *ipv6h,
                                        struct sk_buff *skb)
{
        __u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK;

        if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
                ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield);

        if (INET_ECN_is_ce(dsfield))
                IP_ECN_set_ce(ip_hdr(skb));
}

static void ip6ip6_dscp_ecn_decapsulate(struct ip6_tnl *t,
                                        struct ipv6hdr *ipv6h,
                                        struct sk_buff *skb)
{
        if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
                ipv6_copy_dscp(ipv6h, ipv6_hdr(skb));

        if (INET_ECN_is_ce(ipv6_get_dsfield(ipv6h)))
                IP6_ECN_set_ce(ipv6_hdr(skb));
}

static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t)
{
        struct ip6_tnl_parm *p = &t->parms;
        int ret = 0;

        if (p->flags & IP6_TNL_F_CAP_RCV) {
                struct net_device *ldev = NULL;

                if (p->link)
                        ldev = dev_get_by_index(&init_net, p->link);

                if ((ipv6_addr_is_multicast(&p->laddr) ||
                     likely(ipv6_chk_addr(&p->laddr, ldev, 0))) &&
                    likely(!ipv6_chk_addr(&p->raddr, NULL, 0)))
                        ret = 1;

                if (ldev)
                        dev_put(ldev);
        }
        return ret;
}

/**
 * ip6_tnl_rcv - decapsulate IPv6 packet and retransmit it locally
 *   @skb: received socket buffer
 *   @protocol: ethernet protocol ID
 *   @dscp_ecn_decapsulate: the function to decapsulate DSCP code and ECN
 *
 * Return: 0
 **/

static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
                       __u8 ipproto,
                       void (*dscp_ecn_decapsulate)(struct ip6_tnl *t,
                                                    struct ipv6hdr *ipv6h,
                                                    struct sk_buff *skb))
{
        struct ip6_tnl *t;
        struct ipv6hdr *ipv6h = ipv6_hdr(skb);

        read_lock(&ip6_tnl_lock);

        if ((t = ip6_tnl_lookup(&ipv6h->saddr, &ipv6h->daddr)) != NULL) {
                if (t->parms.proto != ipproto && t->parms.proto != 0) {
                        read_unlock(&ip6_tnl_lock);
                        goto discard;
                }

                if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
                        read_unlock(&ip6_tnl_lock);
                        goto discard;
                }

                if (!ip6_tnl_rcv_ctl(t)) {
                        t->stat.rx_dropped++;
                        read_unlock(&ip6_tnl_lock);
                        goto discard;
                }
                secpath_reset(skb);
                skb->mac_header = skb->network_header;
                skb_reset_network_header(skb);
                skb->protocol = htons(protocol);
                skb->pkt_type = PACKET_HOST;
                memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
                skb->dev = t->dev;
                dst_release(skb->dst);
                skb->dst = NULL;
                nf_reset(skb);

                dscp_ecn_decapsulate(t, ipv6h, skb);

                t->stat.rx_packets++;
                t->stat.rx_bytes += skb->len;
                netif_rx(skb);
                read_unlock(&ip6_tnl_lock);
                return 0;
        }
        read_unlock(&ip6_tnl_lock);
        return 1;

discard:
        kfree_skb(skb);
        return 0;
}

static int ip4ip6_rcv(struct sk_buff *skb)
{
        return ip6_tnl_rcv(skb, ETH_P_IP, IPPROTO_IPIP,
                           ip4ip6_dscp_ecn_decapsulate);
}

static int ip6ip6_rcv(struct sk_buff *skb)
{
        return ip6_tnl_rcv(skb, ETH_P_IPV6, IPPROTO_IPV6,
                           ip6ip6_dscp_ecn_decapsulate);
}

struct ipv6_tel_txoption {
        struct ipv6_txoptions ops;
        __u8 dst_opt[8];
};

static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
{
        memset(opt, 0, sizeof(struct ipv6_tel_txoption));

        opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT;
        opt->dst_opt[3] = 1;
        opt->dst_opt[4] = encap_limit;
        opt->dst_opt[5] = IPV6_TLV_PADN;
        opt->dst_opt[6] = 1;

        opt->ops.dst0opt = (struct ipv6_opt_hdr *) opt->dst_opt;
        opt->ops.opt_nflen = 8;
}
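/*
 * Layout of dst_opt[] as built above (eight bytes, matching
 * IPV6_TLV_TEL_DST_SIZE): bytes 0-1 form the ipv6_opt_hdr; the
 * next-header byte is filled in when the option is pushed onto the
 * packet and the length byte stays 0, which encodes an 8-octet header.
 * Byte 2 is the option type, byte 3 the option data length (1), byte 4
 * the encapsulation limit itself, and bytes 5-7 a PadN option padding
 * the Destination Options header out to a multiple of eight octets.
 */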

/**
 * ip6_tnl_addr_conflict - compare packet addresses to tunnel's own
 *   @t: the outgoing tunnel device
 *   @hdr: IPv6 header from the incoming packet
 *
 * Description:
 *   Avoid trivial tunneling loop by checking that tunnel exit-point
 *   doesn't match source of incoming packet.
 *
 * Return:
 *   1 if conflict,
 *   0 else
 **/

static inline int
ip6_tnl_addr_conflict(struct ip6_tnl *t, struct ipv6hdr *hdr)
{
        return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
}

static inline int ip6_tnl_xmit_ctl(struct ip6_tnl *t)
{
        struct ip6_tnl_parm *p = &t->parms;
        int ret = 0;

        if (p->flags & IP6_TNL_F_CAP_XMIT) {
                struct net_device *ldev = NULL;

                if (p->link)
                        ldev = dev_get_by_index(&init_net, p->link);

                if (unlikely(!ipv6_chk_addr(&p->laddr, ldev, 0)))
                        printk(KERN_WARNING
                               "%s xmit: Local address not yet configured!\n",
                               p->name);
                else if (!ipv6_addr_is_multicast(&p->raddr) &&
                         unlikely(ipv6_chk_addr(&p->raddr, NULL, 0)))
                        printk(KERN_WARNING
                               "%s xmit: Routing loop! "
                               "Remote address found on this node!\n",
                               p->name);
                else
                        ret = 1;
                if (ldev)
                        dev_put(ldev);
        }
        return ret;
}
/**
 * ip6_tnl_xmit2 - encapsulate packet and send
 *   @skb: the outgoing socket buffer
 *   @dev: the outgoing tunnel device
 *   @dsfield: dscp code for outer header
 *   @fl: flow of tunneled packet
 *   @encap_limit: encapsulation limit
 *   @pmtu: Path MTU is stored if packet is too big
 *
 * Description:
 *   Build new header and do some sanity checks on the packet before sending
 *   it.
 *
 * Return:
 *   0 on success
 *   -1 fail
 *   %-EMSGSIZE message too big. return mtu in this case.
 **/

static int ip6_tnl_xmit2(struct sk_buff *skb,
                         struct net_device *dev,
                         __u8 dsfield,
                         struct flowi *fl,
                         int encap_limit,
                         __u32 *pmtu)
{
        struct ip6_tnl *t = netdev_priv(dev);
        struct net_device_stats *stats = &t->stat;
        struct ipv6hdr *ipv6h = ipv6_hdr(skb);
        struct ipv6_tel_txoption opt;
        struct dst_entry *dst;
        struct net_device *tdev;
        int mtu;
        unsigned int max_headroom = sizeof(struct ipv6hdr);
        u8 proto;
        int err = -1;
        int pkt_len;

        if ((dst = ip6_tnl_dst_check(t)) != NULL)
                dst_hold(dst);
        else {
                dst = ip6_route_output(NULL, fl);

                if (dst->error || xfrm_lookup(&dst, fl, NULL, 0) < 0)
                        goto tx_err_link_failure;
        }

        tdev = dst->dev;

        if (tdev == dev) {
                stats->collisions++;
                if (net_ratelimit())
                        printk(KERN_WARNING
                               "%s: Local routing loop detected!\n",
                               t->parms.name);
                goto tx_err_dst_release;
        }
        mtu = dst_mtu(dst) - sizeof (*ipv6h);
        if (encap_limit >= 0) {
                max_headroom += 8;
                mtu -= 8;
        }
        if (mtu < IPV6_MIN_MTU)
                mtu = IPV6_MIN_MTU;
        if (skb->dst)
                skb->dst->ops->update_pmtu(skb->dst, mtu);
        if (skb->len > mtu) {
                *pmtu = mtu;
                err = -EMSGSIZE;
                goto tx_err_dst_release;
        }

        /*
         * Okay, now see if we can stuff it in the buffer as-is.
         */
        max_headroom += LL_RESERVED_SPACE(tdev);

        if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
            (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
                struct sk_buff *new_skb;

                if (!(new_skb = skb_realloc_headroom(skb, max_headroom)))
                        goto tx_err_dst_release;

                if (skb->sk)
                        skb_set_owner_w(new_skb, skb->sk);
                kfree_skb(skb);
                skb = new_skb;
        }
        dst_release(skb->dst);
        skb->dst = dst_clone(dst);

        skb->transport_header = skb->network_header;

        proto = fl->proto;
        if (encap_limit >= 0) {
                init_tel_txopt(&opt, encap_limit);
                ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
        }
        skb_push(skb, sizeof(struct ipv6hdr));
        skb_reset_network_header(skb);
        ipv6h = ipv6_hdr(skb);
        *(__be32*)ipv6h = fl->fl6_flowlabel | htonl(0x60000000);
        dsfield = INET_ECN_encapsulate(0, dsfield);
        ipv6_change_dsfield(ipv6h, ~INET_ECN_MASK, dsfield);
        ipv6h->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
        ipv6h->hop_limit = t->parms.hop_limit;
        ipv6h->nexthdr = proto;
        ipv6_addr_copy(&ipv6h->saddr, &fl->fl6_src);
        ipv6_addr_copy(&ipv6h->daddr, &fl->fl6_dst);
        nf_reset(skb);
        pkt_len = skb->len;
        err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL,
                      skb->dst->dev, dst_output);

        if (net_xmit_eval(err) == 0) {
                stats->tx_bytes += pkt_len;
                stats->tx_packets++;
        } else {
                stats->tx_errors++;
                stats->tx_aborted_errors++;
        }
        ip6_tnl_dst_store(t, dst);
        return 0;
tx_err_link_failure:
        stats->tx_carrier_errors++;
        dst_link_failure(skb);
tx_err_dst_release:
        dst_release(dst);
        return err;
}

static inline int
ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct ip6_tnl *t = netdev_priv(dev);
        struct iphdr  *iph = ip_hdr(skb);
        int encap_limit = -1;
        struct flowi fl;
        __u8 dsfield;
        __u32 mtu;
        int err;

        if ((t->parms.proto != IPPROTO_IPIP && t->parms.proto != 0) ||
            !ip6_tnl_xmit_ctl(t))
                return -1;

        if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
                encap_limit = t->parms.encap_limit;

        memcpy(&fl, &t->fl, sizeof (fl));
        fl.proto = IPPROTO_IPIP;

        dsfield = ipv4_get_dsfield(iph);

        if ((t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS))
                fl.fl6_flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
                                          & IPV6_TCLASS_MASK;

        err = ip6_tnl_xmit2(skb, dev, dsfield, &fl, encap_limit, &mtu);
        if (err != 0) {
                /* XXX: send ICMP error even if DF is not set. */
                if (err == -EMSGSIZE)
                        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
                                  htonl(mtu));
                return -1;
        }

        return 0;
}

static inline int
ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct ip6_tnl *t = netdev_priv(dev);
        struct ipv6hdr *ipv6h = ipv6_hdr(skb);
        int encap_limit = -1;
        __u16 offset;
        struct flowi fl;
        __u8 dsfield;
        __u32 mtu;
        int err;

        if ((t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) ||
            !ip6_tnl_xmit_ctl(t) || ip6_tnl_addr_conflict(t, ipv6h))
                return -1;

        offset = parse_tlv_tnl_enc_lim(skb, skb_network_header(skb));
        if (offset > 0) {
                struct ipv6_tlv_tnl_enc_lim *tel;
                tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
                if (tel->encap_limit == 0) {
                        icmpv6_send(skb, ICMPV6_PARAMPROB,
                                    ICMPV6_HDR_FIELD, offset + 2, skb->dev);
                        return -1;
                }
                encap_limit = tel->encap_limit - 1;
        } else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
                encap_limit = t->parms.encap_limit;

        memcpy(&fl, &t->fl, sizeof (fl));
        fl.proto = IPPROTO_IPV6;

        dsfield = ipv6_get_dsfield(ipv6h);
        if ((t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS))
                fl.fl6_flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
        if ((t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL))
                fl.fl6_flowlabel |= (*(__be32 *) ipv6h & IPV6_FLOWLABEL_MASK);

        err = ip6_tnl_xmit2(skb, dev, dsfield, &fl, encap_limit, &mtu);
        if (err != 0) {
                if (err == -EMSGSIZE)
                        icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
                return -1;
        }

        return 0;
}

static int
ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct ip6_tnl *t = netdev_priv(dev);
        struct net_device_stats *stats = &t->stat;
        int ret;

        if (t->recursion++) {
                t->stat.collisions++;
                goto tx_err;
        }

        switch (skb->protocol) {
        case __constant_htons(ETH_P_IP):
                ret = ip4ip6_tnl_xmit(skb, dev);
                break;
        case __constant_htons(ETH_P_IPV6):
                ret = ip6ip6_tnl_xmit(skb, dev);
                break;
        default:
                goto tx_err;
        }

        if (ret < 0)
                goto tx_err;

        t->recursion--;
        return 0;

tx_err:
        stats->tx_errors++;
        stats->tx_dropped++;
        kfree_skb(skb);
        t->recursion--;
        return 0;
}

static void ip6_tnl_set_cap(struct ip6_tnl *t)
{
        struct ip6_tnl_parm *p = &t->parms;
        int ltype = ipv6_addr_type(&p->laddr);
        int rtype = ipv6_addr_type(&p->raddr);

        p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV);

        if (ltype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
            rtype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
            !((ltype|rtype) & IPV6_ADDR_LOOPBACK) &&
            (!((ltype|rtype) & IPV6_ADDR_LINKLOCAL) || p->link)) {
                if (ltype&IPV6_ADDR_UNICAST)
                        p->flags |= IP6_TNL_F_CAP_XMIT;
                if (rtype&IPV6_ADDR_UNICAST)
                        p->flags |= IP6_TNL_F_CAP_RCV;
        }
}

static void ip6_tnl_link_config(struct ip6_tnl *t)
{
        struct net_device *dev = t->dev;
        struct ip6_tnl_parm *p = &t->parms;
        struct flowi *fl = &t->fl;

        memcpy(&dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
        memcpy(&dev->broadcast, &p->raddr, sizeof(struct in6_addr));

        /* Set up flowi template */
        ipv6_addr_copy(&fl->fl6_src, &p->laddr);
        ipv6_addr_copy(&fl->fl6_dst, &p->raddr);
        fl->oif = p->link;
        fl->fl6_flowlabel = 0;

        if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
                fl->fl6_flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
        if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
                fl->fl6_flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;

        ip6_tnl_set_cap(t);

        if (p->flags&IP6_TNL_F_CAP_XMIT && p->flags&IP6_TNL_F_CAP_RCV)
                dev->flags |= IFF_POINTOPOINT;
        else
                dev->flags &= ~IFF_POINTOPOINT;

        dev->iflink = p->link;

        if (p->flags & IP6_TNL_F_CAP_XMIT) {
                int strict = (ipv6_addr_type(&p->raddr) &
                              (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));

                struct rt6_info *rt = rt6_lookup(&p->raddr, &p->laddr,
                                                 p->link, strict);

                if (rt == NULL)
                        return;

                if (rt->rt6i_dev) {
                        dev->hard_header_len = rt->rt6i_dev->hard_header_len +
                                sizeof (struct ipv6hdr);

                        dev->mtu = rt->rt6i_dev->mtu - sizeof (struct ipv6hdr);

                        if (dev->mtu < IPV6_MIN_MTU)
                                dev->mtu = IPV6_MIN_MTU;
                }
                dst_release(&rt->u.dst);
        }
}

/**
 * ip6_tnl_change - update the tunnel parameters
 *   @t: tunnel to be changed
 *   @p: tunnel configuration parameters
 *
 * Description:
 *   ip6_tnl_change() updates the tunnel parameters
 **/

static int
ip6_tnl_change(struct ip6_tnl *t, struct ip6_tnl_parm *p)
{
        ipv6_addr_copy(&t->parms.laddr, &p->laddr);
        ipv6_addr_copy(&t->parms.raddr, &p->raddr);
        t->parms.flags = p->flags;
        t->parms.hop_limit = p->hop_limit;
        t->parms.encap_limit = p->encap_limit;
        t->parms.flowinfo = p->flowinfo;
        t->parms.link = p->link;
        t->parms.proto = p->proto;
        ip6_tnl_dst_reset(t);
        ip6_tnl_link_config(t);
        return 0;
}

/**
 * ip6_tnl_ioctl - configure ipv6 tunnels from userspace
 *   @dev: virtual device associated with tunnel
 *   @ifr: parameters passed from userspace
 *   @cmd: command to be performed
 *
 * Description:
 *   ip6_tnl_ioctl() is used for managing IPv6 tunnels
 *   from userspace.
 *
 *   The possible commands are the following:
 *     %SIOCGETTUNNEL: get tunnel parameters for device
 *     %SIOCADDTUNNEL: add tunnel matching given tunnel parameters
 *     %SIOCCHGTUNNEL: change tunnel parameters to those given
 *     %SIOCDELTUNNEL: delete tunnel
 *
 *   The fallback device "ip6tnl0", created during module
 *   initialization, can be used for creating other tunnel devices.
 *
 * Return:
 *   0 on success,
 *   %-EFAULT if unable to copy data to or from userspace,
 *   %-EPERM if current process hasn't %CAP_NET_ADMIN set
 *   %-EINVAL if passed tunnel parameters are invalid,
 *   %-EEXIST if changing a tunnel's parameters would cause a conflict
 *   %-ENODEV if attempting to change or delete a nonexisting device
 **/

static int
ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        int err = 0;
        struct ip6_tnl_parm p;
        struct ip6_tnl *t = NULL;

        switch (cmd) {
        case SIOCGETTUNNEL:
                if (dev == ip6_fb_tnl_dev) {
                        if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p))) {
                                err = -EFAULT;
                                break;
                        }
                        t = ip6_tnl_locate(&p, 0);
                }
                if (t == NULL)
                        t = netdev_priv(dev);
                memcpy(&p, &t->parms, sizeof (p));
                if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof (p))) {
                        err = -EFAULT;
                }
                break;
        case SIOCADDTUNNEL:
        case SIOCCHGTUNNEL:
                err = -EPERM;
                if (!capable(CAP_NET_ADMIN))
                        break;
                err = -EFAULT;
                if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p)))
                        break;
                err = -EINVAL;
                if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP &&
                    p.proto != 0)
                        break;
                t = ip6_tnl_locate(&p, cmd == SIOCADDTUNNEL);
                if (dev != ip6_fb_tnl_dev && cmd == SIOCCHGTUNNEL) {
                        if (t != NULL) {
                                if (t->dev != dev) {
                                        err = -EEXIST;
                                        break;
                                }
                        } else
                                t = netdev_priv(dev);

                        ip6_tnl_unlink(t);
                        err = ip6_tnl_change(t, &p);
                        ip6_tnl_link(t);
                        netdev_state_change(dev);
                }
                if (t) {
                        err = 0;
                        if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof (p)))
                                err = -EFAULT;

                } else
                        err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
                break;
        case SIOCDELTUNNEL:
                err = -EPERM;
                if (!capable(CAP_NET_ADMIN))
                        break;

                if (dev == ip6_fb_tnl_dev) {
                        err = -EFAULT;
                        if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof (p)))
                                break;
                        err = -ENOENT;
                        if ((t = ip6_tnl_locate(&p, 0)) == NULL)
                                break;
                        err = -EPERM;
                        if (t->dev == ip6_fb_tnl_dev)
                                break;
                        dev = t->dev;
                }
                err = 0;
                unregister_netdevice(dev);
                break;
        default:
                err = -EINVAL;
        }
        return err;
}
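/*
 * Usage sketch (illustrative): userspace drives the commands above with
 * an ioctl() on a socket, passing a struct ip6_tnl_parm through
 * ifr_ifru.ifru_data and naming either the fallback device "ip6tnl0" or
 * an existing tunnel, roughly:
 *
 *     struct ip6_tnl_parm p = { .proto = IPPROTO_IPV6 };
 *     struct ifreq ifr;
 *     strncpy(ifr.ifr_name, "ip6tnl0", IFNAMSIZ);
 *     ifr.ifr_ifru.ifru_data = (void *) &p;
 *     ioctl(fd, SIOCADDTUNNEL, &ifr);
 *
 * This is the path iproute2's "ip -6 tunnel" commands take; fd is
 * assumed to be an open AF_INET6 datagram socket.
 */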

/**
 * ip6_tnl_get_stats - return the stats for tunnel device
 *   @dev: virtual device associated with tunnel
 *
 * Return: stats for device
 **/

static struct net_device_stats *
ip6_tnl_get_stats(struct net_device *dev)
{
        return &(((struct ip6_tnl *)netdev_priv(dev))->stat);
}

/**
 * ip6_tnl_change_mtu - change mtu manually for tunnel device
 *   @dev: virtual device associated with tunnel
 *   @new_mtu: the new mtu
 *
 * Return:
 *   0 on success,
 *   %-EINVAL if mtu too small
 **/

static int
ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
{
        if (new_mtu < IPV6_MIN_MTU) {
                return -EINVAL;
        }
        dev->mtu = new_mtu;
        return 0;
}

/**
 * ip6_tnl_dev_setup - setup virtual tunnel device
 *   @dev: virtual device associated with tunnel
 *
 * Description:
 *   Initialize function pointers and device parameters
 **/

static void ip6_tnl_dev_setup(struct net_device *dev)
{
        dev->uninit = ip6_tnl_dev_uninit;
        dev->destructor = free_netdev;
        dev->hard_start_xmit = ip6_tnl_xmit;
        dev->get_stats = ip6_tnl_get_stats;
        dev->do_ioctl = ip6_tnl_ioctl;
        dev->change_mtu = ip6_tnl_change_mtu;

        dev->type = ARPHRD_TUNNEL6;
        dev->hard_header_len = LL_MAX_HEADER + sizeof (struct ipv6hdr);
        dev->mtu = ETH_DATA_LEN - sizeof (struct ipv6hdr);
        dev->flags |= IFF_NOARP;
        dev->addr_len = sizeof(struct in6_addr);
}


/**
 * ip6_tnl_dev_init_gen - general initializer for all tunnel devices
 *   @dev: virtual device associated with tunnel
 **/

static inline void
ip6_tnl_dev_init_gen(struct net_device *dev)
{
        struct ip6_tnl *t = netdev_priv(dev);
        t->dev = dev;
        strcpy(t->parms.name, dev->name);
}

/**
 * ip6_tnl_dev_init - initializer for all non fallback tunnel devices
 *   @dev: virtual device associated with tunnel
 **/

static int
ip6_tnl_dev_init(struct net_device *dev)
{
        struct ip6_tnl *t = netdev_priv(dev);
        ip6_tnl_dev_init_gen(dev);
        ip6_tnl_link_config(t);
        return 0;
}

/**
 * ip6_fb_tnl_dev_init - initializer for fallback tunnel device
 *   @dev: fallback device
 *
 * Return: 0
 **/

static int
ip6_fb_tnl_dev_init(struct net_device *dev)
{
        struct ip6_tnl *t = netdev_priv(dev);
        ip6_tnl_dev_init_gen(dev);
        t->parms.proto = IPPROTO_IPV6;
        dev_hold(dev);
        tnls_wc[0] = t;
        return 0;
}

static struct xfrm6_tunnel ip4ip6_handler = {
        .handler        = ip4ip6_rcv,
        .err_handler    = ip4ip6_err,
        .priority       =       1,
};

static struct xfrm6_tunnel ip6ip6_handler = {
        .handler        = ip6ip6_rcv,
        .err_handler    = ip6ip6_err,
        .priority       =       1,
};

/**
 * ip6_tunnel_init - register protocol and reserve needed resources
 *
 * Return: 0 on success
 **/

static int __init ip6_tunnel_init(void)
{
        int  err;

        if (xfrm6_tunnel_register(&ip4ip6_handler, AF_INET)) {
                printk(KERN_ERR "ip6_tunnel init: can't register ip4ip6\n");
                err = -EAGAIN;
                goto out;
        }

        if (xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6)) {
                printk(KERN_ERR "ip6_tunnel init: can't register ip6ip6\n");
                err = -EAGAIN;
                goto unreg_ip4ip6;
        }
        ip6_fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6tnl0",
                                      ip6_tnl_dev_setup);

        if (!ip6_fb_tnl_dev) {
                err = -ENOMEM;
                goto fail;
        }
        ip6_fb_tnl_dev->init = ip6_fb_tnl_dev_init;

        if ((err = register_netdev(ip6_fb_tnl_dev))) {
                free_netdev(ip6_fb_tnl_dev);
                goto fail;
        }
        return 0;
fail:
        xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6);
unreg_ip4ip6:
        xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET);
out:
        return err;
}

static void __exit ip6_tnl_destroy_tunnels(void)
{
        int h;
        struct ip6_tnl *t;

        for (h = 0; h < HASH_SIZE; h++) {
                while ((t = tnls_r_l[h]) != NULL)
                        unregister_netdevice(t->dev);
        }

        t = tnls_wc[0];
        unregister_netdevice(t->dev);
}

/**
 * ip6_tunnel_cleanup - free resources and unregister protocol
 **/

static void __exit ip6_tunnel_cleanup(void)
{
        if (xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET))
                printk(KERN_INFO "ip6_tunnel close: can't deregister ip4ip6\n");

        if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6))
                printk(KERN_INFO "ip6_tunnel close: can't deregister ip6ip6\n");

        rtnl_lock();
        ip6_tnl_destroy_tunnels();
        rtnl_unlock();
}

module_init(ip6_tunnel_init);
module_exit(ip6_tunnel_cleanup);
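/*
 * Usage note (illustrative): loading the module registers the two
 * xfrm6_tunnel handlers and the fallback "ip6tnl0" device; further
 * tunnels are typically created with iproute2, e.g.
 * "ip -6 tunnel add ip6tnl1 mode ip6ip6 local <addr> remote <addr>",
 * which reaches ip6_tnl_ioctl() via SIOCADDTUNNEL on the fallback
 * device.
 */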
