OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

[/] [or1k/] [trunk/] [linux/] [linux-2.4/] [net/] [netlink/] [af_netlink.c] - Blame information for rev 1765

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 1275 phoenix
/*
2
 * NETLINK      Kernel-user communication protocol.
3
 *
4
 *              Authors:        Alan Cox <alan@redhat.com>
5
 *                              Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
6
 *
7
 *              This program is free software; you can redistribute it and/or
8
 *              modify it under the terms of the GNU General Public License
9
 *              as published by the Free Software Foundation; either version
10
 *              2 of the License, or (at your option) any later version.
11
 *
12
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
13
 *                               added netlink_proto_exit
14
 *
15
 */
16
 
17
#include <linux/config.h>
18
#include <linux/module.h>
19
 
20
#include <linux/kernel.h>
21
#include <linux/init.h>
22
#include <linux/major.h>
23
#include <linux/signal.h>
24
#include <linux/sched.h>
25
#include <linux/errno.h>
26
#include <linux/string.h>
27
#include <linux/stat.h>
28
#include <linux/socket.h>
29
#include <linux/un.h>
30
#include <linux/fcntl.h>
31
#include <linux/termios.h>
32
#include <linux/sockios.h>
33
#include <linux/net.h>
34
#include <linux/fs.h>
35
#include <linux/slab.h>
36
#include <asm/uaccess.h>
37
#include <linux/skbuff.h>
38
#include <linux/netdevice.h>
39
#include <linux/rtnetlink.h>
40
#include <linux/proc_fs.h>
41
#include <linux/smp_lock.h>
42
#include <linux/notifier.h>
43
#include <net/sock.h>
44
#include <net/scm.h>
45
 
46
#define Nprintk(a...)
47
 
48
#if defined(CONFIG_NETLINK_DEV) || defined(CONFIG_NETLINK_DEV_MODULE)
49
#define NL_EMULATE_DEV
50
#endif
51
 
52
/* Per-socket netlink state, hung off sk->protinfo.af_netlink. */
struct netlink_opt
{
	u32			pid;		/* own address; 0 = unbound / kernel socket */
	unsigned		groups;		/* multicast groups this socket listens to */
	u32			dst_pid;	/* default unicast destination (connect()) */
	unsigned		dst_groups;	/* default multicast destinations (connect()) */
	unsigned long		state;		/* bit 0: receive-overrun/congestion flag */
	int			(*handler)(int unit, struct sk_buff *skb);	/* NL_EMULATE_DEV synchronous handler */
	wait_queue_head_t	wait;		/* senders blocked on a full rcvbuf */
	struct netlink_callback	*cb;		/* active dump in progress, if any */
	spinlock_t		cb_lock;	/* protects cb */
	void			(*data_ready)(struct sock *sk, int bytes);	/* kernel-socket input hook */
};
65
 
66
/* One socket list per netlink protocol; guarded by nl_table_lock plus
 * the reader-counting grab/ungrab scheme below. */
static struct sock *nl_table[MAX_LINKS];
static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
/* Per-protocol bitmask of operations permitted without CAP_NET_ADMIN
 * (see netlink_set_nonroot/netlink_capable). */
static unsigned nl_nonroot[MAX_LINKS];

#ifdef NL_EMULATE_DEV
/* Kernel sockets registered through the legacy netlink_attach() API. */
static struct socket *netlink_kernel[MAX_LINKS];
#endif

static int netlink_dump(struct sock *sk);
static void netlink_destroy_callback(struct netlink_callback *cb);

/* Count of live netlink socks, used by NETLINK_REFCNT_DEBUG accounting. */
atomic_t netlink_sock_nr;

static rwlock_t nl_table_lock = RW_LOCK_UNLOCKED;
static atomic_t nl_table_users = ATOMIC_INIT(0);

/* Notifier chain for NETLINK_URELEASE events (see netlink_release). */
static struct notifier_block *netlink_chain;
83
 
84
/* Destructor invoked when the last reference to a netlink sock is
 * dropped: purge queued skbs, free the private netlink_opt, and keep
 * the global socket count accurate. */
static void netlink_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->receive_queue);

	/* Destruction of a socket still marked alive indicates a
	 * refcounting bug; leak rather than free live state. */
	if (!sk->dead) {
		printk("Freeing alive netlink socket %p\n", sk);
		return;
	}
	BUG_TRAP(atomic_read(&sk->rmem_alloc)==0);
	BUG_TRAP(atomic_read(&sk->wmem_alloc)==0);
	BUG_TRAP(sk->protinfo.af_netlink->cb==NULL);

	kfree(sk->protinfo.af_netlink);

	atomic_dec(&netlink_sock_nr);
#ifdef NETLINK_REFCNT_DEBUG
	printk(KERN_DEBUG "NETLINK %p released, %d are still alive\n", sk, atomic_read(&netlink_sock_nr));
#endif
}
103
 
104
/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on SMP.
105
 * Look, when several writers sleep and reader wakes them up, all but one
106
 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
107
 * this, _but_ remember, it adds useless work on UP machines.
108
 */
109
 
110
/* Acquire exclusive (writer) access to the socket table: take the
 * write lock, then wait uninterruptibly until every reader that went
 * through netlink_lock_table() has drained. */
static void netlink_table_grab(void)
{
	write_lock_bh(&nl_table_lock);

	if (atomic_read(&nl_table_users)) {
		DECLARE_WAITQUEUE(wait, current);

		/* Exclusive wait: see the thundering-herd comment above. */
		add_wait_queue_exclusive(&nl_table_wait, &wait);
		for(;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (atomic_read(&nl_table_users) == 0)
				break;
			/* Drop the lock while sleeping so readers can exit. */
			write_unlock_bh(&nl_table_lock);
			schedule();
			write_lock_bh(&nl_table_lock);
		}

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nl_table_wait, &wait);
	}
}
131
 
132
/* Release exclusive table access and wake readers and writers queued
 * on nl_table_wait. */
static __inline__ void netlink_table_ungrab(void)
{
	write_unlock_bh(&nl_table_lock);
	wake_up(&nl_table_wait);
}
137
 
138
/* Reader-side entry: bump the user count under the read lock so a
 * concurrent netlink_table_grab() either sees us or excludes us. */
static __inline__ void
netlink_lock_table(void)
{
	/* read_lock() synchronizes us to netlink_table_grab */

	read_lock(&nl_table_lock);
	atomic_inc(&nl_table_users);
	read_unlock(&nl_table_lock);
}
147
 
148
/* Reader-side exit: the last reader out wakes any writer sleeping in
 * netlink_table_grab(). */
static __inline__ void
netlink_unlock_table(void)
{
	if (atomic_dec_and_test(&nl_table_users))
		wake_up(&nl_table_wait);
}
154
 
155
/* Find the socket bound to 'pid' on the given protocol.  On success a
 * reference is taken (caller must sock_put); returns NULL if absent. */
static __inline__ struct sock *netlink_lookup(int protocol, u32 pid)
{
	struct sock *found = NULL;
	struct sock *cur;

	read_lock(&nl_table_lock);
	for (cur = nl_table[protocol]; cur != NULL; cur = cur->next) {
		if (cur->protinfo.af_netlink->pid != pid)
			continue;
		sock_hold(cur);
		found = cur;
		break;
	}
	read_unlock(&nl_table_lock);

	return found;
}
171
 
172
extern struct proto_ops netlink_ops;
173
 
174
/* Bind sk to 'pid' in its protocol's list.  Returns 0 on success,
 * -EADDRINUSE if another socket already owns the pid, or -EBUSY if sk
 * itself is already bound. */
static int netlink_insert(struct sock *sk, u32 pid)
{
	int err = -EADDRINUSE;
	struct sock *osk;

	netlink_table_grab();
	for (osk=nl_table[sk->protocol]; osk; osk=osk->next) {
		if (osk->protinfo.af_netlink->pid == pid)
			break;
	}
	if (osk == NULL) {
		/* pid is free; only an unbound socket (pid==0) may claim it. */
		err = -EBUSY;
		if (sk->protinfo.af_netlink->pid == 0) {
			sk->protinfo.af_netlink->pid = pid;
			sk->next = nl_table[sk->protocol];
			nl_table[sk->protocol] = sk;
			sock_hold(sk);	/* the table holds a reference */
			err = 0;
		}
	}
	netlink_table_ungrab();
	return err;
}
197
 
198
static void netlink_remove(struct sock *sk)
199
{
200
        struct sock **skp;
201
 
202
        netlink_table_grab();
203
        for (skp = &nl_table[sk->protocol]; *skp; skp = &((*skp)->next)) {
204
                if (*skp == sk) {
205
                        *skp = sk->next;
206
                        __sock_put(sk);
207
                        break;
208
                }
209
        }
210
        netlink_table_ungrab();
211
}
212
 
213
/* socket(PF_NETLINK, ...) backend: allocate the sock and its private
 * netlink_opt.  Only SOCK_RAW/SOCK_DGRAM and protocols below
 * MAX_LINKS are accepted. */
static int netlink_create(struct socket *sock, int protocol)
{
	struct sock *sk;

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol<0 || protocol >= MAX_LINKS)
		return -EPROTONOSUPPORT;

	sock->ops = &netlink_ops;

	sk = sk_alloc(PF_NETLINK, GFP_KERNEL, 1);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock,sk);

	sk->protinfo.af_netlink = kmalloc(sizeof(struct netlink_opt), GFP_KERNEL);
	if (sk->protinfo.af_netlink == NULL) {
		sk_free(sk);
		return -ENOMEM;
	}
	/* Zeroed state: pid 0 (unbound), no groups, no dump callback. */
	memset(sk->protinfo.af_netlink, 0, sizeof(struct netlink_opt));

	spin_lock_init(&sk->protinfo.af_netlink->cb_lock);
	init_waitqueue_head(&sk->protinfo.af_netlink->wait);
	sk->destruct = netlink_sock_destruct;
	atomic_inc(&netlink_sock_nr);

	sk->protocol=protocol;
	return 0;
}
248
 
249
/* close() backend: unlink from the table, abort any dump in progress,
 * purge queues, and notify NETLINK_URELEASE listeners for bound
 * unicast (non-multicast) sockets. */
static int netlink_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (!sk)
		return 0;

	netlink_remove(sk);

	/* Terminate an in-flight dump; the dump held a socket ref
	 * (taken in netlink_dump_start). */
	spin_lock(&sk->protinfo.af_netlink->cb_lock);
	if (sk->protinfo.af_netlink->cb) {
		sk->protinfo.af_netlink->cb->done(sk->protinfo.af_netlink->cb);
		netlink_destroy_callback(sk->protinfo.af_netlink->cb);
		sk->protinfo.af_netlink->cb = NULL;
		__sock_put(sk);
	}
	spin_unlock(&sk->protinfo.af_netlink->cb_lock);

	/* OK. Socket is unlinked, and, therefore,
	   no new packets will arrive */

	sock_orphan(sk);
	sock->sk = NULL;
	/* Release any senders blocked waiting for our rcvbuf. */
	wake_up_interruptible_all(&sk->protinfo.af_netlink->wait);

	skb_queue_purge(&sk->write_queue);

	if (sk->protinfo.af_netlink->pid && !sk->protinfo.af_netlink->groups) {
		struct netlink_notify n = { protocol:sk->protocol,
					    pid:sk->protinfo.af_netlink->pid };
		notifier_call_chain(&netlink_chain, NETLINK_URELEASE, &n);
	}

	sock_put(sk);
	return 0;
}
285
 
286
/* Pick a unique pid for an unbound socket.  First candidate is the
 * current process pid; on collision we probe negative values starting
 * below -4096 so autobound ids never clash with real process pids. */
static int netlink_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct sock *osk;
	s32 pid = current->pid;	/* signed: collision probing goes negative */
	int err;

retry:
	netlink_table_grab();
	for (osk=nl_table[sk->protocol]; osk; osk=osk->next) {
		if (osk->protinfo.af_netlink->pid == pid) {
			/* Bind collision, search negative pid values. */
			if (pid > 0)
				pid = -4096;
			pid--;
			netlink_table_ungrab();
			goto retry;
		}
	}
	netlink_table_ungrab();

	/* The table was unlocked above, so another socket may race us
	 * to this pid; netlink_insert re-checks and we retry. */
	err = netlink_insert(sk, pid);
	if (err == -EADDRINUSE)
		goto retry;
	sk->protinfo.af_netlink->groups = 0;
	return 0;
}
313
 
314
static inline int netlink_capable(struct socket *sock, unsigned flag)
315
{
316
        return (nl_nonroot[sock->sk->protocol] & flag) || capable(CAP_NET_ADMIN);
317
}
318
 
319
/* bind() backend.  Subscribing to multicast groups requires
 * NL_NONROOT_RECV or CAP_NET_ADMIN.  An nl_pid of 0 autobinds. */
static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sock *sk = sock->sk;
	int err;
	struct sockaddr_nl *nladdr=(struct sockaddr_nl *)addr;

	if (nladdr->nl_family != AF_NETLINK)
		return -EINVAL;

	/* Only superuser is allowed to listen multicasts */
	if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_RECV))
		return -EPERM;

	/* Already bound: the pid may not change, only the group set. */
	if (sk->protinfo.af_netlink->pid) {
		if (nladdr->nl_pid != sk->protinfo.af_netlink->pid)
			return -EINVAL;
		sk->protinfo.af_netlink->groups = nladdr->nl_groups;
		return 0;
	}

	if (nladdr->nl_pid == 0) {
		err = netlink_autobind(sock);
		if (err == 0)
			sk->protinfo.af_netlink->groups = nladdr->nl_groups;
		return err;
	}

	err = netlink_insert(sk, nladdr->nl_pid);
	if (err == 0)
		sk->protinfo.af_netlink->groups = nladdr->nl_groups;
	return err;
}
351
 
352
static int netlink_connect(struct socket *sock, struct sockaddr *addr,
353
                           int alen, int flags)
354
{
355
        int err = 0;
356
        struct sock *sk = sock->sk;
357
        struct sockaddr_nl *nladdr=(struct sockaddr_nl*)addr;
358
 
359
        if (addr->sa_family == AF_UNSPEC) {
360
                sk->protinfo.af_netlink->dst_pid = 0;
361
                sk->protinfo.af_netlink->dst_groups = 0;
362
                return 0;
363
        }
364
        if (addr->sa_family != AF_NETLINK)
365
                return -EINVAL;
366
 
367
        /* Only superuser is allowed to send multicasts */
368
        if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_SEND))
369
                return -EPERM;
370
 
371
        if (!sk->protinfo.af_netlink->pid)
372
                err = netlink_autobind(sock);
373
 
374
        if (err == 0) {
375
                sk->protinfo.af_netlink->dst_pid = nladdr->nl_pid;
376
                sk->protinfo.af_netlink->dst_groups = nladdr->nl_groups;
377
        }
378
 
379
        return 0;
380
}
381
 
382
static int netlink_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer)
383
{
384
        struct sock *sk = sock->sk;
385
        struct sockaddr_nl *nladdr=(struct sockaddr_nl *)addr;
386
 
387
        nladdr->nl_family = AF_NETLINK;
388
        nladdr->nl_pad = 0;
389
        *addr_len = sizeof(*nladdr);
390
 
391
        if (peer) {
392
                nladdr->nl_pid = sk->protinfo.af_netlink->dst_pid;
393
                nladdr->nl_groups = sk->protinfo.af_netlink->dst_groups;
394
        } else {
395
                nladdr->nl_pid = sk->protinfo.af_netlink->pid;
396
                nladdr->nl_groups = sk->protinfo.af_netlink->groups;
397
        }
398
        return 0;
399
}
400
 
401
/* Mark sk as congested (bit 0 of state) and report ENOBUFS once per
 * overrun episode; the bit is cleared again in netlink_rcv_wake(). */
static void netlink_overrun(struct sock *sk)
{
	if (!test_and_set_bit(0, &sk->protinfo.af_netlink->state)) {
		sk->err = ENOBUFS;
		sk->error_report(sk);
	}
}
408
 
409
/* Deliver skb to the socket bound to (ssk->protocol, pid), possibly
 * blocking up to the sender's sndtimeo while the receiver is full.
 * Consumes skb on every path.  Returns bytes delivered, -EAGAIN when
 * a non-blocking send would have to wait, -ECONNREFUSED when no
 * destination exists, or a signal-derived errno. */
int netlink_unicast(struct sock *ssk, struct sk_buff *skb, u32 pid, int nonblock)
{
	struct sock *sk;
	int len = skb->len;
	int protocol = ssk->protocol;
	long timeo;
	DECLARE_WAITQUEUE(wait, current);

	timeo = sock_sndtimeo(ssk, nonblock);

retry:
	sk = netlink_lookup(protocol, pid);
	if (sk == NULL)
		goto no_dst;

	/* Don't bother queuing skb if kernel socket has no input function */
	if (sk->protinfo.af_netlink->pid == 0 &&
	    !sk->protinfo.af_netlink->data_ready)
		goto no_dst;

#ifdef NL_EMULATE_DEV
	/* Legacy device emulation: hand off synchronously. */
	if (sk->protinfo.af_netlink->handler) {
		skb_orphan(skb);
		len = sk->protinfo.af_netlink->handler(protocol, skb);
		sock_put(sk);
		return len;
	}
#endif

	/* Receiver full or congested: fail fast, or sleep then retry. */
	if (atomic_read(&sk->rmem_alloc) > sk->rcvbuf ||
	    test_bit(0, &sk->protinfo.af_netlink->state)) {
		if (!timeo) {
			/* Only kernel-originated sends (ssk pid 0) raise
			 * the receiver's overrun flag. */
			if (ssk->protinfo.af_netlink->pid == 0)
				netlink_overrun(sk);
			sock_put(sk);
			kfree_skb(skb);
			return -EAGAIN;
		}

		__set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&sk->protinfo.af_netlink->wait, &wait);

		/* Re-check after queueing ourselves; skip the sleep if the
		 * condition already cleared or the receiver died. */
		if ((atomic_read(&sk->rmem_alloc) > sk->rcvbuf ||
		    test_bit(0, &sk->protinfo.af_netlink->state)) &&
		    !sk->dead)
			timeo = schedule_timeout(timeo);

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&sk->protinfo.af_netlink->wait, &wait);
		sock_put(sk);

		if (signal_pending(current)) {
			kfree_skb(skb);
			return sock_intr_errno(timeo);
		}
		/* Re-lookup: the destination may have gone away meanwhile. */
		goto retry;
	}

	skb_orphan(skb);
	skb_set_owner_r(skb, sk);
	skb_queue_tail(&sk->receive_queue, skb);
	sk->data_ready(sk, len);
	sock_put(sk);
	return len;

no_dst:
	kfree_skb(skb);
	return -ECONNREFUSED;
}
478
 
479
/* Try to queue skb on one broadcast listener.  Returns 0 on success,
 * -1 when the socket is over its rcvbuf or marked congested. */
static __inline__ int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
{
#ifdef NL_EMULATE_DEV
	/* Legacy device emulation: synchronous hand-off, never fails. */
	if (sk->protinfo.af_netlink->handler) {
		skb_orphan(skb);
		sk->protinfo.af_netlink->handler(sk->protocol, skb);
		return 0;
	} else
#endif
	if (atomic_read(&sk->rmem_alloc) <= sk->rcvbuf &&
	    !test_bit(0, &sk->protinfo.af_netlink->state)) {
		skb_orphan(skb);
		skb_set_owner_r(skb, sk);
		skb_queue_tail(&sk->receive_queue, skb);
		sk->data_ready(sk, skb->len);
		return 0;
	}
	return -1;
}
498
 
499
/* Deliver skb to every socket of ssk's protocol subscribed to 'group',
 * excluding ssk itself and the socket bound to 'pid'.  Consumes the
 * caller's reference to skb.  Listeners that cannot accept the
 * message get an ENOBUFS overrun instead of the data. */
void netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
		       u32 group, int allocation)
{
	struct sock *sk;
	struct sk_buff *skb2 = NULL;
	int protocol = ssk->protocol;
	int failure = 0;

	/* While we sleep in clone, do not allow to change socket list */

	netlink_lock_table();

	for (sk = nl_table[protocol]; sk; sk = sk->next) {
		if (ssk == sk)
			continue;

		if (sk->protinfo.af_netlink->pid == pid ||
		    !(sk->protinfo.af_netlink->groups&group))
			continue;

		/* Once a clone failed, all remaining listeners are only
		 * notified of the overrun. */
		if (failure) {
			netlink_overrun(sk);
			continue;
		}

		sock_hold(sk);
		if (skb2 == NULL) {
			/* Reuse the original skb when we hold the only
			 * reference; otherwise clone it. */
			if (atomic_read(&skb->users) != 1) {
				skb2 = skb_clone(skb, allocation);
			} else {
				skb2 = skb;
				atomic_inc(&skb->users);
			}
		}
		if (skb2 == NULL) {
			netlink_overrun(sk);
			/* Clone failed. Notify ALL listeners. */
			failure = 1;
		} else if (netlink_broadcast_deliver(sk, skb2)) {
			netlink_overrun(sk);
		} else
			skb2 = NULL;	/* delivered; next listener gets a new clone */
		sock_put(sk);
	}

	netlink_unlock_table();

	/* Drop an unconsumed clone and the caller's reference. */
	if (skb2)
		kfree_skb(skb2);
	kfree_skb(skb);
}
550
 
551
/* Raise error 'code' on every socket of ssk's protocol subscribed to
 * 'group', excluding ssk itself and the socket bound to 'pid'. */
void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
{
	struct sock *sk;

	read_lock(&nl_table_lock);
	for (sk = nl_table[ssk->protocol]; sk != NULL; sk = sk->next) {
		struct netlink_opt *nlk = sk->protinfo.af_netlink;

		if (sk == ssk)
			continue;
		if (nlk->pid == pid || !(nlk->groups & group))
			continue;

		sk->err = code;
		sk->error_report(sk);
	}
	read_unlock(&nl_table_lock);
}
570
 
571
/* After a receive: clear the congestion bit once the queue is empty,
 * then wake senders blocked in netlink_unicast() if not congested. */
static inline void netlink_rcv_wake(struct sock *sk)
{
	if (skb_queue_len(&sk->receive_queue) == 0)
		clear_bit(0, &sk->protinfo.af_netlink->state);
	if (!test_bit(0, &sk->protinfo.af_netlink->state))
		wake_up_interruptible(&sk->protinfo.af_netlink->wait);
}
578
 
579
/* sendmsg() backend: resolve the destination (explicit msg_name or
 * the connected default), autobind if needed, copy the user data into
 * a fresh skb tagged with sender credentials and capabilities, then
 * broadcast and/or unicast it. */
static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, int len,
			   struct scm_cookie *scm)
{
	struct sock *sk = sock->sk;
	struct sockaddr_nl *addr=msg->msg_name;
	u32 dst_pid;
	u32 dst_groups;
	struct sk_buff *skb;
	int err;

	if (msg->msg_flags&MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_namelen) {
		if (addr->nl_family != AF_NETLINK)
			return -EINVAL;
		dst_pid = addr->nl_pid;
		dst_groups = addr->nl_groups;
		/* Multicast sends are privileged. */
		if (dst_groups && !netlink_capable(sock, NL_NONROOT_SEND))
			return -EPERM;
	} else {
		dst_pid = sk->protinfo.af_netlink->dst_pid;
		dst_groups = sk->protinfo.af_netlink->dst_groups;
	}

	if (!sk->protinfo.af_netlink->pid) {
		err = netlink_autobind(sock);
		if (err)
			goto out;
	}

	err = -EMSGSIZE;
	if ((unsigned)len > sk->sndbuf-32)
		goto out;
	err = -ENOBUFS;
	skb = alloc_skb(len, GFP_KERNEL);
	if (skb==NULL)
		goto out;

	NETLINK_CB(skb).pid = sk->protinfo.af_netlink->pid;
	NETLINK_CB(skb).groups = sk->protinfo.af_netlink->groups;
	NETLINK_CB(skb).dst_pid = dst_pid;
	NETLINK_CB(skb).dst_groups = dst_groups;
	memcpy(NETLINK_CREDS(skb), &scm->creds, sizeof(struct ucred));

	/* What can I do? Netlink is asynchronous, so that
	   we will have to save current capabilities to
	   check them, when this message will be delivered
	   to corresponding kernel module.   --ANK (980802)
	 */
	NETLINK_CB(skb).eff_cap = current->cap_effective;

	err = -EFAULT;
	if (memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len)) {
		kfree_skb(skb);
		goto out;
	}

	/* Broadcast takes its own skb reference; unicast consumes ours. */
	if (dst_groups) {
		atomic_inc(&skb->users);
		netlink_broadcast(sk, skb, dst_pid, dst_groups, GFP_KERNEL);
	}
	err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags&MSG_DONTWAIT);

out:
	return err;
}
646
 
647
/* recvmsg() backend: dequeue one datagram, copy the (possibly
 * truncated) payload to the user iovec, fill in the sender address
 * and SCM credentials, and continue a pending dump once the receive
 * buffer has drained enough. */
static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, int len,
			   int flags, struct scm_cookie *scm)
{
	struct sock *sk = sock->sk;
	int noblock = flags&MSG_DONTWAIT;
	int copied;
	struct sk_buff *skb;
	int err;

	if (flags&MSG_OOB)
		return -EOPNOTSUPP;

	copied = 0;

	skb = skb_recv_datagram(sk,flags,noblock,&err);
	if (skb==NULL)
		goto out;

	msg->msg_namelen = 0;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb->h.raw = skb->data;
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

	if (msg->msg_name) {
		struct sockaddr_nl *addr = (struct sockaddr_nl*)msg->msg_name;
		addr->nl_family = AF_NETLINK;
		addr->nl_pad    = 0;
		addr->nl_pid    = NETLINK_CB(skb).pid;
		addr->nl_groups = NETLINK_CB(skb).dst_groups;
		msg->msg_namelen = sizeof(*addr);
	}

	scm->creds = *NETLINK_CREDS(skb);
	skb_free_datagram(sk, skb);

	/* Feed the next chunk of an in-progress dump once the receive
	 * buffer is at most half full. */
	if (sk->protinfo.af_netlink->cb
	    && atomic_read(&sk->rmem_alloc) <= sk->rcvbuf/2)
		netlink_dump(sk);

out:
	netlink_rcv_wake(sk);
	return err ? : copied;
}
696
 
697
void netlink_data_ready(struct sock *sk, int len)
698
{
699
        if (sk->protinfo.af_netlink->data_ready)
700
                sk->protinfo.af_netlink->data_ready(sk, len);
701
        netlink_rcv_wake(sk);
702
}
703
 
704
/*
705
 *      We export these functions to other modules. They provide a
706
 *      complete set of kernel non-blocking support for message
707
 *      queueing.
708
 */
709
 
710
/* Create an in-kernel netlink socket for protocol 'unit', bound to
 * pid 0.  'input' (may be NULL) is invoked from netlink_data_ready()
 * whenever user messages are queued on it. */
struct sock *
netlink_kernel_create(int unit, void (*input)(struct sock *sk, int len))
{
	struct socket *sock;
	struct sock *sk;

	if (unit<0 || unit>=MAX_LINKS)
		return NULL;

	if (!(sock = sock_alloc()))
		return NULL;

	sock->type = SOCK_RAW;

	if (netlink_create(sock, unit) < 0) {
		sock_release(sock);
		return NULL;
	}
	sk = sock->sk;
	sk->data_ready = netlink_data_ready;
	if (input)
		sk->protinfo.af_netlink->data_ready = input;

	/* Kernel sockets own pid 0. */
	netlink_insert(sk, 0);
	return sk;
}
736
 
737
void netlink_set_nonroot(int protocol, unsigned flags)
738
{
739
        if ((unsigned)protocol < MAX_LINKS)
740
                nl_nonroot[protocol] = flags;
741
}
742
 
743
static void netlink_destroy_callback(struct netlink_callback *cb)
744
{
745
        if (cb->skb)
746
                kfree_skb(cb->skb);
747
        kfree(cb);
748
}
749
 
750
/*
751
 * It looks a bit ugly.
752
 * It would be better to create kernel thread.
753
 */
754
 
755
/* Produce the next chunk of an active dump into a fresh skb.  While
 * cb->dump returns a positive length, one skb is queued per call and
 * recvmsg() re-invokes us; when it returns <= 0 we queue a final
 * NLMSG_DONE message, destroy the callback and drop the socket
 * reference taken in netlink_dump_start(). */
static int netlink_dump(struct sock *sk)
{
	struct netlink_callback *cb;
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	int len;

	skb = sock_rmalloc(sk, NLMSG_GOODSIZE, 0, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	spin_lock(&sk->protinfo.af_netlink->cb_lock);

	cb = sk->protinfo.af_netlink->cb;
	if (cb == NULL) {
		spin_unlock(&sk->protinfo.af_netlink->cb_lock);
		kfree_skb(skb);
		return -EINVAL;
	}

	len = cb->dump(skb, cb);

	if (len > 0) {
		/* More to come: queue this chunk and wait for the next
		 * recvmsg() to pull us again. */
		spin_unlock(&sk->protinfo.af_netlink->cb_lock);
		skb_queue_tail(&sk->receive_queue, skb);
		sk->data_ready(sk, len);
		return 0;
	}

	/* Dump finished: terminate the multipart stream with NLMSG_DONE
	 * carrying cb->dump's final return value. */
	nlh = __nlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, NLMSG_DONE, sizeof(int));
	nlh->nlmsg_flags |= NLM_F_MULTI;
	memcpy(NLMSG_DATA(nlh), &len, sizeof(len));
	skb_queue_tail(&sk->receive_queue, skb);
	sk->data_ready(sk, skb->len);

	cb->done(cb);
	sk->protinfo.af_netlink->cb = NULL;
	spin_unlock(&sk->protinfo.af_netlink->cb_lock);

	netlink_destroy_callback(cb);
	sock_put(sk);	/* ref held since netlink_dump_start() */
	return 0;
}
798
 
799
/* Begin a multipart dump answering request nlh carried in skb.  Only
 * one dump may be active per socket (-EBUSY otherwise).  On success,
 * the socket reference from netlink_lookup() is kept until
 * netlink_dump() completes the dump. */
int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
		       struct nlmsghdr *nlh,
		       int (*dump)(struct sk_buff *skb, struct netlink_callback*),
		       int (*done)(struct netlink_callback*))
{
	struct netlink_callback *cb;
	struct sock *sk;

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (cb == NULL)
		return -ENOBUFS;

	memset(cb, 0, sizeof(*cb));
	cb->dump = dump;
	cb->done = done;
	cb->nlh = nlh;
	atomic_inc(&skb->users);	/* cb owns a reference to the request skb */
	cb->skb = skb;

	sk = netlink_lookup(ssk->protocol, NETLINK_CB(skb).pid);
	if (sk == NULL) {
		netlink_destroy_callback(cb);
		return -ECONNREFUSED;
	}
	/* A dump is in progress... */
	spin_lock(&sk->protinfo.af_netlink->cb_lock);
	if (sk->protinfo.af_netlink->cb) {
		spin_unlock(&sk->protinfo.af_netlink->cb_lock);
		netlink_destroy_callback(cb);
		sock_put(sk);
		return -EBUSY;
	}
	sk->protinfo.af_netlink->cb = cb;
	spin_unlock(&sk->protinfo.af_netlink->cb_lock);

	/* The lookup reference is dropped by netlink_dump() when the
	 * dump terminates. */
	netlink_dump(sk);
	return 0;
}
837
 
838
void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
839
{
840
        struct sk_buff *skb;
841
        struct nlmsghdr *rep;
842
        struct nlmsgerr *errmsg;
843
        int size;
844
 
845
        if (err == 0)
846
                size = NLMSG_SPACE(sizeof(struct nlmsgerr));
847
        else
848
                size = NLMSG_SPACE(4 + NLMSG_ALIGN(nlh->nlmsg_len));
849
 
850
        skb = alloc_skb(size, GFP_KERNEL);
851
        if (!skb) {
852
                struct sock *sk;
853
 
854
                sk = netlink_lookup(in_skb->sk->protocol,
855
                                    NETLINK_CB(in_skb).pid);
856
                if (sk) {
857
                        sk->err = ENOBUFS;
858
                        sk->error_report(sk);
859
                        sock_put(sk);
860
                }
861
        }
862
 
863
        rep = __nlmsg_put(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
864
                          NLMSG_ERROR, sizeof(struct nlmsgerr));
865
        errmsg = NLMSG_DATA(rep);
866
        errmsg->error = err;
867
        memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(struct nlmsghdr));
868
        netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
869
}
870
 
871
 
872
#ifdef NL_EMULATE_DEV
873
 
874
static rwlock_t nl_emu_lock = RW_LOCK_UNLOCKED;
875
 
876
/*
877
 *      Backward compatibility.
878
 */
879
 
880
/* Legacy device-emulation API: create the kernel socket for 'unit'
 * and install a synchronous handler for messages sent to it. */
int netlink_attach(int unit, int (*function)(int, struct sk_buff *skb))
{
	struct sock *sk = netlink_kernel_create(unit, NULL);
	if (sk == NULL)
		return -ENOBUFS;
	sk->protinfo.af_netlink->handler = function;
	write_lock_bh(&nl_emu_lock);
	netlink_kernel[unit] = sk->socket;
	write_unlock_bh(&nl_emu_lock);
	return 0;
}
891
 
892
/* Tear down the emulated kernel socket registered for 'unit'. */
void netlink_detach(int unit)
{
	struct socket *sock;

	/* Clear the slot under the lock; release outside it. */
	write_lock_bh(&nl_emu_lock);
	sock = netlink_kernel[unit];
	netlink_kernel[unit] = NULL;
	write_unlock_bh(&nl_emu_lock);

	sock_release(sock);
}
903
 
904
/* Legacy API: broadcast skb from the emulated kernel socket of 'unit'
 * to all groups.  Returns -EUNATCH when the unit is not attached. */
int netlink_post(int unit, struct sk_buff *skb)
{
	struct socket *sock;

	read_lock(&nl_emu_lock);
	sock = netlink_kernel[unit];
	if (sock) {
		struct sock *sk = sock->sk;
		memset(skb->cb, 0, sizeof(skb->cb));
		/* Hold the sock so the lock can be dropped before the
		 * broadcast. */
		sock_hold(sk);
		read_unlock(&nl_emu_lock);

		netlink_broadcast(sk, skb, 0, ~0, GFP_ATOMIC);

		sock_put(sk);
		return 0;
	}
	read_unlock(&nl_emu_lock);
	return -EUNATCH;
}
924
 
925
#endif
926
 
927
 
928
#ifdef CONFIG_PROC_FS
929
/* /proc/net/netlink read handler: one line per netlink socket showing
 * pid, group mask, queue allocations, active dump callback and
 * refcount.  Implements the classic offset/length windowing protocol
 * of read_proc handlers. */
static int netlink_read_proc(char *buffer, char **start, off_t offset,
			     int length, int *eof, void *data)
{
	off_t pos=0;
	off_t begin=0;
	int len=0;
	int i;
	struct sock *s;

	len+= sprintf(buffer,"sk       Eth Pid    Groups   "
		      "Rmem     Wmem     Dump     Locks\n");

	for (i=0; i<MAX_LINKS; i++) {
		read_lock(&nl_table_lock);
		for (s = nl_table[i]; s; s = s->next) {
			len+=sprintf(buffer+len,"%p %-3d %-6d %08x %-8d %-8d %p %d",
				     s,
				     s->protocol,
				     s->protinfo.af_netlink->pid,
				     s->protinfo.af_netlink->groups,
				     atomic_read(&s->rmem_alloc),
				     atomic_read(&s->wmem_alloc),
				     s->protinfo.af_netlink->cb,
				     atomic_read(&s->refcnt)
				     );

			buffer[len++]='\n';

			pos=begin+len;
			/* Output entirely before the requested window:
			 * discard it and restart counting. */
			if(pos<offset) {
				len=0;
				begin=pos;
			}
			/* Past the end of the window: stop generating. */
			if(pos>offset+length) {
				read_unlock(&nl_table_lock);
				goto done;
			}
		}
		read_unlock(&nl_table_lock);
	}
	*eof = 1;

done:
	*start=buffer+(offset-begin);
	len-=(offset-begin);
	if(len>length)
		len=length;
	if(len<0)
		len=0;
	return len;
}
980
#endif
981
 
982
/* Subscribe nb to netlink events (currently NETLINK_URELEASE). */
int netlink_register_notifier(struct notifier_block *nb)
{
	return notifier_chain_register(&netlink_chain, nb);
}
986
 
987
/* Remove a notifier added with netlink_register_notifier(). */
int netlink_unregister_notifier(struct notifier_block *nb)
{
	return notifier_chain_unregister(&netlink_chain, nb);
}
991
 
992
/* proto_ops for PF_NETLINK sockets; unsupported operations map to the
 * sock_no_* stubs. */
struct proto_ops netlink_ops = {
	family:		PF_NETLINK,

	release:	netlink_release,
	bind:		netlink_bind,
	connect:	netlink_connect,
	socketpair:	sock_no_socketpair,
	accept:		sock_no_accept,
	getname:	netlink_getname,
	poll:		datagram_poll,
	ioctl:		sock_no_ioctl,
	listen:		sock_no_listen,
	shutdown:	sock_no_shutdown,
	setsockopt:	sock_no_setsockopt,
	getsockopt:	sock_no_getsockopt,
	sendmsg:	netlink_sendmsg,
	recvmsg:	netlink_recvmsg,
	mmap:		sock_no_mmap,
	sendpage:	sock_no_sendpage,
};
1012
 
1013
struct net_proto_family netlink_family_ops = {
1014
        PF_NETLINK,
1015
        netlink_create
1016
};
1017
 
1018
/* Module init: verify netlink_skb_parms fits in skb->cb, then register
 * the address family and the /proc/net/netlink entry. */
static int __init netlink_proto_init(void)
{
	struct sk_buff *dummy_skb;

	/* NETLINK_CB() aliases skb->cb; refuse to run if it cannot fit. */
	if (sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb)) {
		printk(KERN_CRIT "netlink_init: panic\n");
		return -1;
	}
	sock_register(&netlink_family_ops);
#ifdef CONFIG_PROC_FS
	create_proc_read_entry("net/netlink", 0, 0, netlink_read_proc, NULL);
#endif
	return 0;
}
1032
 
1033
/* Module unload: unregister the family and remove the /proc entry. */
static void __exit netlink_proto_exit(void)
{
       sock_unregister(PF_NETLINK);
       remove_proc_entry("net/netlink", NULL);
}
1038
 
1039
module_init(netlink_proto_init);
1040
module_exit(netlink_proto_exit);

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.