//==========================================================================
//
//      src/sys/kern/uipc_mbuf.c
//
//==========================================================================
//####BSDCOPYRIGHTBEGIN####
//
// -------------------------------------------
//
// Portions of this software may have been derived from OpenBSD,
// FreeBSD or other sources, and are covered by the appropriate
// copyright disclaimers included herein.
//
// Portions created by Red Hat are
// Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
//
// -------------------------------------------
//
//####BSDCOPYRIGHTEND####
//==========================================================================

/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)uipc_mbuf.c 8.2 (Berkeley) 1/4/94
 * $FreeBSD: src/sys/kern/uipc_mbuf.c,v 1.51.2.7 2001/07/30 23:28:00 peter Exp $
 */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/protosw.h>

static void mbinit __P((void *));
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbinit, NULL)

struct mbuf *mbutl;
char    *mclrefcnt;
struct mbstat mbstat;
u_long  mbtypes[MT_NTYPES];
struct mbuf *mmbfree;
union mcluster *mclfree;
int     max_linkhdr;
int     max_protohdr;
int     max_hdr;
int     max_datalen;
u_int   m_mballoc_wid = 0;
u_int   m_clalloc_wid = 0;

int mbuf_wait = 32;  // Time in ticks to wait for mbufs to come free

static void     m_reclaim __P((void));

#ifndef NMBCLUSTERS
#define NMBCLUSTERS     (512 + maxusers * 16)
#endif
#ifndef NMBUFS
#define NMBUFS          (nmbclusters * 4)
#endif


/* "number of clusters of pages" */
#define NCL_INIT        1

#define NMB_INIT        16

/* ARGSUSED*/
static void
mbinit(dummy)
        void *dummy;
{
        int s;

        mmbfree = NULL; mclfree = NULL;
        mbstat.m_msize = MSIZE;
        mbstat.m_mclbytes = MCLBYTES;
        mbstat.m_minclsize = MINCLSIZE;
        mbstat.m_mlen = MLEN;
        mbstat.m_mhlen = MHLEN;

        s = splimp();
        if (m_mballoc(NMB_INIT, M_DONTWAIT) == 0)
                goto bad;
#if MCLBYTES <= PAGE_SIZE
        if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
                goto bad;
#else
        /* It's OK to call contigmalloc in this context. */
        if (m_clalloc(16, M_WAIT) == 0)
                goto bad;
#endif
        splx(s);
        return;
bad:
        panic("mbinit");
}

/*
 * Allocate at least nmb mbufs and place on mbuf free list.
 * Must be called at splimp.
 */
/* ARGSUSED */
int
m_mballoc(nmb, how)
        register int nmb;
        int how;
{
        struct mbuf *p;
        int i;

        for (i = 0; i < nmb; i++) {
            p = (struct mbuf *)cyg_net_mbuf_alloc(0, 0);
            if (p != (struct mbuf *)0) {
                ((struct mbuf *)p)->m_next = mmbfree;
                mmbfree = (struct mbuf *)p;
                mbstat.m_mbufs++;
                mbtypes[MT_FREE]++;
            } else {
                // Warn - out of mbufs?
                return (0);
            }
        }
        return (1);
}

/*
 * Once the mb_map has been exhausted and if the call to the allocation macros
 * (or, in some cases, functions) is with M_WAIT, then it is necessary to rely
 * solely on reclaimed mbufs. Here we wait for an mbuf to be freed for a
 * designated (mbuf_wait) time.
 */
struct mbuf *
m_mballoc_wait(int caller, int type)
{
        struct mbuf *p;
        int s;

        s = splimp();
        m_mballoc_wid++;
        if ((tsleep(&m_mballoc_wid, PVM, "mballc", mbuf_wait)) == EWOULDBLOCK)
                m_mballoc_wid--;
        splx(s);

        /*
         * Now that we (think) that we've got something, we will redo an
         * MGET, but avoid getting into another instance of m_mballoc_wait()
         * XXX: We retry to fetch _even_ if the sleep timed out. This is left
         *      this way, purposely, in the [unlikely] case that an mbuf was
         *      freed but the sleep was not awakened in time.
         */
        p = NULL;
        switch (caller) {
        case MGET_C:
                MGET(p, M_DONTWAIT, type);
                break;
        case MGETHDR_C:
                MGETHDR(p, M_DONTWAIT, type);
                break;
        default:
                panic("m_mballoc_wait: invalid caller (%d)", caller);
        }

        s = splimp();
        if (p != NULL) {                /* We waited and got something... */
                mbstat.m_wait++;
                /* Wake up another if we have more free. */
                if (mmbfree != NULL)
                        MMBWAKEUP();
        }
        splx(s);
        return (p);
}

#if MCLBYTES > PAGE_SIZE
static int i_want_my_mcl;

static void
kproc_mclalloc(void)
{
        int status;

        while (1) {
                tsleep(&i_want_my_mcl, PVM, "mclalloc", 0);

                for (; i_want_my_mcl; i_want_my_mcl--) {
                        if (m_clalloc(1, M_WAIT) == 0)
                                printf("m_clalloc failed even in process context!\n");
                }
        }
}

static struct proc *mclallocproc;
static struct kproc_desc mclalloc_kp = {
        "mclalloc",
        kproc_mclalloc,
        &mclallocproc
};
SYSINIT(mclallocproc, SI_SUB_KTHREAD_UPDATE, SI_ORDER_ANY, kproc_start,
           &mclalloc_kp);
#endif

/*
 * Allocate some number of mbuf clusters
 * and place on cluster free list.
 * Must be called at splimp.
 */
/* ARGSUSED */
int
m_clalloc(ncl, how)
        register int ncl;
        int how;
{
        union mcluster *p;
        int i;

        for (i = 0; i < ncl; i++) {
            p = (union mcluster *)cyg_net_cluster_alloc();
            if (p != (union mcluster *)0) {
                ((union mcluster *)p)->mcl_next = mclfree;
                mclfree = (union mcluster *)p;
                mbstat.m_clfree++;
                mbstat.m_clusters++;
            } else {
                // Warn - no more clusters?
                return (0);
            }
        }
        return (1);
}

/*
 * Once the mb_map submap has been exhausted and the allocation is called with
 * M_WAIT, we rely on the mclfree union pointers. If nothing is free, we will
 * sleep for a designated amount of time (mbuf_wait) or until we're woken up
 * due to sudden mcluster availability.
 */
caddr_t
m_clalloc_wait(void)
{
        caddr_t p;
        int s;

        /* Sleep until something's available or until we expire. */
        m_clalloc_wid++;
        if ((tsleep(&m_clalloc_wid, PVM, "mclalc", mbuf_wait)) == EWOULDBLOCK)
                m_clalloc_wid--;

        /*
         * Now that we (think) that we've got something, we will redo an
         * MCLALLOC, but avoid getting into another instance of m_clalloc_wait()
         */
        p = NULL;
        MCLALLOC(p, M_DONTWAIT);

        s = splimp();
        if (p != NULL) {        /* We waited and got something... */
                mbstat.m_wait++;
                /* Wake up another if we have more free. */
                if (mclfree != NULL)
                        MCLWAKEUP();
        }

        splx(s);
        return (p);
}

/*
 * When MGET fails, ask protocols to free space when short of memory,
 * then re-attempt to allocate an mbuf.
 */
struct mbuf *
m_retry(i, t)
        int i, t;
{
        register struct mbuf *m;

        /*
         * Must only do the reclaim if not in an interrupt context.
         */
        if (i == M_WAIT) {
                m_reclaim();
        }

        /*
         * Both m_mballoc_wait and m_retry must be nulled because
         * when the MGET macro is run from here, we definitely do _not_
         * want to enter an instance of m_mballoc_wait() or m_retry() (again!)
         */
#undef m_retry
#undef m_mballoc_wait
#define m_mballoc_wait(caller,type)    (struct mbuf *)0
#define m_retry(i, t)   (struct mbuf *)0
        MGET(m, i, t);
#undef m_retry
#undef m_mballoc_wait
#define m_retry cyg_m_retry
#define m_retryhdr cyg_m_retryhdr
#define m_mballoc_wait cyg_m_mballoc_wait

        if (m != NULL)
                mbstat.m_wait++;
        else
                mbstat.m_drops++;

        return (m);
}

/*
 * As above; retry an MGETHDR.
 */
struct mbuf *
m_retryhdr(i, t)
        int i, t;
{
        register struct mbuf *m;

        /*
         * Must only do the reclaim if not in an interrupt context.
         */
        if (i == M_WAIT) {
                m_reclaim();
        }

#undef m_retryhdr
#undef m_mballoc_wait
#define m_mballoc_wait(caller,type)    (struct mbuf *)0
#define m_retryhdr(i, t) (struct mbuf *)0
        MGETHDR(m, i, t);
#undef m_retryhdr
#undef m_mballoc_wait
#define m_retry cyg_m_retry
#define m_retryhdr cyg_m_retryhdr
#define m_mballoc_wait cyg_m_mballoc_wait

        if (m != NULL)
                mbstat.m_wait++;
        else
                mbstat.m_drops++;

        return (m);
}
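
/*
 * Illustrative note (not part of the upstream comments): when the MGET or
 * MGETHDR macros find the free list empty they fall back to m_retry() or
 * m_retryhdr() above.  For M_WAIT callers those routines first run
 * m_reclaim() to ask every protocol to drain cached data, then re-run the
 * allocation macro with m_retry/m_mballoc_wait temporarily #defined away so
 * the retry cannot recurse into itself.  A typical caller only sees:
 *
 *     struct mbuf *m;
 *
 *     MGET(m, M_WAIT, MT_DATA);   // may pass through m_retry()/m_reclaim()
 *     if (m == NULL)
 *             return ENOBUFS;     // still nothing after the retry path
 */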

static void
m_reclaim()
{
        register struct domain *dp;
        register struct protosw *pr;
        int s = splimp();

        for (dp = domains; dp; dp = dp->dom_next)
                for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
                        if (pr->pr_drain)
                                (*pr->pr_drain)();
        splx(s);
        mbstat.m_drain++;
}

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(how, type)
        int how, type;
{
        register struct mbuf *m;

        MGET(m, how, type);
        return (m);
}

struct mbuf *
m_gethdr(how, type)
        int how, type;
{
        register struct mbuf *m;

        MGETHDR(m, how, type);
        return (m);
}

struct mbuf *
m_getclr(how, type)
        int how, type;
{
        register struct mbuf *m;

        MGET(m, how, type);
        if (m == 0)
                return (0);
        bzero(mtod(m, caddr_t), MLEN);
        return (m);
}

/*
 * struct mbuf *
 * m_getm(m, len, how, type)
 *
 * This will allocate len-worth of mbufs and/or mbuf clusters (whatever fits
 * best) and return a pointer to the top of the allocated chain. If m is
 * non-null, then we assume that it is a single mbuf or an mbuf chain to
 * which we want len bytes worth of mbufs and/or clusters attached, and so
 * if we succeed in allocating it, we will just return a pointer to m.
 *
 * If we happen to fail at any point during the allocation, we will free
 * up everything we have already allocated and return NULL.
 *
 */
struct mbuf *
m_getm(struct mbuf *m, int len, int how, int type)
{
        struct mbuf *top, *tail, *mp, *mtail = NULL;

        MGET(mp, how, type);
        if (mp == NULL)
                return (NULL);
        else if (len > MINCLSIZE) {
                MCLGET(mp, how);
                if ((mp->m_flags & M_EXT) == 0) {
                        m_free(mp);
                        return (NULL);
                }
        }
        mp->m_len = 0;
        len -= M_TRAILINGSPACE(mp);

        if (m != NULL)
                for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next);
        else
                m = mp;

        top = tail = mp;
        while (len > 0) {
                MGET(mp, how, type);
                if (mp == NULL)
                        goto failed;

                tail->m_next = mp;
                tail = mp;
                if (len > MINCLSIZE) {
                        MCLGET(mp, how);
                        if ((mp->m_flags & M_EXT) == 0)
                                goto failed;
                }

                mp->m_len = 0;
                len -= M_TRAILINGSPACE(mp);
        }

        if (mtail != NULL)
                mtail->m_next = top;
        return (m);

failed:
        m_freem(top);
        return (NULL);
}
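
/*
 * Usage sketch for m_getm() (illustrative): build a fresh chain with room
 * for at least 2 KiB, or grow an existing chain by that much.  On failure
 * everything newly allocated has already been freed and the caller's chain,
 * if any, is left untouched.
 *
 *     struct mbuf *chain = m_getm(NULL, 2048, M_DONTWAIT, MT_DATA);
 *
 *     if (m_getm(m, 2048, M_DONTWAIT, MT_DATA) == NULL)
 *             return;                 // "m" is unchanged on failure
 */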

struct mbuf *
m_free(m)
        struct mbuf *m;
{
        register struct mbuf *n;

        MFREE(m, n);
        return (n);
}

void
m_freem(m)
        register struct mbuf *m;
{
        register struct mbuf *n;
        struct mbuf *orig = m;

        if (m == NULL)
                return;
        do {
                MFREE(m, n);
                m = n;
                if (m == orig) {
                    diag_printf("DEBUG: Circular MBUF %p!\n", orig);
                    return;
                }
        } while (m);
}

/*
 * Mbuffer utility routines.
 */

/*
 * Lesser-used path for M_PREPEND:
 * allocate new mbuf to prepend to chain,
 * copy junk along.
 */
struct mbuf *
m_prepend(m, len, how)
        register struct mbuf *m;
        int len, how;
{
        struct mbuf *mn;

        MGET(mn, how, m->m_type);
        if (mn == (struct mbuf *)NULL) {
                m_freem(m);
                return ((struct mbuf *)NULL);
        }
        if (m->m_flags & M_PKTHDR) {
                M_COPY_PKTHDR(mn, m);
                m->m_flags &= ~M_PKTHDR;
        }
        mn->m_next = m;
        m = mn;
        if (len < MHLEN)
                MH_ALIGN(m, len);
        m->m_len = len;
        return (m);
}

/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
#define MCFail (mbstat.m_mcfail)

struct mbuf *
m_copym(m, off0, len, wait)
        register struct mbuf *m;
        int off0, wait;
        register int len;
{
        register struct mbuf *n, **np;
        register int off = off0;
        struct mbuf *top;
        int copyhdr = 0;

        if (off == 0 && m->m_flags & M_PKTHDR)
                copyhdr = 1;
        while (off > 0) {
                if (off < m->m_len)
                        break;
                off -= m->m_len;
                m = m->m_next;
        }
        np = &top;
        top = 0;
        while (len > 0) {
                if (m == 0) {
                        break;
                }
                MGET(n, wait, m->m_type);
                *np = n;
                if (n == 0)
                        goto nospace;
                if (copyhdr) {
                        M_COPY_PKTHDR(n, m);
                        if (len == M_COPYALL)
                                n->m_pkthdr.len -= off0;
                        else
                                n->m_pkthdr.len = len;
                        copyhdr = 0;
                }
                n->m_len = min(len, m->m_len - off);
                if (m->m_flags & M_EXT) {
                        n->m_data = m->m_data + off;
                        if(!m->m_ext.ext_ref)
                                mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
                        else
                                (*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
                                                        m->m_ext.ext_size);
                        n->m_ext = m->m_ext;
                        n->m_flags |= M_EXT;
                } else
                        bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
                            (unsigned)n->m_len);
                if (len != M_COPYALL)
                        len -= n->m_len;
                off = 0;
                m = m->m_next;
                np = &n->m_next;
        }
        if (top == 0)
                MCFail++;
        return (top);
nospace:
        m_freem(top);
        MCFail++;
        return (0);
}
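
/*
 * Usage sketch for m_copym() (illustrative): take a read-only copy of the
 * first 64 bytes of a chain, or of the whole chain.  Cluster data is shared
 * by reference count rather than copied, so the result must not be written.
 *
 *     struct mbuf *part  = m_copym(m, 0, 64, M_DONTWAIT);
 *     struct mbuf *whole = m_copym(m, 0, M_COPYALL, M_DONTWAIT);
 */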

/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
struct mbuf *
m_copypacket(m, how)
        struct mbuf *m;
        int how;
{
        struct mbuf *top, *n, *o;

        MGET(n, how, m->m_type);
        top = n;
        if (!n)
                goto nospace;

        M_COPY_PKTHDR(n, m);
        n->m_len = m->m_len;
        if (m->m_flags & M_EXT) {
                n->m_data = m->m_data;
                if(!m->m_ext.ext_ref)
                        mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
                else
                        (*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
                                                m->m_ext.ext_size);
                n->m_ext = m->m_ext;
                n->m_flags |= M_EXT;
        } else {
                bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
        }

        m = m->m_next;
        while (m) {
                MGET(o, how, m->m_type);
                if (!o)
                        goto nospace;

                n->m_next = o;
                n = n->m_next;

                n->m_len = m->m_len;
                if (m->m_flags & M_EXT) {
                        n->m_data = m->m_data;
                        if(!m->m_ext.ext_ref)
                                mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
                        else
                                (*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
                                                        m->m_ext.ext_size);
                        n->m_ext = m->m_ext;
                        n->m_flags |= M_EXT;
                } else {
                        bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
                }

                m = m->m_next;
        }
        return top;
nospace:
        m_freem(top);
        MCFail++;
        return 0;
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(m, off, len, cp)
        register struct mbuf *m;
        register int off;
        register int len;
        caddr_t cp;
{
        register unsigned count;

        while (off > 0) {
                if (off < m->m_len)
                        break;
                off -= m->m_len;
                m = m->m_next;
        }
        while (len > 0) {
                count = min(m->m_len - off, len);
                bcopy(mtod(m, caddr_t) + off, cp, count);
                len -= count;
                cp += count;
                off = 0;
                m = m->m_next;
        }
}
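
/*
 * Usage sketch for m_copydata() (illustrative): linearize a fixed-size
 * header into a local buffer without modifying the chain.  "struct ip" is
 * only an example type here; any fixed-size structure works the same way.
 *
 *     struct ip iph;
 *
 *     m_copydata(m, 0, sizeof(iph), (caddr_t)&iph);
 */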

/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
struct mbuf *
m_dup(m, how)
        struct mbuf *m;
        int how;
{
        struct mbuf **p, *top = NULL;
        int remain, moff, nsize;

        /* Sanity check */
        if (m == NULL)
                return (0);

        /* While there's more data, get a new mbuf, tack it on, and fill it */
        remain = m->m_pkthdr.len;
        moff = 0;
        p = &top;
        while (remain > 0 || top == NULL) {      /* allow m->m_pkthdr.len == 0 */
                struct mbuf *n;

                /* Get the next new mbuf */
                MGET(n, how, m->m_type);
                if (n == NULL)
                        goto nospace;
                if (top == NULL) {              /* first one, must be PKTHDR */
                        M_COPY_PKTHDR(n, m);
                        nsize = MHLEN;
                } else                          /* not the first one */
                        nsize = MLEN;
                if (remain >= MINCLSIZE) {
                        MCLGET(n, how);
                        if ((n->m_flags & M_EXT) == 0) {
                                (void)m_free(n);
                                goto nospace;
                        }
                        nsize = MCLBYTES;
                }
                n->m_len = 0;

                /* Link it into the new chain */
                *p = n;
                p = &n->m_next;

                /* Copy data from original mbuf(s) into new mbuf */
                while (n->m_len < nsize && m != NULL) {
                        int chunk = min(nsize - n->m_len, m->m_len - moff);

                        bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
                        moff += chunk;
                        n->m_len += chunk;
                        remain -= chunk;
                        if (moff == m->m_len) {
                                m = m->m_next;
                                moff = 0;
                        }
                }
        }
        return (top);

nospace:
        m_freem(top);
        MCFail++;
        return (0);
}
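
/*
 * Illustrative note: unlike m_copym()/m_copypacket(), which share cluster
 * data by bumping reference counts, m_dup() copies every byte into freshly
 * allocated mbufs and clusters, so the result may be modified freely.
 *
 *     struct mbuf *writable = m_dup(m, M_DONTWAIT);
 */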

/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(m, n)
        register struct mbuf *m, *n;
{
        while (m->m_next)
                m = m->m_next;
        while (n) {
                if (m->m_flags & M_EXT ||
                    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
                        /* just join the two chains */
                        m->m_next = n;
                        return;
                }
                /* splat the data from one into the other */
                bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
                    (u_int)n->m_len);
                m->m_len += n->m_len;
                n = m_free(n);
        }
}

void
m_adj(mp, req_len)
        struct mbuf *mp;
        int req_len;
{
        register int len = req_len;
        register struct mbuf *m;
        register int count;

        if ((m = mp) == NULL)
                return;
        if (len >= 0) {
                /*
                 * Trim from head.
                 */
                while (m != NULL && len > 0) {
                        if (m->m_len <= len) {
                                len -= m->m_len;
                                m->m_len = 0;
                                m = m->m_next;
                        } else {
                                m->m_len -= len;
                                m->m_data += len;
                                len = 0;
                        }
                }
                m = mp;
                if (mp->m_flags & M_PKTHDR)
                        m->m_pkthdr.len -= (req_len - len);
        } else {
                /*
                 * Trim from tail.  Scan the mbuf chain,
                 * calculating its length and finding the last mbuf.
                 * If the adjustment only affects this mbuf, then just
                 * adjust and return.  Otherwise, rescan and truncate
                 * after the remaining size.
                 */
                len = -len;
                count = 0;
                for (;;) {
                        count += m->m_len;
                        if (m->m_next == (struct mbuf *)0)
                                break;
                        m = m->m_next;
                }
                if (m->m_len >= len) {
                        m->m_len -= len;
                        if (mp->m_flags & M_PKTHDR)
                                mp->m_pkthdr.len -= len;
                        return;
                }
                count -= len;
                if (count < 0)
                        count = 0;
                /*
                 * Correct length for chain is "count".
                 * Find the mbuf with last data, adjust its length,
                 * and toss data from remaining mbufs on chain.
                 */
                m = mp;
                if (m->m_flags & M_PKTHDR)
                        m->m_pkthdr.len = count;
                for (; m; m = m->m_next) {
                        if (m->m_len >= count) {
                                m->m_len = count;
                                break;
                        }
                        count -= m->m_len;
                }
                while (m->m_next)
                        (m = m->m_next)->m_len = 0;
        }
}
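
/*
 * Usage sketch for m_adj() (illustrative): a positive length trims bytes
 * from the head of the chain, a negative one from the tail.  For example,
 * stripping a 14-byte link-level header and then a 4-byte trailing CRC:
 *
 *     m_adj(m, 14);        // drop 14 bytes from the front
 *     m_adj(m, -4);        // drop 4 bytes from the end
 */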

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to max_protohdr-len extra bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
#define MPFail (mbstat.m_mpfail)

struct mbuf *
m_pullup(n, len)
        register struct mbuf *n;
        int len;
{
        register struct mbuf *m;
        register int count;
        int space;

        /*
         * If first mbuf has no cluster, and has room for len bytes
         * without shifting current data, pullup into it,
         * otherwise allocate a new mbuf to prepend to the chain.
         */
        if ((n->m_flags & M_EXT) == 0 &&
            n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
                if (n->m_len >= len)
                        return (n);
                m = n;
                n = n->m_next;
                len -= m->m_len;
        } else {
                if (len > MHLEN)
                        goto bad;
                MGET(m, M_DONTWAIT, n->m_type);
                if (m == 0)
                        goto bad;
                m->m_len = 0;
                if (n->m_flags & M_PKTHDR) {
                        M_COPY_PKTHDR(m, n);
                        n->m_flags &= ~M_PKTHDR;
                }
        }
        space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
        do {
                count = min(min(max(len, max_protohdr), space), n->m_len);
                bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
                  (unsigned)count);
                len -= count;
                m->m_len += count;
                n->m_len -= count;
                space -= count;
                if (n->m_len)
                        n->m_data += count;
                else
                        n = m_free(n);
        } while (len > 0 && n);
        if (len > 0) {
                (void) m_free(m);
                goto bad;
        }
        m->m_next = n;
        return (m);
bad:
        m_freem(n);
        MPFail++;
        return (0);
}
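
/*
 * Usage sketch for m_pullup() (illustrative): make sure a header can be
 * read through mtod() as one contiguous structure.  "struct ip" is only an
 * example; the pattern is the same for any header that fits in one mbuf.
 *
 *     if ((m = m_pullup(m, sizeof(struct ip))) == NULL)
 *             return;                         // chain was freed on failure
 *     ip = mtod(m, struct ip *);
 */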

/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 */
struct mbuf *
m_split(m0, len0, wait)
        register struct mbuf *m0;
        int len0, wait;
{
        register struct mbuf *m, *n;
        unsigned len = len0, remain;

        for (m = m0; m && len > m->m_len; m = m->m_next)
                len -= m->m_len;
        if (m == 0)
                return (0);
        remain = m->m_len - len;
        if (m0->m_flags & M_PKTHDR) {
                MGETHDR(n, wait, m0->m_type);
                if (n == 0)
                        return (0);
                n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
                n->m_pkthdr.len = m0->m_pkthdr.len - len0;
                m0->m_pkthdr.len = len0;
                if (m->m_flags & M_EXT)
                        goto extpacket;
                if (remain > MHLEN) {
                        /* m can't be the lead packet */
                        MH_ALIGN(n, 0);
                        n->m_next = m_split(m, len, wait);
                        if (n->m_next == 0) {
                                (void) m_free(n);
                                return (0);
                        } else
                                return (n);
                } else
                        MH_ALIGN(n, remain);
        } else if (remain == 0) {
                n = m->m_next;
                m->m_next = 0;
                return (n);
        } else {
                MGET(n, wait, m->m_type);
                if (n == 0)
                        return (0);
                M_ALIGN(n, remain);
        }
extpacket:
        if (m->m_flags & M_EXT) {
                n->m_flags |= M_EXT;
                n->m_ext = m->m_ext;
                if(!m->m_ext.ext_ref)
                        mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
                else
                        (*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
                                                m->m_ext.ext_size);
                m->m_ext.ext_size = 0; /* For Accounting XXXXXX danger */
                n->m_data = m->m_data + len;
        } else {
                bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
        }
        n->m_len = remain;
        m->m_len = len;
        n->m_next = m->m_next;
        m->m_next = 0;
        return (n);
}
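
/*
 * Usage sketch for m_split() (illustrative): cut a chain after the first
 * 512 bytes.  On success the original chain keeps those 512 bytes and the
 * returned chain holds the remainder; on failure NULL is returned and the
 * routine tries to leave the original chain as it was.
 *
 *     struct mbuf *tail = m_split(m, 512, M_DONTWAIT);
 */
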
/*
 * Routine to copy from device local memory into mbufs.
 */
struct mbuf *
m_devget(buf, totlen, off0, ifp, copy)
        char *buf;
        int totlen, off0;
        struct ifnet *ifp;
        void (*copy) __P((char *from, caddr_t to, u_int len));
{
        register struct mbuf *m;
        struct mbuf *top = 0, **mp = &top;
        register int off = off0, len;
        register char *cp;
        char *epkt;

        cp = buf;
        epkt = cp + totlen;
        if (off) {
                cp += off + 2 * sizeof(u_short);
                totlen -= 2 * sizeof(u_short);
        }
        MGETHDR(m, M_DONTWAIT, MT_DATA);
        if (m == 0)
                return (0);
        m->m_pkthdr.rcvif = ifp;
        m->m_pkthdr.len = totlen;
        m->m_len = MHLEN;

        while (totlen > 0) {
                if (top) {
                        MGET(m, M_DONTWAIT, MT_DATA);
                        if (m == 0) {
                                m_freem(top);
                                return (0);
                        }
                        m->m_len = MLEN;
                }
                len = min(totlen, epkt - cp);
                if (len >= MINCLSIZE) {
                        MCLGET(m, M_DONTWAIT);
                        if (m->m_flags & M_EXT)
                                m->m_len = len = min(len, MCLBYTES);
                        else
                                len = m->m_len;
                } else {
                        /*
                         * Place initial small packet/header at end of mbuf.
                         */
                        if (len < m->m_len) {
                                if (top == 0 && len + max_linkhdr <= m->m_len)
                                        m->m_data += max_linkhdr;
                                m->m_len = len;
                        } else
                                len = m->m_len;
                }
                if (copy)
                        copy(cp, mtod(m, caddr_t), (unsigned)len);
                else
                        bcopy(cp, mtod(m, caddr_t), (unsigned)len);
                cp += len;
                *mp = m;
                mp = &m->m_next;
                totlen -= len;
                if (cp == epkt)
                        cp = buf;
        }
        return (top);
}
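
/*
 * Usage sketch for m_devget() (illustrative): a driver receive path can copy
 * a frame out of its own buffer into a packet-header mbuf chain in one call;
 * passing NULL for the copy callback falls back to plain bcopy().  "rxbuf",
 * "frame_len" and "ifp" stand for the driver's receive buffer, frame length
 * and interface pointer.
 *
 *     struct mbuf *m = m_devget(rxbuf, frame_len, 0, ifp, NULL);
 *     if (m == NULL)
 *             return;                         // out of mbufs, frame dropped
 */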

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(m0, off, len, cp)
        struct  mbuf *m0;
        register int off;
        register int len;
        caddr_t cp;
{
        register int mlen;
        register struct mbuf *m = m0, *n;
        int totlen = 0;

        if (m0 == 0)
                return;
        while (off > (mlen = m->m_len)) {
                off -= mlen;
                totlen += mlen;
                if (m->m_next == 0) {
                        n = m_getclr(M_DONTWAIT, m->m_type);
                        if (n == 0)
                                goto out;
                        n->m_len = min(MLEN, len + off);
                        m->m_next = n;
                }
                m = m->m_next;
        }
        while (len > 0) {
                mlen = min(m->m_len - off, len);
                bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
                cp += mlen;
                len -= mlen;
                mlen += off;
                off = 0;
                totlen += mlen;
                if (len == 0)
                        break;
                if (m->m_next == 0) {
                        n = m_get(M_DONTWAIT, m->m_type);
                        if (n == 0)
                                break;
                        n->m_len = min(MLEN, len);
                        m->m_next = n;
                }
                m = m->m_next;
        }
out:    if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
                m->m_pkthdr.len = totlen;
}
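
/*
 * Usage sketch for m_copyback() (illustrative): overwrite bytes at a given
 * offset with data from a flat buffer, extending the chain with extra mbufs
 * if it is too short (the copy stops quietly if allocation fails).  "tcph"
 * stands for a header structure prepared by the caller.
 *
 *     m_copyback(m, 20, sizeof(tcph), (caddr_t)&tcph);
 */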

#ifndef __ECOS
void
m_print(const struct mbuf *m)
{
        int len;
        const struct mbuf *m2;

        len = m->m_pkthdr.len;
        m2 = m;
        while (len) {
                printf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");
                len -= m2->m_len;
                m2 = m2->m_next;
        }
        return;
}
#endif
