OpenCores
URL https://opencores.org/ocsvn/openrisc/openrisc/trunk

Subversion Repositories openrisc

[/] [openrisc/] [trunk/] [rtos/] [ecos-3.0/] [packages/] [net/] [bsd_tcpip/] [current/] [src/] [sys/] [kern/] [uipc_mbuf.c] - Blame information for rev 786

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 786 skrzyp
//==========================================================================
//
//      src/sys/kern/uipc_mbuf.c
//
//==========================================================================
// ####BSDCOPYRIGHTBEGIN####
// -------------------------------------------
// This file is part of eCos, the Embedded Configurable Operating System.
//
// Portions of this software may have been derived from FreeBSD
// or other sources, and if so are covered by the appropriate copyright
// and license included herein.
//
// Portions created by the Free Software Foundation are
// Copyright (C) 2002 Free Software Foundation, Inc.
// -------------------------------------------
// ####BSDCOPYRIGHTEND####
//==========================================================================

/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)uipc_mbuf.c 8.2 (Berkeley) 1/4/94
 * $FreeBSD: src/sys/kern/uipc_mbuf.c,v 1.51.2.7 2001/07/30 23:28:00 peter Exp $
 */
55
 
56
#include <sys/param.h>
57
#include <sys/malloc.h>
58
#include <sys/mbuf.h>
59
#include <sys/domain.h>
60
#include <sys/protosw.h>
61
 
62
static void mbinit __P((void *));
63
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbinit, NULL)
64
 
65
struct mbuf *mbutl;
66
char    *mclrefcnt;
67
struct mbstat mbstat;
68
u_long  mbtypes[MT_NTYPES];
69
struct mbuf *mmbfree;
70
union mcluster *mclfree;
71
int     max_linkhdr;
72
int     max_protohdr;
73
int     max_hdr;
74
int     max_datalen;
75
u_int   m_mballoc_wid = 0;
76
u_int   m_clalloc_wid = 0;
77
 
78
int mbuf_wait = 32;  // Time in ticks to wait for mbufs to come free
79
 
80
static void     m_reclaim __P((void));
81
 
82
#ifndef NMBCLUSTERS
83
#define NMBCLUSTERS     (512 + maxusers * 16)
84
#endif
85
#ifndef NMBUFS
86
#define NMBUFS          (nmbclusters * 4)
87
#endif
88
 
89
 
90
/* "number of clusters of pages" */
91
#define NCL_INIT        1
92
 
93
#define NMB_INIT        16
94
 
95
/* ARGSUSED*/
96
static void
97
mbinit(dummy)
98
        void *dummy;
99
{
100
        int s;
101
 
102
        mmbfree = NULL; mclfree = NULL;
103
        mbstat.m_msize = MSIZE;
104
        mbstat.m_mclbytes = MCLBYTES;
105
        mbstat.m_minclsize = MINCLSIZE;
106
        mbstat.m_mlen = MLEN;
107
        mbstat.m_mhlen = MHLEN;
108
 
109
        s = splimp();
110
        if (m_mballoc(NMB_INIT, M_DONTWAIT) == 0)
111
                goto bad;
112
#if MCLBYTES <= PAGE_SIZE
113
        if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0)
114
                goto bad;
115
#else
116
        /* It's OK to call contigmalloc in this context. */
117
        if (m_clalloc(16, M_WAIT) == 0)
118
                goto bad;
119
#endif
120
        splx(s);
121
        return;
122
bad:
123
        panic("mbinit");
124
}
125
 
126
/*
127
 * Allocate at least nmb mbufs and place on mbuf free list.
128
 * Must be called at splimp.
129
 */
130
/* ARGSUSED */
131
int
132
m_mballoc(nmb, how)
133
        register int nmb;
134
        int how;
135
{
136
        struct mbuf *p;
137
        int i;
138
 
139
        for (i = 0; i < nmb; i++) {
140
            p = (struct mbuf *)cyg_net_mbuf_alloc( );
141
            if (p != (struct mbuf *)0) {
142
                ((struct mbuf *)p)->m_next = mmbfree;
143
                mmbfree = (struct mbuf *)p;
144
                mbstat.m_mbufs++;
145
                mbtypes[MT_FREE]++;
146
            } else {
147
                // Warn - out of mbufs?
148
                return (0);
149
            }
150
        }
151
        return (1);
152
}
153
 
154
/*
155
 * Once the mb_map has been exhausted and if the call to the allocation macros
156
 * (or, in some cases, functions) is with M_WAIT, then it is necessary to rely
157
 * solely on reclaimed mbufs. Here we wait for an mbuf to be freed for a
158
 * designated (mbuf_wait) time.
159
 */
160
struct mbuf *
161
m_mballoc_wait(int caller, int type)
162
{
163
        struct mbuf *p;
164
        int s;
165
 
166
        s = splimp();
167
        m_mballoc_wid++;
168
        if ((tsleep(&m_mballoc_wid, PVM, "mballc", mbuf_wait)) == EWOULDBLOCK)
169
                m_mballoc_wid--;
170
        splx(s);
171
 
172
        /*
173
         * Now that we (think) that we've got something, we will redo an
174
         * MGET, but avoid getting into another instance of m_mballoc_wait()
175
         * XXX: We retry to fetch _even_ if the sleep timed out. This is left
176
         *      this way, purposely, in the [unlikely] case that an mbuf was
177
         *      freed but the sleep was not awakened in time.
178
         */
179
        p = NULL;
180
        switch (caller) {
181
        case MGET_C:
182
                MGET(p, M_DONTWAIT, type);
183
                break;
184
        case MGETHDR_C:
185
                MGETHDR(p, M_DONTWAIT, type);
186
                break;
187
        default:
188
                panic("m_mballoc_wait: invalid caller (%d)", caller);
189
        }
190
 
191
        s = splimp();
192
        if (p != NULL) {                /* We waited and got something... */
193
                mbstat.m_wait++;
194
                /* Wake up another if we have more free. */
195
                if (mmbfree != NULL)
196
                        MMBWAKEUP();
197
        }
198
        splx(s);
199
        return (p);
200
}
201
 
202
#if MCLBYTES > PAGE_SIZE
/* Count of clusters the allocator kthread has been asked to produce. */
static int i_want_my_mcl;

/*
 * Kernel process that allocates mbuf clusters in process context
 * (where blocking allocation is safe).  Sleeps until kicked via
 * i_want_my_mcl, then satisfies all outstanding requests.
 * Fix: the unused local `status` has been removed.
 */
static void
kproc_mclalloc(void)
{
        while (1) {
                tsleep(&i_want_my_mcl, PVM, "mclalloc", 0);

                for (; i_want_my_mcl; i_want_my_mcl--) {
                        if (m_clalloc(1, M_WAIT) == 0)
                                printf("m_clalloc failed even in process context!\n");
                }
        }
}

static struct proc *mclallocproc;
static struct kproc_desc mclalloc_kp = {
        "mclalloc",
        kproc_mclalloc,
        &mclallocproc
};
SYSINIT(mclallocproc, SI_SUB_KTHREAD_UPDATE, SI_ORDER_ANY, kproc_start,
           &mclalloc_kp);
#endif
229
 
230
/*
231
 * Allocate some number of mbuf clusters
232
 * and place on cluster free list.
233
 * Must be called at splimp.
234
 */
235
/* ARGSUSED */
236
int
237
m_clalloc(ncl, how)
238
        register int ncl;
239
        int how;
240
{
241
        union mcluster *p;
242
        int i;
243
 
244
        for (i = 0; i < ncl; i++) {
245
            p = (union mcluster *)cyg_net_cluster_alloc();
246
            if (p != (union mcluster *)0) {
247
                ((union mcluster *)p)->mcl_next = mclfree;
248
                mclfree = (union mcluster *)p;
249
                mbstat.m_clfree++;
250
                mbstat.m_clusters++;
251
            } else {
252
                // Warn - no more clusters?
253
                return (0);
254
            }
255
        }
256
        return (1);
257
}
258
 
259
/*
260
 * Once the mb_map submap has been exhausted and the allocation is called with
261
 * M_WAIT, we rely on the mclfree union pointers. If nothing is free, we will
262
 * sleep for a designated amount of time (mbuf_wait) or until we're woken up
263
 * due to sudden mcluster availability.
264
 */
265
caddr_t
266
m_clalloc_wait(void)
267
{
268
        caddr_t p;
269
        int s;
270
 
271
        /* Sleep until something's available or until we expire. */
272
        m_clalloc_wid++;
273
        if ((tsleep(&m_clalloc_wid, PVM, "mclalc", mbuf_wait)) == EWOULDBLOCK)
274
                m_clalloc_wid--;
275
 
276
        /*
277
         * Now that we (think) that we've got something, we will redo and
278
         * MGET, but avoid getting into another instance of m_clalloc_wait()
279
         */
280
        p = NULL;
281
        MCLALLOC(p, M_DONTWAIT);
282
 
283
        s = splimp();
284
        if (p != NULL) {        /* We waited and got something... */
285
                mbstat.m_wait++;
286
                /* Wake up another if we have more free. */
287
                if (mclfree != NULL)
288
                        MCLWAKEUP();
289
        }
290
 
291
        splx(s);
292
        return (p);
293
}
294
 
295
/*
296
 * When MGET fails, ask protocols to free space when short of memory,
297
 * then re-attempt to allocate an mbuf.
298
 */
299
struct mbuf *
300
m_retry(i, t)
301
        int i, t;
302
{
303
        register struct mbuf *m;
304
 
305
        /*
306
         * Must only do the reclaim if not in an interrupt context.
307
         */
308
        if (i == M_WAIT) {
309
                m_reclaim();
310
        }
311
 
312
        /*
313
         * Both m_mballoc_wait and m_retry must be nulled because
314
         * when the MGET macro is run from here, we deffinately do _not_
315
         * want to enter an instance of m_mballoc_wait() or m_retry() (again!)
316
         */
317
#undef m_retry
318
#undef m_mballoc_wait
319
#define m_mballoc_wait(caller,type)    (struct mbuf *)0
320
#define m_retry(i, t)   (struct mbuf *)0
321
        MGET(m, i, t);
322
#undef m_retry
323
#undef m_mballoc_wait
324
#define m_retry cyg_m_retry
325
#define m_retryhdr cyg_m_retryhdr
326
#define m_mballoc_wait cyg_m_mballoc_wait
327
 
328
        if (m != NULL)
329
                mbstat.m_wait++;
330
        else
331
                mbstat.m_drops++;
332
 
333
        return (m);
334
}
335
 
336
/*
337
 * As above; retry an MGETHDR.
338
 */
339
struct mbuf *
340
m_retryhdr(i, t)
341
        int i, t;
342
{
343
        register struct mbuf *m;
344
 
345
        /*
346
         * Must only do the reclaim if not in an interrupt context.
347
         */
348
        if (i == M_WAIT) {
349
                m_reclaim();
350
        }
351
 
352
#undef m_retryhdr
353
#undef m_mballoc_wait
354
#define m_mballoc_wait(caller,type)    (struct mbuf *)0
355
#define m_retryhdr(i, t) (struct mbuf *)0
356
        MGETHDR(m, i, t);
357
#undef m_retryhdr
358
#undef m_mballoc_wait
359
#define m_retry cyg_m_retry
360
#define m_retryhdr cyg_m_retryhdr
361
#define m_mballoc_wait cyg_m_mballoc_wait
362
 
363
        if (m != NULL)
364
                mbstat.m_wait++;
365
        else
366
                mbstat.m_drops++;
367
 
368
        return (m);
369
}
370
 
371
static void
372
m_reclaim()
373
{
374
        register struct domain *dp;
375
        register struct protosw *pr;
376
        int s = splimp();
377
 
378
        for (dp = domains; dp; dp = dp->dom_next)
379
                for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
380
                        if (pr->pr_drain)
381
                                (*pr->pr_drain)();
382
        splx(s);
383
        mbstat.m_drain++;
384
}
385
 
386
/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(int how, int type)
{
        struct mbuf *m;

        MGET(m, how, type);
        return (m);
}
400
 
401
/* Allocate a packet-header mbuf (function form of MGETHDR). */
struct mbuf *
m_gethdr(int how, int type)
{
        struct mbuf *m;

        MGETHDR(m, how, type);
        return (m);
}
410
 
411
struct mbuf *
412
m_getclr(how, type)
413
        int how, type;
414
{
415
        register struct mbuf *m;
416
 
417
        MGET(m, how, type);
418
        if (m == 0)
419
                return (0);
420
        bzero(mtod(m, caddr_t), MLEN);
421
        return (m);
422
}
423
 
424
/*
425
 * struct mbuf *
426
 * m_getm(m, len, how, type)
427
 *
428
 * This will allocate len-worth of mbufs and/or mbuf clusters (whatever fits
429
 * best) and return a pointer to the top of the allocated chain. If m is
430
 * non-null, then we assume that it is a single mbuf or an mbuf chain to
431
 * which we want len bytes worth of mbufs and/or clusters attached, and so
432
 * if we succeed in allocating it, we will just return a pointer to m.
433
 *
434
 * If we happen to fail at any point during the allocation, we will free
435
 * up everything we have already allocated and return NULL.
436
 *
437
 */
438
struct mbuf *
439
m_getm(struct mbuf *m, int len, int how, int type)
440
{
441
        struct mbuf *top, *tail, *mp, *mtail = NULL;
442
 
443
        MGET(mp, how, type);
444
        if (mp == NULL)
445
                return (NULL);
446
        else if (len > MINCLSIZE) {
447
                MCLGET(mp, how);
448
                if ((mp->m_flags & M_EXT) == 0) {
449
                        m_free(mp);
450
                        return (NULL);
451
                }
452
        }
453
        mp->m_len = 0;
454
        len -= M_TRAILINGSPACE(mp);
455
 
456
        if (m != NULL)
457
                for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next);
458
        else
459
                m = mp;
460
 
461
        top = tail = mp;
462
        while (len > 0) {
463
                MGET(mp, how, type);
464
                if (mp == NULL)
465
                        goto failed;
466
 
467
                tail->m_next = mp;
468
                tail = mp;
469
                if (len > MINCLSIZE) {
470
                        MCLGET(mp, how);
471
                        if ((mp->m_flags & M_EXT) == 0)
472
                                goto failed;
473
                }
474
 
475
                mp->m_len = 0;
476
                len -= M_TRAILINGSPACE(mp);
477
        }
478
 
479
        if (mtail != NULL)
480
                mtail->m_next = top;
481
        return (m);
482
 
483
failed:
484
        m_freem(top);
485
        return (NULL);
486
}
487
 
488
/* Free a single mbuf and return its successor in the chain. */
struct mbuf *
m_free(struct mbuf *m)
{
        struct mbuf *next;

        MFREE(m, next);
        return (next);
}
497
 
498
void
499
m_freem(m)
500
        register struct mbuf *m;
501
{
502
        register struct mbuf *n;
503
        struct mbuf *orig = m;
504
 
505
        if (m == NULL)
506
                return;
507
        do {
508
                MFREE(m, n);
509
                m = n;
510
                if (m == orig) {
511
                    diag_printf("DEBUG: Circular MBUF %p!\n", orig);
512
                    return;
513
                }
514
        } while (m);
515
}
516
 
517
/*
518
 * Mbuffer utility routines.
519
 */
520
 
521
/*
522
 * Lesser-used path for M_PREPEND:
523
 * allocate new mbuf to prepend to chain,
524
 * copy junk along.
525
 */
526
struct mbuf *
527
m_prepend(m, len, how)
528
        register struct mbuf *m;
529
        int len, how;
530
{
531
        struct mbuf *mn;
532
 
533
        MGET(mn, how, m->m_type);
534
        if (mn == (struct mbuf *)NULL) {
535
                m_freem(m);
536
                return ((struct mbuf *)NULL);
537
        }
538
        if (m->m_flags & M_PKTHDR) {
539
                M_COPY_PKTHDR(mn, m);
540
                m->m_flags &= ~M_PKTHDR;
541
        }
542
        mn->m_next = m;
543
        m = mn;
544
        if (len < MHLEN)
545
                MH_ALIGN(m, len);
546
        m->m_len = len;
547
        return (m);
548
}
549
 
550
/*
551
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
552
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
553
 * The wait parameter is a choice of M_WAIT/M_DONTWAIT from caller.
554
 * Note that the copy is read-only, because clusters are not copied,
555
 * only their reference counts are incremented.
556
 */
557
#define MCFail (mbstat.m_mcfail)
558
 
559
struct mbuf *
560
m_copym(m, off0, len, wait)
561
        register struct mbuf *m;
562
        int off0, wait;
563
        register int len;
564
{
565
        register struct mbuf *n, **np;
566
        register int off = off0;
567
        struct mbuf *top;
568
        int copyhdr = 0;
569
 
570
        if (off == 0 && m->m_flags & M_PKTHDR)
571
                copyhdr = 1;
572
        while (off > 0) {
573
                if (off < m->m_len)
574
                        break;
575
                off -= m->m_len;
576
                m = m->m_next;
577
        }
578
        np = &top;
579
        top = 0;
580
        while (len > 0) {
581
                if (m == 0) {
582
                        break;
583
                }
584
                MGET(n, wait, m->m_type);
585
                *np = n;
586
                if (n == 0)
587
                        goto nospace;
588
                if (copyhdr) {
589
                        M_COPY_PKTHDR(n, m);
590
                        if (len == M_COPYALL)
591
                                n->m_pkthdr.len -= off0;
592
                        else
593
                                n->m_pkthdr.len = len;
594
                        copyhdr = 0;
595
                }
596
                n->m_len = min(len, m->m_len - off);
597
                if (m->m_flags & M_EXT) {
598
                        n->m_data = m->m_data + off;
599
                        if(!m->m_ext.ext_ref)
600
                                mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
601
                        else
602
                                (*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
603
                                                        m->m_ext.ext_size);
604
                        n->m_ext = m->m_ext;
605
                        n->m_flags |= M_EXT;
606
                } else
607
                        bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
608
                            (unsigned)n->m_len);
609
                if (len != M_COPYALL)
610
                        len -= n->m_len;
611
                off = 0;
612
                m = m->m_next;
613
                np = &n->m_next;
614
        }
615
        if (top == 0)
616
                MCFail++;
617
        return (top);
618
nospace:
619
        m_freem(top);
620
        MCFail++;
621
        return (0);
622
}
623
 
624
/*
625
 * Copy an entire packet, including header (which must be present).
626
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
627
 * Note that the copy is read-only, because clusters are not copied,
628
 * only their reference counts are incremented.
629
 */
630
struct mbuf *
631
m_copypacket(m, how)
632
        struct mbuf *m;
633
        int how;
634
{
635
        struct mbuf *top, *n, *o;
636
 
637
        MGET(n, how, m->m_type);
638
        top = n;
639
        if (!n)
640
                goto nospace;
641
 
642
        M_COPY_PKTHDR(n, m);
643
        n->m_len = m->m_len;
644
        if (m->m_flags & M_EXT) {
645
                n->m_data = m->m_data;
646
                if(!m->m_ext.ext_ref)
647
                        mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
648
                else
649
                        (*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
650
                                                m->m_ext.ext_size);
651
                n->m_ext = m->m_ext;
652
                n->m_flags |= M_EXT;
653
        } else {
654
                bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
655
        }
656
 
657
        m = m->m_next;
658
        while (m) {
659
                MGET(o, how, m->m_type);
660
                if (!o)
661
                        goto nospace;
662
 
663
                n->m_next = o;
664
                n = n->m_next;
665
 
666
                n->m_len = m->m_len;
667
                if (m->m_flags & M_EXT) {
668
                        n->m_data = m->m_data;
669
                        if(!m->m_ext.ext_ref)
670
                                mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
671
                        else
672
                                (*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
673
                                                        m->m_ext.ext_size);
674
                        n->m_ext = m->m_ext;
675
                        n->m_flags |= M_EXT;
676
                } else {
677
                        bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
678
                }
679
 
680
                m = m->m_next;
681
        }
682
        return top;
683
nospace:
684
        m_freem(top);
685
        MCFail++;
686
        return 0;
687
}
688
 
689
/*
690
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
691
 * continuing for "len" bytes, into the indicated buffer.
692
 */
693
void
694
m_copydata(m, off, len, cp)
695
        register struct mbuf *m;
696
        register int off;
697
        register int len;
698
        caddr_t cp;
699
{
700
        register unsigned count;
701
 
702
        while (off > 0) {
703
                if (off < m->m_len)
704
                        break;
705
                off -= m->m_len;
706
                m = m->m_next;
707
        }
708
        while (len > 0) {
709
                count = min(m->m_len - off, len);
710
                bcopy(mtod(m, caddr_t) + off, cp, count);
711
                len -= count;
712
                cp += count;
713
                off = 0;
714
                m = m->m_next;
715
        }
716
}
717
 
718
/*
719
 * Copy a packet header mbuf chain into a completely new chain, including
720
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
721
 * you need a writable copy of an mbuf chain.
722
 */
723
struct mbuf *
724
m_dup(m, how)
725
        struct mbuf *m;
726
        int how;
727
{
728
        struct mbuf **p, *top = NULL;
729
        int remain, moff, nsize;
730
 
731
        /* Sanity check */
732
        if (m == NULL)
733
                return (0);
734
 
735
        /* While there's more data, get a new mbuf, tack it on, and fill it */
736
        remain = m->m_pkthdr.len;
737
        moff = 0;
738
        p = &top;
739
        while (remain > 0 || top == NULL) {      /* allow m->m_pkthdr.len == 0 */
740
                struct mbuf *n;
741
 
742
                /* Get the next new mbuf */
743
                MGET(n, how, m->m_type);
744
                if (n == NULL)
745
                        goto nospace;
746
                if (top == NULL) {              /* first one, must be PKTHDR */
747
                        M_COPY_PKTHDR(n, m);
748
                        nsize = MHLEN;
749
                } else                          /* not the first one */
750
                        nsize = MLEN;
751
                if (remain >= MINCLSIZE) {
752
                        MCLGET(n, how);
753
                        if ((n->m_flags & M_EXT) == 0) {
754
                                (void)m_free(n);
755
                                goto nospace;
756
                        }
757
                        nsize = MCLBYTES;
758
                }
759
                n->m_len = 0;
760
 
761
                /* Link it into the new chain */
762
                *p = n;
763
                p = &n->m_next;
764
 
765
                /* Copy data from original mbuf(s) into new mbuf */
766
                while (n->m_len < nsize && m != NULL) {
767
                        int chunk = min(nsize - n->m_len, m->m_len - moff);
768
 
769
                        bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
770
                        moff += chunk;
771
                        n->m_len += chunk;
772
                        remain -= chunk;
773
                        if (moff == m->m_len) {
774
                                m = m->m_next;
775
                                moff = 0;
776
                        }
777
                }
778
        }
779
        return (top);
780
 
781
nospace:
782
        m_freem(top);
783
        MCFail++;
784
        return (0);
785
}
786
 
787
/*
788
 * Concatenate mbuf chain n to m.
789
 * Both chains must be of the same type (e.g. MT_DATA).
790
 * Any m_pkthdr is not updated.
791
 */
792
void
793
m_cat(m, n)
794
        register struct mbuf *m, *n;
795
{
796
        while (m->m_next)
797
                m = m->m_next;
798
        while (n) {
799
                if (m->m_flags & M_EXT ||
800
                    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
801
                        /* just join the two chains */
802
                        m->m_next = n;
803
                        return;
804
                }
805
                /* splat the data from one into the other */
806
                bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
807
                    (u_int)n->m_len);
808
                m->m_len += n->m_len;
809
                n = m_free(n);
810
        }
811
}
812
 
813
void
814
m_adj(mp, req_len)
815
        struct mbuf *mp;
816
        int req_len;
817
{
818
        register int len = req_len;
819
        register struct mbuf *m;
820
        register int count;
821
 
822
        if ((m = mp) == NULL)
823
                return;
824
        if (len >= 0) {
825
                /*
826
                 * Trim from head.
827
                 */
828
                while (m != NULL && len > 0) {
829
                        if (m->m_len <= len) {
830
                                len -= m->m_len;
831
                                m->m_len = 0;
832
                                m = m->m_next;
833
                        } else {
834
                                m->m_len -= len;
835
                                m->m_data += len;
836
                                len = 0;
837
                        }
838
                }
839
                m = mp;
840
                if (mp->m_flags & M_PKTHDR)
841
                        m->m_pkthdr.len -= (req_len - len);
842
        } else {
843
                /*
844
                 * Trim from tail.  Scan the mbuf chain,
845
                 * calculating its length and finding the last mbuf.
846
                 * If the adjustment only affects this mbuf, then just
847
                 * adjust and return.  Otherwise, rescan and truncate
848
                 * after the remaining size.
849
                 */
850
                len = -len;
851
                count = 0;
852
                for (;;) {
853
                        count += m->m_len;
854
                        if (m->m_next == (struct mbuf *)0)
855
                                break;
856
                        m = m->m_next;
857
                }
858
                if (m->m_len >= len) {
859
                        m->m_len -= len;
860
                        if (mp->m_flags & M_PKTHDR)
861
                                mp->m_pkthdr.len -= len;
862
                        return;
863
                }
864
                count -= len;
865
                if (count < 0)
866
                        count = 0;
867
                /*
868
                 * Correct length for chain is "count".
869
                 * Find the mbuf with last data, adjust its length,
870
                 * and toss data from remaining mbufs on chain.
871
                 */
872
                m = mp;
873
                if (m->m_flags & M_PKTHDR)
874
                        m->m_pkthdr.len = count;
875
                for (; m; m = m->m_next) {
876
                        if (m->m_len >= count) {
877
                                m->m_len = count;
878
                                break;
879
                        }
880
                        count -= m->m_len;
881
                }
882
                while (m->m_next)
883
                        (m = m->m_next) ->m_len = 0;
884
        }
885
}
886
 
887
/*
888
 * Rearange an mbuf chain so that len bytes are contiguous
889
 * and in the data area of an mbuf (so that mtod and dtom
890
 * will work for a structure of size len).  Returns the resulting
891
 * mbuf chain on success, frees it and returns null on failure.
892
 * If there is room, it will add up to max_protohdr-len extra bytes to the
893
 * contiguous region in an attempt to avoid being called next time.
894
 */
895
#define MPFail (mbstat.m_mpfail)
896
 
897
struct mbuf *
898
m_pullup(n, len)
899
        register struct mbuf *n;
900
        int len;
901
{
902
        register struct mbuf *m;
903
        register int count;
904
        int space;
905
 
906
        /*
907
         * If first mbuf has no cluster, and has room for len bytes
908
         * without shifting current data, pullup into it,
909
         * otherwise allocate a new mbuf to prepend to the chain.
910
         */
911
        if ((n->m_flags & M_EXT) == 0 &&
912
            n->m_data + len < &n->m_dat[MLEN] && n->m_next) {
913
                if (n->m_len >= len)
914
                        return (n);
915
                m = n;
916
                n = n->m_next;
917
                len -= m->m_len;
918
        } else {
919
                if (len > MHLEN)
920
                        goto bad;
921
                MGET(m, M_DONTWAIT, n->m_type);
922
                if (m == 0)
923
                        goto bad;
924
                m->m_len = 0;
925
                if (n->m_flags & M_PKTHDR) {
926
                        M_COPY_PKTHDR(m, n);
927
                        n->m_flags &= ~M_PKTHDR;
928
                }
929
        }
930
        space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
931
        do {
932
                count = min(min(max(len, max_protohdr), space), n->m_len);
933
                bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
934
                  (unsigned)count);
935
                len -= count;
936
                m->m_len += count;
937
                n->m_len -= count;
938
                space -= count;
939
                if (n->m_len)
940
                        n->m_data += count;
941
                else
942
                        n = m_free(n);
943
        } while (len > 0 && n);
944
        if (len > 0) {
945
                (void) m_free(m);
946
                goto bad;
947
        }
948
        m->m_next = n;
949
        return (m);
950
bad:
951
        m_freem(n);
952
        MPFail++;
953
        return (0);
954
}
955
 
956
/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 *
 * m0   - head of the chain to split
 * len0 - number of bytes that remain in the original (head) chain
 * wait - M_WAIT/M_DONTWAIT, passed through to the mbuf allocators
 */
struct mbuf *
m_split(m0, len0, wait)
        register struct mbuf *m0;
        int len0, wait;
{
        register struct mbuf *m, *n;
        unsigned len = len0, remain;

        /*
         * Walk to the mbuf containing the split point; on exit "len" is
         * the offset of the split within mbuf "m".
         */
        for (m = m0; m && len > m->m_len; m = m->m_next)
                len -= m->m_len;
        if (m == 0)
                return (0);     /* chain is shorter than len0 */
        remain = m->m_len - len;        /* bytes of "m" that go to the tail */
        if (m0->m_flags & M_PKTHDR) {
                /* Give the tail its own packet header and fix up lengths. */
                MGETHDR(n, wait, m0->m_type);
                if (n == 0)
                        return (0);
                n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
                n->m_pkthdr.len = m0->m_pkthdr.len - len0;
                m0->m_pkthdr.len = len0;
                if (m->m_flags & M_EXT)
                        goto extpacket; /* share the cluster, no data copy */
                if (remain > MHLEN) {
                        /* m can't be the lead packet */
                        MH_ALIGN(n, 0);
                        /* Recurse to split "m" itself, then hang the result
                         * off the freshly allocated header mbuf. */
                        n->m_next = m_split(m, len, wait);
                        if (n->m_next == 0) {
                                (void) m_free(n);
                                return (0);
                        } else
                                return (n);
                } else
                        MH_ALIGN(n, remain);
        } else if (remain == 0) {
                /* Split falls exactly on an mbuf boundary: just unlink. */
                n = m->m_next;
                m->m_next = 0;
                return (n);
        } else {
                MGET(n, wait, m->m_type);
                if (n == 0)
                        return (0);
                M_ALIGN(n, remain);
        }
extpacket:
        if (m->m_flags & M_EXT) {
                /*
                 * Both halves reference the same external buffer: bump the
                 * cluster refcount, or call the owner's ext_ref hook when
                 * one is registered.
                 */
                n->m_flags |= M_EXT;
                n->m_ext = m->m_ext;
                if(!m->m_ext.ext_ref)
                        mclrefcnt[mtocl(m->m_ext.ext_buf)]++;
                else
                        (*(m->m_ext.ext_ref))(m->m_ext.ext_buf,
                                                m->m_ext.ext_size);
                m->m_ext.ext_size = 0; /* For Accounting XXXXXX danger */
                n->m_data = m->m_data + len;
        } else {
                /* Plain mbuf: copy the tail bytes into the new mbuf. */
                bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
        }
        n->m_len = remain;
        m->m_len = len;
        n->m_next = m->m_next;
        m->m_next = 0;
        return (n);
}
1024
/*
1025
 * Routine to copy from device local memory into mbufs.
1026
 */
1027
struct mbuf *
1028
m_devget(buf, totlen, off0, ifp, copy)
1029
        char *buf;
1030
        int totlen, off0;
1031
        struct ifnet *ifp;
1032
        void (*copy) __P((char *from, caddr_t to, u_int len));
1033
{
1034
        register struct mbuf *m;
1035
        struct mbuf *top = 0, **mp = &top;
1036
        register int off = off0, len;
1037
        register char *cp;
1038
        char *epkt;
1039
 
1040
        cp = buf;
1041
        epkt = cp + totlen;
1042
        if (off) {
1043
                cp += off + 2 * sizeof(u_short);
1044
                totlen -= 2 * sizeof(u_short);
1045
        }
1046
        MGETHDR(m, M_DONTWAIT, MT_DATA);
1047
        if (m == 0)
1048
                return (0);
1049
        m->m_pkthdr.rcvif = ifp;
1050
        m->m_pkthdr.len = totlen;
1051
        m->m_len = MHLEN;
1052
 
1053
        while (totlen > 0) {
1054
                if (top) {
1055
                        MGET(m, M_DONTWAIT, MT_DATA);
1056
                        if (m == 0) {
1057
                                m_freem(top);
1058
                                return (0);
1059
                        }
1060
                        m->m_len = MLEN;
1061
                }
1062
                len = min(totlen, epkt - cp);
1063
                if (len >= MINCLSIZE) {
1064
                        MCLGET(m, M_DONTWAIT);
1065
                        if (m->m_flags & M_EXT)
1066
                                m->m_len = len = min(len, MCLBYTES);
1067
                        else
1068
                                len = m->m_len;
1069
                } else {
1070
                        /*
1071
                         * Place initial small packet/header at end of mbuf.
1072
                         */
1073
                        if (len < m->m_len) {
1074
                                if (top == 0 && len + max_linkhdr <= m->m_len)
1075
                                        m->m_data += max_linkhdr;
1076
                                m->m_len = len;
1077
                        } else
1078
                                len = m->m_len;
1079
                }
1080
                if (copy)
1081
                        copy(cp, mtod(m, caddr_t), (unsigned)len);
1082
                else
1083
                        bcopy(cp, mtod(m, caddr_t), (unsigned)len);
1084
                cp += len;
1085
                *mp = m;
1086
                mp = &m->m_next;
1087
                totlen -= len;
1088
                if (cp == epkt)
1089
                        cp = buf;
1090
        }
1091
        return (top);
1092
}
1093
 
1094
/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 *
 * m0  - head of the destination chain (function is a no-op if NULL)
 * off - byte offset into the chain at which to start writing
 * len - number of bytes to copy from cp
 * cp  - source buffer
 *
 * NOTE(review): extension mbufs are allocated with M_DONTWAIT; if an
 * allocation fails the copy is silently truncated -- callers cannot
 * detect the short write (only m_pkthdr.len reflects what was placed).
 */
void
m_copyback(m0, off, len, cp)
        struct  mbuf *m0;
        register int off;
        register int len;
        caddr_t cp;
{
        register int mlen;
        register struct mbuf *m = m0, *n;
        int totlen = 0;

        if (m0 == 0)
                return;
        /* Skip over (creating, if needed) mbufs up to offset "off". */
        while (off > (mlen = m->m_len)) {
                off -= mlen;
                totlen += mlen;
                if (m->m_next == 0) {
                        /* Chain too short: extend with a cleared mbuf. */
                        n = m_getclr(M_DONTWAIT, m->m_type);
                        if (n == 0)
                                goto out;
                        n->m_len = min(MLEN, len + off);
                        m->m_next = n;
                }
                m = m->m_next;
        }
        /* Copy, appending fresh mbufs whenever the chain runs out. */
        while (len > 0) {
                mlen = min (m->m_len - off, len);
                bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
                cp += mlen;
                len -= mlen;
                mlen += off;    /* bytes of this mbuf now accounted for */
                off = 0;        /* offset applies only to the first mbuf */
                totlen += mlen;
                if (len == 0)
                        break;
                if (m->m_next == 0) {
                        n = m_get(M_DONTWAIT, m->m_type);
                        if (n == 0)
                                break;
                        n->m_len = min(MLEN, len);
                        m->m_next = n;
                }
                m = m->m_next;
        }
        /* Grow the packet header length if the chain was extended. */
out:    if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
                m->m_pkthdr.len = totlen;
}
1146
 
1147
#ifndef __ECOS
1148
void
1149
m_print(const struct mbuf *m)
1150
{
1151
        int len;
1152
        const struct mbuf *m2;
1153
 
1154
        len = m->m_pkthdr.len;
1155
        m2 = m;
1156
        while (len) {
1157
                printf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");
1158
                len -= m2->m_len;
1159
                m2 = m2->m_next;
1160
        }
1161
        return;
1162
}
1163
#endif
1164
 

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.