OpenCores
URL https://opencores.org/ocsvn/openrisc/openrisc/trunk

Subversion Repositories openrisc

[/] [openrisc/] [trunk/] [rtos/] [ecos-2.0/] [packages/] [services/] [memalloc/] [common/] [v2_0/] [include/] [memjoin.inl] - Blame information for rev 459

Go to most recent revision | Details | Compare with Previous | View Log

Line No. Rev Author Line
1 27 unneback
#ifndef CYGONCE_MEMALLOC_MEMJOIN_INL
2
#define CYGONCE_MEMALLOC_MEMJOIN_INL
3
 
4
//==========================================================================
5
//
6
//      memjoin.inl
7
//
8
//      Pseudo memory pool used to join together other memory pools
9
//
10
//==========================================================================
11
//####ECOSGPLCOPYRIGHTBEGIN####
12
// -------------------------------------------
13
// This file is part of eCos, the Embedded Configurable Operating System.
14
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc.
15
//
16
// eCos is free software; you can redistribute it and/or modify it under
17
// the terms of the GNU General Public License as published by the Free
18
// Software Foundation; either version 2 or (at your option) any later version.
19
//
20
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY
21
// WARRANTY; without even the implied warranty of MERCHANTABILITY or
22
// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
23
// for more details.
24
//
25
// You should have received a copy of the GNU General Public License along
26
// with eCos; if not, write to the Free Software Foundation, Inc.,
27
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
28
//
29
// As a special exception, if other files instantiate templates or use macros
30
// or inline functions from this file, or you compile this file and link it
31
// with other works to produce a work based on this file, this file does not
32
// by itself cause the resulting work to be covered by the GNU General Public
33
// License. However the source code for this file must still be made available
34
// in accordance with section (3) of the GNU General Public License.
35
//
36
// This exception does not invalidate any other reasons why a work based on
37
// this file might be covered by the GNU General Public License.
38
//
39
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc.
40
// at http://sources.redhat.com/ecos/ecos-license/
41
// -------------------------------------------
42
//####ECOSGPLCOPYRIGHTEND####
43
//==========================================================================
44
//#####DESCRIPTIONBEGIN####
45
//
46
// Author(s):    jlarmour
47
// Contributors:
48
// Date:         2000-06-12
49
// Purpose:      Implement joined up memory pool class interface
50
// Description:  Inline class for constructing a pseudo allocator that contains
51
//               multiple other allocators. It caters solely to the requirements
52
//               of the malloc implementation.
53
// Usage:        #include <cyg/memalloc/memjoin.hxx>
54
//
55
//
56
//####DESCRIPTIONEND####
57
//
58
//==========================================================================
59
 
60
// CONFIGURATION

#include <pkgconf/memalloc.h>

// INCLUDES

#include <cyg/infra/cyg_type.h>        // types
#include <cyg/infra/cyg_ass.h>         // assertion macros
#include <cyg/infra/cyg_trac.h>        // tracing macros
#include <cyg/memalloc/memjoin.hxx>    // header for this file just in case
70
 
71
 
72
// FUNCTIONS
73
 
74
 
75
// -------------------------------------------------------------------------
76
// find_pool_for_ptr returns the pool that ptr came from
77
 
78
template 
79
inline T *
80
Cyg_Mempool_Joined::find_pool_for_ptr( const cyg_uint8 *ptr )
81
{
82
    cyg_uint8 i;
83
 
84
    for ( i=0; i < poolcount; i++ ) {
85
        if ( ptr >= pools[i].startaddr &&
86
             ptr < pools[i].endaddr ) {
87
            return pools[i].pool;
88
        } // if
89
    } // for
90
    return NULL;
91
} // Cyg_Mempool_Joined::find_pool_for_ptr()
92
 
93
 
94
// -------------------------------------------------------------------------
95
// Constructor
96
template 
97
inline
98
Cyg_Mempool_Joined::Cyg_Mempool_Joined( cyg_uint8 num_heaps, T *heaps[] )
99
{
100
    Cyg_Mempool_Status stat;
101
    cyg_uint8 i;
102
 
103
    CYG_REPORT_FUNCTION();
104
    CYG_REPORT_FUNCARG2( "num_heaps=%u, heaps=%08x", (int)num_heaps, heaps );
105
 
106
    CYG_CHECK_DATA_PTRC( heaps );
107
 
108
    poolcount = num_heaps;
109
 
110
    // allocate internal structures - this should work because we should be
111
    // the first allocation for this pool; and if there isn't enough space
112
    // for these teeny bits, what hope is there!
113
    for (i=0; i
114
        pools = (struct pooldesc *)
115
            heaps[i]->try_alloc( num_heaps * sizeof(struct pooldesc) );
116
        if ( NULL != pools )
117
            break;
118
    } // for
119
 
120
    CYG_ASSERT( pools != NULL,
121
                "Couldn't allocate internal structures from any pools!");
122
 
123
    // now set up internal structures
124
    for (i=0; i
125
        pools[i].pool = heaps[i];
126
        heaps[i]->get_status( CYG_MEMPOOL_STAT_ARENABASE|
127
                              CYG_MEMPOOL_STAT_ARENASIZE,
128
                              stat );
129
 
130
        CYG_ASSERT( stat.arenabase != (const cyg_uint8 *)-1,
131
                    "pool returns valid pool base" );
132
        CYG_CHECK_DATA_PTR( stat.arenabase, "Bad arena location" );
133
        CYG_ASSERT( stat.arenasize > 0, "pool returns valid pool size" );
134
 
135
        pools[i].startaddr = stat.arenabase;
136
        pools[i].endaddr = stat.arenabase + stat.arenasize;
137
    } // for
138
 
139
    CYG_REPORT_RETURN();
140
} // Cyg_Mempool_Joined::Cyg_Mempool_Joined()
141
 
142
 
143
 
144
// -------------------------------------------------------------------------
145
// Destructor
146
template 
147
inline
148
Cyg_Mempool_Joined::~Cyg_Mempool_Joined()
149
{
150
    CYG_REPORT_FUNCTION();
151
    CYG_REPORT_FUNCARGVOID();
152
 
153
    cyg_bool freestat;
154
 
155
    freestat = free( (cyg_uint8 *)pools, poolcount * sizeof(struct pooldesc) );
156
    CYG_ASSERT( freestat, "free failed!");
157
    CYG_REPORT_RETURN();
158
} // Cyg_Mempool_Joined::~Cyg_Mempool_Joined()
159
 
160
 
161
 
162
// -------------------------------------------------------------------------
163
// get some memory, return NULL if none available
164
template 
165
inline cyg_uint8 *
166
Cyg_Mempool_Joined::try_alloc( cyg_int32 size )
167
{
168
    cyg_uint8 i;
169
    cyg_uint8 *ptr=NULL;
170
 
171
    CYG_REPORT_FUNCTYPE( "returning memory at addr %08x" );
172
    CYG_REPORT_FUNCARG1DV( size );
173
 
174
    for (i=0; i
175
        ptr = pools[i].pool->try_alloc( size );
176
        if ( NULL != ptr )
177
            break;
178
    }
179
 
180
    CYG_REPORT_RETVAL( ptr );
181
    return ptr;
182
} // Cyg_Mempool_Joined::try_alloc()
183
 
184
 
185
// -------------------------------------------------------------------------
186
// resize existing allocation, if oldsize is non-NULL, previous
187
// allocation size is placed into it. If previous size not available,
188
// it is set to 0. NB previous allocation size may have been rounded up.
189
// Occasionally the allocation can be adjusted *backwards* as well as,
190
// or instead of forwards, therefore the address of the resized
191
// allocation is returned, or NULL if no resizing was possible.
192
// Note that this differs from ::realloc() in that no attempt is
193
// made to call malloc() if resizing is not possible - that is left
194
// to higher layers. The data is copied from old to new though.
195
// The effects of alloc_ptr==NULL or newsize==0 are undefined
196
template 
197
inline cyg_uint8 *
198
Cyg_Mempool_Joined::resize_alloc( cyg_uint8 *alloc_ptr, cyg_int32 newsize,
199
                                     cyg_int32 *oldsize )
200
{
201
    T *pool;
202
    cyg_uint8 * ret;
203
 
204
    CYG_REPORT_FUNCTYPE( "success=" );
205
    CYG_REPORT_FUNCARG3( "alloc_ptr=%08x, newsize=%d, &oldsize=%08x",
206
                        alloc_ptr, newsize, oldsize );
207
    CYG_CHECK_DATA_PTRC( alloc_ptr );
208
    if (NULL != oldsize )
209
        CYG_CHECK_DATA_PTRC( oldsize );
210
 
211
    pool = find_pool_for_ptr( alloc_ptr );
212
    CYG_ASSERT( NULL != pool, "Couldn't find pool for pointer!" );
213
 
214
    ret = pool->resize_alloc( alloc_ptr, newsize, oldsize );
215
 
216
    CYG_REPORT_RETVAL( ret );
217
    return ret;
218
} // Cyg_Mempool_Joined::resize_alloc()
219
 
220
 
221
// -------------------------------------------------------------------------
222
// free the memory back to the pool
223
// returns true on success
224
template 
225
inline cyg_bool
226
Cyg_Mempool_Joined::free( cyg_uint8 *ptr, cyg_int32 size )
227
{
228
    T *pool;
229
    cyg_bool ret;
230
 
231
    CYG_REPORT_FUNCTYPE("success=");
232
    CYG_REPORT_FUNCARG2( "ptr=%08x, size=%d", ptr, size );
233
    CYG_CHECK_DATA_PTRC( ptr );
234
 
235
    pool = find_pool_for_ptr( ptr );
236
    CYG_ASSERT( NULL != pool, "Couldn't find pool for pointer!" );
237
 
238
    ret = pool->free( ptr, size );
239
 
240
    CYG_REPORT_RETVAL( ret );
241
    return ret;
242
} // Cyg_Mempool_Joined::free()
243
 
244
 
245
// -------------------------------------------------------------------------
246
// Get memory pool status
247
// flags is a bitmask of requested fields to fill in. The flags are
248
// defined in common.hxx
249
template 
250
inline void
251
Cyg_Mempool_Joined::get_status( cyg_mempool_status_flag_t flags,
252
                                Cyg_Mempool_Status &status )
253
{
254
    cyg_uint8 i;
255
    Cyg_Mempool_Status tmpstat;
256
 
257
    status.arenasize      = status.freeblocks = 0;
258
    status.totalallocated = status.totalfree  = 0;
259
    status.maxfree        = status.origsize   = 0;
260
 
261
    for ( i=0; i
262
        if ( status.arenasize >= 0 ) {
263
            if ( 0 != (flags & CYG_MEMPOOL_STAT_ARENASIZE) ) {
264
                pools[i].pool->get_status( CYG_MEMPOOL_STAT_ARENASIZE,
265
                                           tmpstat );
266
                if ( tmpstat.arenasize > 0)
267
                    status.arenasize += tmpstat.arenasize;
268
                else
269
                    status.arenasize = -1;
270
            } // if
271
        } // if
272
 
273
        if ( status.freeblocks >= 0 ) {
274
            if ( 0 != (flags & CYG_MEMPOOL_STAT_FREEBLOCKS) ) {
275
                pools[i].pool->get_status( CYG_MEMPOOL_STAT_FREEBLOCKS,
276
                                           tmpstat );
277
                if ( tmpstat.freeblocks > 0 )
278
                    status.freeblocks += tmpstat.freeblocks;
279
                else
280
                    status.freeblocks = -1;
281
            } // if
282
        } // if
283
 
284
        if ( status.totalallocated >= 0 ) {
285
            if ( 0 != (flags & CYG_MEMPOOL_STAT_TOTALALLOCATED) ) {
286
                pools[i].pool->get_status( CYG_MEMPOOL_STAT_TOTALALLOCATED,
287
                                           tmpstat );
288
                if ( tmpstat.totalallocated > 0 )
289
                    status.totalallocated += tmpstat.totalallocated;
290
                else
291
                    status.totalallocated = -1;
292
            } // if
293
        } // if
294
 
295
        if ( status.totalfree >= 0 ) {
296
            if ( 0 != (flags & CYG_MEMPOOL_STAT_TOTALFREE) ) {
297
                pools[i].pool->get_status( CYG_MEMPOOL_STAT_TOTALFREE,
298
                                           tmpstat );
299
                if ( tmpstat.totalfree > 0 )
300
                    status.totalfree += tmpstat.totalfree;
301
                else
302
                    status.totalfree = -1;
303
            } // if
304
        } // if
305
 
306
        if ( status.maxfree >= 0 ) {
307
            if ( 0 != (flags & CYG_MEMPOOL_STAT_MAXFREE) ) {
308
                pools[i].pool->get_status( CYG_MEMPOOL_STAT_MAXFREE, tmpstat );
309
                if ( tmpstat.maxfree < 0 )
310
                    status.maxfree = -1;
311
                else if ( tmpstat.maxfree > status.maxfree )
312
                    status.maxfree = tmpstat.maxfree;
313
            } // if
314
        } // if
315
 
316
        if ( status.origsize >= 0 ) {
317
            if ( 0 != (flags & CYG_MEMPOOL_STAT_ORIGSIZE) ) {
318
                pools[i].pool->get_status( CYG_MEMPOOL_STAT_ORIGSIZE, tmpstat );
319
                if ( tmpstat.origsize > 0 )
320
                    status.origsize += tmpstat.origsize;
321
                else
322
                    status.origsize = -1;
323
            } // if
324
        } // if
325
 
326
        if ( status.maxoverhead >= 0 ) {
327
            if ( 0 != (flags & CYG_MEMPOOL_STAT_MAXOVERHEAD) ) {
328
                pools[i].pool->get_status( CYG_MEMPOOL_STAT_MAXOVERHEAD,
329
                                           tmpstat );
330
                if ( tmpstat.maxoverhead < 0 )
331
                    status.maxoverhead = -1;
332
                else if ( tmpstat.maxoverhead > status.maxoverhead )
333
                    status.maxoverhead = tmpstat.maxoverhead;
334
            } // if
335
        } // if
336
    } // for
337
} // Cyg_Mempool_Joined::get_status()
338
 
339
 
340
// -------------------------------------------------------------------------
341
 
342
#endif // ifndef CYGONCE_MEMALLOC_MEMJOIN_INL
343
// EOF memjoin.inl

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.