OpenCores
URL https://opencores.org/ocsvn/openrisc/openrisc/trunk

Subversion Repositories openrisc

[/] [openrisc/] [trunk/] [rtos/] [ecos-2.0/] [packages/] [net/] [bsd_tcpip/] [v2_0/] [src/] [ecos/] [synch.c] - Blame information for rev 174

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 27 unneback
//==========================================================================
2
//
3
//      src/ecos/synch.c
4
//
5
//==========================================================================
6
//####BSDCOPYRIGHTBEGIN####
7
//
8
// -------------------------------------------
9
//
10
// Portions of this software may have been derived from OpenBSD, 
11
// FreeBSD or other sources, and are covered by the appropriate
12
// copyright disclaimers included herein.
13
//
14
// Portions created by Red Hat are
15
// Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
16
//
17
// -------------------------------------------
18
//
19
//####BSDCOPYRIGHTEND####
20
//==========================================================================
21
 
22
//==========================================================================
23
//
24
//      ecos/synch.c
25
//
26
//      eCos wrapper and synch functions
27
//
28
//==========================================================================
29
//####BSDCOPYRIGHTBEGIN####
30
//
31
// -------------------------------------------
32
//
33
// Portions of this software may have been derived from OpenBSD or other sources,
34
// and are covered by the appropriate copyright disclaimers included herein.
35
//
36
// -------------------------------------------
37
//
38
//####BSDCOPYRIGHTEND####
39
//==========================================================================
40
//#####DESCRIPTIONBEGIN####
41
//
42
// Author(s):    gthomas, hmt
43
// Contributors: gthomas, hmt
44
// Date:         2000-01-10
45
// Purpose:      
46
// Description:  
47
//              
48
//
49
//####DESCRIPTIONEND####
50
//
51
//==========================================================================
52
 
53
 
54
// Synch routines, etc., used by network code
55
 
56
#include <sys/param.h>
57
#include <pkgconf/net.h>
58
 
59
#include <cyg/infra/diag.h>
60
#include <cyg/hal/hal_intr.h>
61
#include <cyg/kernel/kapi.h>
62
 
63
#include <cyg/infra/cyg_ass.h>
64
 
65
//---------------------------- splx() emulation ------------------------------
66
// This contains both the SPLX stuff and tsleep/wakeup - because those must
67
// be SPLX aware.  They release the SPLX lock when sleeping, and reclaim it
68
// (if needs be) at wakeup.
69
//
70
// The variable spl_state (and the associated bit patterns) is used to keep
71
// track of the "splx()" level.  This is an artifact of the original stack,
72
// based on the BSD interrupt world (interrupts and processing could be
73
// masked based on a level value, supported by hardware).  This is not very
74
// real-time, so the emulation uses proper eCos tools and techniques to
75
// accomplish the same result.  The key here is in the analysis of the
76
// various "levels", why they are used, etc.
77
//
78
// SPL_IMP is called in order to protect internal data structures
79
// short-term, primarily so that interrupt processing does not interfere
80
// with them.
81
// 
82
// SPL_CLOCK is called in order to ensure that a timestamp is valid i.e. no
83
// time passes while the stamp is being taken (since it is a potentially
84
// non-idempotent data structure).
85
//
86
// SPL_SOFTNET is used to prevent all other stack processing, including
87
// interrupts (DSRs), etc.
88
//
89
// SPL_INTERNAL is used when running the pseudo-DSR in timeout.c - this
90
// runs what should really be the network interface device's DSR, and any
91
// timeout routines that are scheduled.  (They are broken out into a thread
92
// to isolate the network locking from the rest of the system)
93
//
94
// NB a thread in this state can tsleep(); see below.  Tsleep releases and
95
// reclaims the locks and so on.  This is necessary because of the possible
96
// conflict where
97
//     I splsoft
98
//     I tsleep
99
//     He runs, he is lower priority
100
//     He splsofts
101
//     He or something else awakens me
102
//     I want to run, but he has splsoft, so I wait
103
//     He runs and releases splsoft
104
//     I awaken and go.
105
 
106
// Current emulated "spl" level: a bitmask of the SPL_* bits below.
// Written only while splx_mutex is held; volatile because it is read
// outside the lock (e.g. the fast-path ownership check in spl_any()).
static volatile cyg_uint32 spl_state = 0;
#define SPL_IMP      0x01  // short-term protection of internal data structures
#define SPL_NET      0x02  // network stack processing
#define SPL_CLOCK    0x04  // timestamp consistency - no time passes while held
#define SPL_SOFTNET  0x08  // locks out all other stack processing, incl. DSRs
#define SPL_INTERNAL 0x10  // pseudo-DSR / timeout thread (see timeout.c)

// Single mutex providing the actual mutual exclusion for every spl level,
// plus the handle of the thread currently holding it (0 when free).
static cyg_mutex_t splx_mutex;
static volatile cyg_handle_t splx_thread;
115
 
116
 
117
// Optional call tracing: when CYGIMPL_TRACE_SPLX is defined, every spl
// entry point receives the caller's file/line and logs a trace event
// (see do_sched_event below); otherwise the hooks compile away to nothing.
#ifdef CYGIMPL_TRACE_SPLX
#define SPLXARGS const char *file, const int line
#define SPLXMOREARGS , const char *file, const int line
#define SPLXTRACE do_sched_event(__FUNCTION__, file, line, spl_state)
#else
#define SPLXARGS void
#define SPLXMOREARGS
#define SPLXTRACE
#endif
126
 
127
 
128
// Common implementation of all the spl*() entry points: acquire the splx
// lock (unless this thread already owns it) and OR the requested SPL_*
// bit(s) into spl_state.  Returns the previous spl_state value, which the
// matching cyg_splx() call uses as a mask to restore the prior level -
// 0 means "we took the lock here, release fully on restore".
static inline cyg_uint32
spl_any( cyg_uint32 which )
{
    cyg_uint32 old_spl = spl_state;
    // Not the owner: block until the lock is free, then claim ownership.
    if ( cyg_thread_self() != splx_thread ) {
        cyg_mutex_lock( &splx_mutex );
        old_spl = 0; // Free when we unlock this context
        CYG_ASSERT( 0 == splx_thread, "Thread still owned" );
        CYG_ASSERT( 0 == spl_state, "spl still set" );
        splx_thread = cyg_thread_self();
    }
    CYG_ASSERT( splx_mutex.locked, "spl_any: mutex not locked" );
    CYG_ASSERT( (cyg_handle_t)splx_mutex.owner == cyg_thread_self(),
                "spl_any: mutex not mine" );
    spl_state |= which;
    return old_spl;
}
145
 
146
 
147
cyg_uint32
148
cyg_splimp(SPLXARGS)
149
{
150
    SPLXTRACE;
151
    return spl_any( SPL_IMP );
152
}
153
 
154
cyg_uint32
155
cyg_splclock(SPLXARGS)
156
{
157
    SPLXTRACE;
158
    return spl_any( SPL_CLOCK );
159
}
160
 
161
cyg_uint32
162
cyg_splnet(SPLXARGS)
163
{
164
    SPLXTRACE;
165
    return spl_any( SPL_NET );
166
}
167
 
168
cyg_uint32
169
cyg_splhigh(SPLXARGS)
170
{
171
    SPLXTRACE;
172
    // splhigh did SPLSOFTNET in the contrib, so this is the same
173
    return spl_any( SPL_SOFTNET );
174
}
175
 
176
cyg_uint32
177
cyg_splsoftnet(SPLXARGS)
178
{
179
    SPLXTRACE;
180
    return spl_any( SPL_SOFTNET );
181
}
182
 
183
cyg_uint32
184
cyg_splinternal(SPLXARGS)
185
{
186
    SPLXTRACE;
187
    return spl_any( SPL_INTERNAL );
188
}
189
 
190
 
191
//
// Return to a previous interrupt state/level.
// 'old_state' is the value returned by the matching spl*() call; it is
// ANDed into spl_state so that nested spl levels unwind correctly.  When
// the last bit clears, this thread gives up ownership of the splx lock.
//
void
cyg_splx(cyg_uint32 old_state SPLXMOREARGS)
{
    SPLXTRACE;

    CYG_ASSERT( 0 != spl_state, "No state set" );
    CYG_ASSERT( splx_mutex.locked, "splx: mutex not locked" );
    CYG_ASSERT( (cyg_handle_t)splx_mutex.owner == cyg_thread_self(),
                "splx: mutex not mine" );

    // Mask down to the level that was in force before the matching spl*().
    spl_state &= old_state;

    if ( 0 == spl_state ) {
        // All levels released: drop ownership, then release the mutex.
        splx_thread = 0;
        cyg_mutex_unlock( &splx_mutex );
    }
}
211
 
212
//------------------ tsleep() and wakeup() emulation ---------------------------
//
// Structure used to keep track of 'tsleep' style events
//
struct wakeup_event {
    void *chan;    // BSD-style wait channel; 0 marks the slot as free
    cyg_sem_t sem; // posted by cyg_wakeup() to release the sleeper
};
static struct wakeup_event wakeup_list[CYGPKG_NET_NUM_WAKEUP_EVENTS];
221
 
222
 
223
// Called to initialize structures used by timeout functions
224
void
225
cyg_tsleep_init(void)
226
{
227
    int i;
228
    struct wakeup_event *ev;
229
    // Create list of "wakeup event" semaphores
230
    for (i = 0, ev = wakeup_list;  i < CYGPKG_NET_NUM_WAKEUP_EVENTS;  i++, ev++) {
231
        ev->chan = 0;
232
        cyg_semaphore_init(&ev->sem, 0);
233
    }
234
    // Initialize the mutex and thread id:
235
    cyg_mutex_init( &splx_mutex );
236
    splx_thread = 0;
237
}
238
 
239
 
240
//
241
// Signal an event
242
void
243
cyg_wakeup(void *chan)
244
{
245
    int i;
246
    struct wakeup_event *ev;
247
    cyg_scheduler_lock(); // Ensure scan is safe
248
    // NB this is broadcast semantics because a sleeper/wakee holds the
249
    // slot until they exit.  This avoids a race condition whereby the
250
    // semaphore can get an extra post - and then the slot is freed, so the
251
    // sem wait returns immediately, AOK, so the slot wasn't freed.
252
    for (i = 0, ev = wakeup_list;  i < CYGPKG_NET_NUM_WAKEUP_EVENTS;  i++, ev++)
253
        if (ev->chan == chan)
254
            cyg_semaphore_post(&ev->sem);
255
 
256
    cyg_scheduler_unlock();
257
}
258
 
259
// ------------------------------------------------------------------------
// Wait for an event with timeout
//   tsleep(event, priority, state, timeout)
//     event - the thing to wait for (the wait "channel")
//     priority - unused
//     state    - a descriptive message (unused here)
//     timeout  - max time (in ticks) to wait; 0 means wait forever
//   returns:
//     0         - event was "signalled"
//     ETIMEDOUT - timeout occurred
//     EINTR     - thread broken out of sleep
//
// If the caller holds the splx lock, it is released for the duration of
// the sleep and re-acquired (with the same spl_state) before returning.
//
int
cyg_tsleep(void *chan, int pri, char *wmesg, int timo)
{
    int i, res = 0;
    struct wakeup_event *ev;
    cyg_tick_count_t sleep_time;
    cyg_handle_t self = cyg_thread_self();
    int old_splflags = 0; // no flags held

    cyg_scheduler_lock();

    // Safely find a free slot:
    for (i = 0, ev = wakeup_list;  i < CYGPKG_NET_NUM_WAKEUP_EVENTS;  i++, ev++) {
        if (ev->chan == 0) {
            ev->chan = chan;
            break;
        }
    }
    CYG_ASSERT( i <  CYGPKG_NET_NUM_WAKEUP_EVENTS, "no sleep slots" );
    CYG_ASSERT( 1 == cyg_scheduler_read_lock(),
                "Tsleep - called with scheduler locked" );
    // Defensive: no free slot - report a timeout rather than corrupt state.
    if ( i >= CYGPKG_NET_NUM_WAKEUP_EVENTS ) {
        cyg_scheduler_unlock();
        return ETIMEDOUT;
    }

    // If we are the owner, then we must release the mutex when
    // we wait.  Save the current spl level so it can be restored after
    // waking (see the deadlock scenario described in the header comment).
    if ( self == splx_thread ) {
        old_splflags = spl_state; // Keep them for restoration
        CYG_ASSERT( spl_state, "spl_state not set" );
        // Also want to assert that the mutex is locked...
        CYG_ASSERT( splx_mutex.locked, "Splx mutex not locked" );
        CYG_ASSERT( (cyg_handle_t)splx_mutex.owner == self, "Splx mutex not mine" );
        splx_thread = 0;
        spl_state = 0;
        cyg_mutex_unlock( &splx_mutex );
    }

    // Re-initialize the semaphore - it might have counted up arbitrarily
    // in the time between a prior sleeper being signalled and them
    // actually running.
    cyg_semaphore_init(&ev->sem, 0);

    // This part actually does the wait:
    // As of the new kernel, we can do this without unlocking the scheduler
    if (timo) {
        sleep_time = cyg_current_time() + timo;
        if (!cyg_semaphore_timed_wait(&ev->sem, sleep_time)) {
            // Wait failed: distinguish a real timeout from a forced
            // break-out by checking whether the deadline has passed.
            if( cyg_current_time() >= sleep_time )
                res = ETIMEDOUT;
            else
                res = EINTR;
        }
    } else {
        if (!cyg_semaphore_wait(&ev->sem) ) {
            res = EINTR; // wait was aborted rather than signalled
        }
    }

    ev->chan = 0;  // Free the slot - the wakeup call cannot do this.

    if ( old_splflags ) { // restore to previous state
        // As of the new kernel, we can do this with the scheduler locked
        cyg_mutex_lock( &splx_mutex ); // this might wait
        CYG_ASSERT( 0 == splx_thread, "Splx thread set in tsleep" );
        CYG_ASSERT( 0 == spl_state, "spl_state set in tsleep" );
        splx_thread = self; // got it now...
        spl_state = old_splflags;
    }

    cyg_scheduler_unlock();
    return res;
}
346
 
347
 
348
 
349
// ------------------------------------------------------------------------
// DEBUGGING ROUTINES
// NOTE(review): the #undefs below suggest cyg_scheduler_lock and friends
// are remapped to the traced _cyg_* wrappers by a macro elsewhere when
// tracing is on; the #undefs restore the real kernel entry points so the
// wrappers themselves can call them - confirm against the tracing header.
#ifdef CYGIMPL_TRACE_SPLX
#undef cyg_scheduler_lock
#undef cyg_scheduler_safe_lock
#undef cyg_scheduler_unlock
355
 
356
// Ring buffer of trace events recorded by the spl/scheduler hooks.
#define MAX_SCHED_EVENTS 256
static struct _sched_event {
    char *fun, *file;   // function name and call-site file
    int line, lock;     // call-site line and lock/level datum
} sched_event[MAX_SCHED_EVENTS];
static int next_sched_event = 0;    // next slot to write (wraps)
static int total_sched_events = 0;  // total events ever recorded

// Record one trace event in the ring buffer, overwriting the oldest
// entry once the buffer is full.
static void
do_sched_event(char *fun, char *file, int line, int lock)
{
    struct _sched_event *slot = &sched_event[next_sched_event];

    next_sched_event = (next_sched_event + 1) % MAX_SCHED_EVENTS;

    slot->fun  = fun;
    slot->file = file;
    slot->line = line;
    slot->lock = lock;

    total_sched_events++;
}
377
 
378
static void
379
show_sched_events(void)
380
{
381
    int i;
382
    struct _sched_event *se;
383
    if (total_sched_events < MAX_SCHED_EVENTS) {
384
        i = 0;
385
    } else {
386
        i = next_sched_event + 1;
387
        if (i == MAX_SCHED_EVENTS) i = 0;
388
    }
389
    diag_printf("%d total scheduler events\n", total_sched_events);
390
    while (i != next_sched_event) {
391
        se = &sched_event[i];
392
        diag_printf("%s - lock: %d, called from %s.%d\n", se->fun, se->lock, se->file, se->line);
393
        if (++i == MAX_SCHED_EVENTS) i = 0;
394
    }
395
}
396
 
397
#define SPLX_TRACE_DATA() cyg_scheduler_read_lock()
398
 
399
// Traced variant of cyg_scheduler_lock(): take the lock, then record the
// event together with the resulting scheduler lock depth.
void
_cyg_scheduler_lock(char *file, int line)
{
    int depth;
    cyg_scheduler_lock();
    depth = SPLX_TRACE_DATA();
    do_sched_event(__FUNCTION__, file, line, depth);
}
405
 
406
// Traced variant of cyg_scheduler_safe_lock(): take the lock, then record
// the event together with the resulting scheduler lock depth.
void
_cyg_scheduler_safe_lock(char *file, int line)
{
    int depth;
    cyg_scheduler_safe_lock();
    depth = SPLX_TRACE_DATA();
    do_sched_event(__FUNCTION__, file, line, depth);
}
412
 
413
// Traced variant of cyg_scheduler_unlock(): release the lock, then record
// the event together with the remaining scheduler lock depth.
void
_cyg_scheduler_unlock(char *file, int line)
{
    int depth;
    cyg_scheduler_unlock();
    depth = SPLX_TRACE_DATA();
    do_sched_event(__FUNCTION__, file, line, depth);
}
419
#endif // CYGIMPL_TRACE_SPLX
420
 
421
// EOF synch.c

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.