OpenCores
URL https://opencores.org/ocsvn/openrisc/openrisc/trunk

Subversion Repositories openrisc

[/] [openrisc/] [trunk/] [rtos/] [ecos-3.0/] [packages/] [net/] [tcpip/] [current/] [src/] [ecos/] [synch.c] - Blame information for rev 786

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 786 skrzyp
//==========================================================================
2
//
3
//      ecos/synch.c
4
//
5
//      eCos wrapper and synch functions
6
//
7
//==========================================================================
8
// ####BSDALTCOPYRIGHTBEGIN####                                             
9
// -------------------------------------------                              
10
// Portions of this software may have been derived from OpenBSD             
11
// or other sources, and if so are covered by the appropriate copyright     
12
// and license included herein.                                             
13
// -------------------------------------------                              
14
// ####BSDALTCOPYRIGHTEND####                                               
15
//==========================================================================
16
//#####DESCRIPTIONBEGIN####
17
//
18
// Author(s):    gthomas, hmt
19
// Contributors: gthomas, hmt
20
// Date:         2000-01-10
21
// Purpose:      
22
// Description:  
23
//              
24
//
25
//####DESCRIPTIONEND####
26
//
27
//==========================================================================
28
 
29
 
30
// Synch routines, etc., used by network code
31
 
32
#include <sys/param.h>
33
#include <sys/malloc.h>
34
#include <sys/mbuf.h>
35
#include <sys/kernel.h>
36
#include <sys/domain.h>
37
#include <sys/protosw.h>
38
#include <sys/sockio.h>
39
#include <sys/socket.h>
40
#include <sys/socketvar.h>
41
#include <net/if.h>
42
#include <net/route.h>
43
#include <net/netisr.h>
44
#include <netinet/in.h>
45
#include <netinet/in_var.h>
46
#include <arpa/inet.h>
47
 
48
#include <machine/cpu.h>
49
 
50
#include <pkgconf/net.h>
51
 
52
#include <cyg/infra/diag.h>
53
#include <cyg/hal/hal_intr.h>
54
#include <cyg/kernel/kapi.h>
55
 
56
#include <cyg/infra/cyg_ass.h>
57
 
58
#include <cyg/io/eth/netdev.h>
59
 
60
//---------------------------- splx() emulation ------------------------------
61
// This contains both the SPLX stuff and tsleep/wakeup - because those must
62
// be SPLX aware.  They release the SPLX lock when sleeping, and reclaim it
63
// (if needs be) at wakeup.
64
//
65
// The variable spl_state (and the associated bit patterns) is used to keep
66
// track of the "splx()" level.  This is an artifact of the original stack,
67
// based on the BSD interrupt world (interrupts and processing could be
68
// masked based on a level value, supported by hardware).  This is not very
69
// real-time, so the emulation uses proper eCos tools and techniques to
70
// accomplish the same result.  The key here is in the analysis of the
71
// various "levels", why they are used, etc.
72
//
73
// SPL_IMP is called in order to protect internal data structures
74
// short-term, primarily so that interrupt processing does not interfere
75
// with them.
76
// 
77
// SPL_CLOCK is called in order to ensure that a timestamp is valid i.e. no
78
// time passes while the stamp is being taken (since it is a potentially
79
// non-idempotent data structure).
80
//
81
// SPL_SOFTNET is used to prevent all other stack processing, including
82
// interrupts (DSRs), etc.
83
//
84
// SPL_INTERNAL is used when running the pseudo-DSR in timeout.c - this
85
// runs what should really be the network interface device's DSR, and any
86
// timeout routines that are scheduled.  (They are broken out into a thread
87
// to isolate the network locking from the rest of the system)
88
//
89
// NB a thread in thi state can tsleep(); see below.  Tsleep releases and
90
// reclaims the locks and so on.  This necessary because of the possible
91
// conflict where
92
//     I splsoft
93
//     I tsleep
94
//     He runs, he is lower priority
95
//     He splsofts
96
//     He or something else awakens me
97
//     I want to run, but he has splsoft, so I wait
98
//     He runs and releases splsoft
99
//     I awaken and go.
100
 
101
// Bit-mask of the "spl" levels currently held by the thread that owns
// the splx mutex.  Non-zero exactly while splx_mutex is held.
static volatile cyg_uint32 spl_state = 0;
// Individual level bits; see the commentary above for what each guards.
#define SPL_IMP      0x01
#define SPL_NET      0x02
#define SPL_CLOCK    0x04
#define SPL_SOFTNET  0x08
#define SPL_INTERNAL 0x10

// The single mutex implementing the whole splx() emulation, plus the
// handle of the thread currently owning it (0 when free).  splx_thread
// lets a thread detect recursive spl*() calls without re-locking.
static cyg_mutex_t splx_mutex;
static volatile cyg_handle_t splx_thread;
110
 
111
 
112
#ifdef CYGIMPL_TRACE_SPLX
// Tracing build: each spl entry point receives the caller's file/line
// and records the event via do_sched_event() together with the current
// spl_state.
#define SPLXARGS const char *file, const int line
#define SPLXMOREARGS , const char *file, const int line
#define SPLXTRACE do_sched_event(__FUNCTION__, file, line, spl_state)
#else
// Normal build: no extra arguments, and the trace statement compiles away.
#define SPLXARGS void
#define SPLXMOREARGS
#define SPLXTRACE
#endif
121
 
122
 
123
static inline cyg_uint32
124
spl_any( cyg_uint32 which )
125
{
126
    cyg_uint32 old_spl = spl_state;
127
    if ( cyg_thread_self() != splx_thread ) {
128
        cyg_mutex_lock( &splx_mutex );
129
        old_spl = 0; // Free when we unlock this context
130
        CYG_ASSERT( 0 == splx_thread, "Thread still owned" );
131
        CYG_ASSERT( 0 == spl_state, "spl still set" );
132
        splx_thread = cyg_thread_self();
133
    }
134
    CYG_ASSERT( splx_mutex.locked, "spl_any: mutex not locked" );
135
    CYG_ASSERT( (cyg_handle_t)splx_mutex.owner == cyg_thread_self(),
136
                "spl_any: mutex not mine" );
137
    spl_state |= which;
138
    return old_spl;
139
}
140
 
141
 
142
// Raise to "implementation" level: short-term protection of internal
// data structures so interrupt-time processing cannot interfere.
// Returns the previous spl state for a later cyg_splx().
cyg_uint32
cyg_splimp(SPLXARGS)
{
    SPLXTRACE;
    return spl_any( SPL_IMP );
}
148
 
149
// Raise to "clock" level: ensures no time passes while a timestamp is
// being taken.  Returns the previous spl state for a later cyg_splx().
cyg_uint32
cyg_splclock(SPLXARGS)
{
    SPLXTRACE;
    return spl_any( SPL_CLOCK );
}
155
 
156
// Raise to "network" level.  Returns the previous spl state for a
// later cyg_splx().
cyg_uint32
cyg_splnet(SPLXARGS)
{
    SPLXTRACE;
    return spl_any( SPL_NET );
}
162
 
163
// Highest emulated level; blocks all other stack processing.
// Returns the previous spl state for a later cyg_splx().
cyg_uint32
cyg_splhigh(SPLXARGS)
{
    SPLXTRACE;
    // splhigh did SPLSOFTNET in the contrib, so this is the same
    return spl_any( SPL_SOFTNET );
}
170
 
171
// Raise to "softnet" level: prevents all other stack processing,
// including DSR-level interrupt work.  Returns the previous spl state
// for a later cyg_splx().
cyg_uint32
cyg_splsoftnet(SPLXARGS)
{
    SPLXTRACE;
    return spl_any( SPL_SOFTNET );
}
177
 
178
// Level used by the pseudo-DSR machinery in timeout.c (see the
// commentary at the top of this section).  Returns the previous spl
// state for a later cyg_splx().
cyg_uint32
cyg_splinternal(SPLXARGS)
{
    SPLXTRACE;
    return spl_any( SPL_INTERNAL );
}
184
 
185
 
186
//
187
// Return to a previous interrupt state/level.
188
//
189
// Restore the spl level saved by a matching cyg_spl*() call.
// old_state is the value that call returned; once no level bits remain
// held the big lock is released entirely.
void
cyg_splx(cyg_uint32 old_state SPLXMOREARGS)
{
    SPLXTRACE;

    CYG_ASSERT( 0 != spl_state, "No state set" );
    CYG_ASSERT( splx_mutex.locked, "splx: mutex not locked" );
    CYG_ASSERT( (cyg_handle_t)splx_mutex.owner == cyg_thread_self(),
                "splx: mutex not mine" );

    // Keep only the bits that were already held when the matching
    // spl*() call was made; the bit(s) it added are cleared.
    spl_state &= old_state;

    if ( 0 == spl_state ) {
        // No levels left: give up ownership.  Clear the owner handle
        // *before* unlocking so a waiter never sees a stale owner.
        splx_thread = 0;
        cyg_mutex_unlock( &splx_mutex );
    }
}
206
 
207
//------------------ tsleep() and wakeup() emulation ---------------------------
208
//
209
// Structure used to keep track of 'tsleep' style events
210
//
211
struct wakeup_event {
    void *chan;      // BSD-style wait channel; 0 marks the slot free
    cyg_sem_t sem;   // posted by cyg_wakeup() to release the sleeper
};
// Fixed pool of sleep slots; a sleeper owns its slot from cyg_tsleep()
// entry until it returns (see the race note in cyg_wakeup()).
static struct wakeup_event wakeup_list[CYGPKG_NET_NUM_WAKEUP_EVENTS];
216
 
217
 
218
// Called to initialize structures used by timeout functions
219
void
220
cyg_tsleep_init(void)
221
{
222
    int i;
223
    struct wakeup_event *ev;
224
    // Create list of "wakeup event" semaphores
225
    for (i = 0, ev = wakeup_list;  i < CYGPKG_NET_NUM_WAKEUP_EVENTS;  i++, ev++) {
226
        ev->chan = 0;
227
        cyg_semaphore_init(&ev->sem, 0);
228
    }
229
    // Initialize the mutex and thread id:
230
    cyg_mutex_init( &splx_mutex );
231
    splx_thread = 0;
232
}
233
 
234
 
235
//
236
// Signal an event
237
void
238
cyg_wakeup(void *chan)
239
{
240
    int i;
241
    struct wakeup_event *ev;
242
    cyg_scheduler_lock(); // Ensure scan is safe
243
    // NB this is broadcast semantics because a sleeper/wakee holds the
244
    // slot until they exit.  This avoids a race condition whereby the
245
    // semaphore can get an extra post - and then the slot is freed, so the
246
    // sem wait returns immediately, AOK, so the slot wasn't freed.
247
    for (i = 0, ev = wakeup_list;  i < CYGPKG_NET_NUM_WAKEUP_EVENTS;  i++, ev++)
248
        if (ev->chan == chan)
249
            cyg_semaphore_post(&ev->sem);
250
 
251
    cyg_scheduler_unlock();
252
}
253
 
254
// ------------------------------------------------------------------------
255
// Wait for an event with timeout
256
//   tsleep(event, priority, state, timeout)
257
//     event - the thing to wait for
258
//     priority - unused
259
//     state    - a descriptive message
260
//     timeout  - max time (in ticks) to wait
261
//   returns:
262
//     0         - event was "signalled"
263
//     ETIMEDOUT - timeout occurred
264
//     EINTR     - thread broken out of sleep
265
//
266
// Sleep on 'chan' until cyg_wakeup(chan) or the timeout fires.
// 'pri' and 'wmesg' are accepted for BSD API compatibility only; this
// implementation does not use them.  'timo' is in kernel ticks, 0
// meaning wait forever.  Returns 0 on wakeup, ETIMEDOUT on timeout,
// EINTR if the wait was broken.  If the caller holds the splx lock it
// is released for the duration of the sleep and reacquired afterwards.
int
cyg_tsleep(void *chan, int pri, char *wmesg, int timo)
{
    int i, res = 0;
    struct wakeup_event *ev;
    cyg_tick_count_t sleep_time;
    cyg_handle_t self = cyg_thread_self();
    int old_splflags = 0; // no flags held

    cyg_scheduler_lock();

    // Safely find a free slot:
    for (i = 0, ev = wakeup_list;  i < CYGPKG_NET_NUM_WAKEUP_EVENTS;  i++, ev++) {
        if (ev->chan == 0) {
            ev->chan = chan;
            break;
        }
    }
    CYG_ASSERT( i <  CYGPKG_NET_NUM_WAKEUP_EVENTS, "no sleep slots" );
    CYG_ASSERT( 1 == cyg_scheduler_read_lock(),
                "Tsleep - called with scheduler locked" );
    // Defensive: in a non-assert build, fail the sleep rather than
    // corrupt memory when all slots are busy.
    if ( i >= CYGPKG_NET_NUM_WAKEUP_EVENTS ) {
        cyg_scheduler_unlock();
        return ETIMEDOUT;
    }

    // If we are the owner, then we must release the mutex when
    // we wait.  old_splflags doubles as the "was owner" flag below.
    if ( self == splx_thread ) {
        old_splflags = spl_state; // Keep them for restoration
        CYG_ASSERT( spl_state, "spl_state not set" );
        // Also want to assert that the mutex is locked...
        CYG_ASSERT( splx_mutex.locked, "Splx mutex not locked" );
        CYG_ASSERT( (cyg_handle_t)splx_mutex.owner == self, "Splx mutex not mine" );
        splx_thread = 0;
        spl_state = 0;
        cyg_mutex_unlock( &splx_mutex );
    }

    // Re-initialize the semaphore - it might have counted up arbitrarily
    // in the time between a prior sleeper being signalled and them
    // actually running.
    cyg_semaphore_init(&ev->sem, 0);

    // This part actually does the wait:
    // As of the new kernel, we can do this without unlocking the scheduler
    if (timo) {
        sleep_time = cyg_current_time() + timo;
        if (!cyg_semaphore_timed_wait(&ev->sem, sleep_time)) {
            // Distinguish timeout from a broken wait by checking the clock.
            if( cyg_current_time() >= sleep_time )
                res = ETIMEDOUT;
            else
                res = EINTR;
        }
    } else {
        if (!cyg_semaphore_wait(&ev->sem) ) {
            res = EINTR;
        }
    }

    ev->chan = 0;  // Free the slot - the wakeup call cannot do this.

    if ( old_splflags ) { // restore to previous state
        // As of the new kernel, we can do this with the scheduler locked
        cyg_mutex_lock( &splx_mutex ); // this might wait
        CYG_ASSERT( 0 == splx_thread, "Splx thread set in tsleep" );
        CYG_ASSERT( 0 == spl_state, "spl_state set in tsleep" );
        splx_thread = self; // got it now...
        spl_state = old_splflags;
    }

    cyg_scheduler_unlock();
    return res;
}
341
 
342
 
343
 
344
// ------------------------------------------------------------------------
345
// DEBUGGING ROUTINES
346
#ifdef CYGIMPL_TRACE_SPLX
// Undo the tracing wrappers so the real kernel calls can be made from
// the instrumented versions below.
#undef cyg_scheduler_lock
#undef cyg_scheduler_safe_lock
#undef cyg_scheduler_unlock

// Ring buffer of recent scheduler-lock / spl events, for post-mortem
// inspection with show_sched_events().
#define MAX_SCHED_EVENTS 256
static struct _sched_event {
    char *fun, *file;   // function and source file of the event
    int line, lock;     // source line and scheduler lock count
} sched_event[MAX_SCHED_EVENTS];
static int next_sched_event = 0;    // next slot to overwrite
static int total_sched_events = 0;  // events recorded since boot
358
 
359
static void
360
do_sched_event(char *fun, char *file, int line, int lock)
361
{
362
    struct _sched_event *se = &sched_event[next_sched_event];
363
    if (++next_sched_event == MAX_SCHED_EVENTS) {
364
        next_sched_event = 0;
365
    }
366
    se->fun = fun;
367
    se->file = file;
368
    se->line = line;
369
    se->lock = lock;
370
    total_sched_events++;
371
}
372
 
373
static void
374
show_sched_events(void)
375
{
376
    int i;
377
    struct _sched_event *se;
378
    if (total_sched_events < MAX_SCHED_EVENTS) {
379
        i = 0;
380
    } else {
381
        i = next_sched_event + 1;
382
        if (i == MAX_SCHED_EVENTS) i = 0;
383
    }
384
    diag_printf("%d total scheduler events\n", total_sched_events);
385
    while (i != next_sched_event) {
386
        se = &sched_event[i];
387
        diag_printf("%s - lock: %d, called from %s.%d\n", se->fun, se->lock, se->file, se->line);
388
        if (++i == MAX_SCHED_EVENTS) i = 0;
389
    }
390
}
391
 
392
// Value logged in the 'lock' field: the scheduler lock count *after*
// the operation.
#define SPLX_TRACE_DATA() cyg_scheduler_read_lock()

// Traced replacement for cyg_scheduler_lock(); the event is recorded
// after the lock is taken so the logged count reflects the new state.
void
_cyg_scheduler_lock(char *file, int line)
{
    cyg_scheduler_lock();
    do_sched_event(__FUNCTION__, file, line, SPLX_TRACE_DATA());
}
400
 
401
// Traced replacement for cyg_scheduler_safe_lock(); logs the lock
// count after the operation completes.
void
_cyg_scheduler_safe_lock(char *file, int line)
{
    cyg_scheduler_safe_lock();
    do_sched_event(__FUNCTION__, file, line, SPLX_TRACE_DATA());
}
407
 
408
// Traced replacement for cyg_scheduler_unlock(); logs the lock count
// after the unlock has taken effect.
void
_cyg_scheduler_unlock(char *file, int line)
{
    cyg_scheduler_unlock();
    do_sched_event(__FUNCTION__, file, line, SPLX_TRACE_DATA());
}
414
#endif // CYGIMPL_TRACE_SPLX
415
 
416
// EOF synch.c

powered by: WebSVN 2.1.0

© copyright 1999-2025 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.