OpenCores
URL https://opencores.org/ocsvn/openrisc_me/openrisc_me/trunk

Subversion Repositories openrisc_me

[/] [openrisc/] [trunk/] [rtos/] [ecos-2.0/] [packages/] [net/] [tcpip/] [v2_0/] [src/] [ecos/] [synch.c] - Blame information for rev 307

Go to most recent revision | Details | Compare with Previous | View Log

Line No. Rev Author Line
1 27 unneback
//==========================================================================
2
//
3
//      ecos/synch.c
4
//
5
//      eCos wrapper and synch functions
6
//
7
//==========================================================================
8
//####BSDCOPYRIGHTBEGIN####
9
//
10
// -------------------------------------------
11
//
12
// Portions of this software may have been derived from OpenBSD or other sources,
13
// and are covered by the appropriate copyright disclaimers included herein.
14
//
15
// -------------------------------------------
16
//
17
//####BSDCOPYRIGHTEND####
18
//==========================================================================
19
//#####DESCRIPTIONBEGIN####
20
//
21
// Author(s):    gthomas, hmt
22
// Contributors: gthomas, hmt
23
// Date:         2000-01-10
24
// Purpose:      
25
// Description:  
26
//              
27
//
28
//####DESCRIPTIONEND####
29
//
30
//==========================================================================
31
 
32
 
33
// Synch routines, etc., used by network code
34
 
35
#include <sys/param.h>
36
#include <sys/malloc.h>
37
#include <sys/mbuf.h>
38
#include <sys/kernel.h>
39
#include <sys/domain.h>
40
#include <sys/protosw.h>
41
#include <sys/sockio.h>
42
#include <sys/socket.h>
43
#include <sys/socketvar.h>
44
#include <net/if.h>
45
#include <net/route.h>
46
#include <net/netisr.h>
47
#include <netinet/in.h>
48
#include <netinet/in_var.h>
49
#include <arpa/inet.h>
50
 
51
#include <machine/cpu.h>
52
 
53
#include <pkgconf/net.h>
54
 
55
#include <cyg/infra/diag.h>
56
#include <cyg/hal/hal_intr.h>
57
#include <cyg/kernel/kapi.h>
58
 
59
#include <cyg/infra/cyg_ass.h>
60
 
61
#include <cyg/io/eth/netdev.h>
62
 
63
//---------------------------- splx() emulation ------------------------------
64
// This contains both the SPLX stuff and tsleep/wakeup - because those must
65
// be SPLX aware.  They release the SPLX lock when sleeping, and reclaim it
66
// (if needs be) at wakeup.
67
//
68
// The variable spl_state (and the associated bit patterns) is used to keep
69
// track of the "splx()" level.  This is an artifact of the original stack,
70
// based on the BSD interrupt world (interrupts and processing could be
71
// masked based on a level value, supported by hardware).  This is not very
72
// real-time, so the emulation uses proper eCos tools and techniques to
73
// accomplish the same result.  The key here is in the analysis of the
74
// various "levels", why they are used, etc.
75
//
76
// SPL_IMP is called in order to protect internal data structures
77
// short-term, primarily so that interrupt processing does not interfere
78
// with them.
79
// 
80
// SPL_CLOCK is called in order to ensure that a timestamp is valid i.e. no
81
// time passes while the stamp is being taken (since it is a potentially
82
// non-idempotent data structure).
83
//
84
// SPL_SOFTNET is used to prevent all other stack processing, including
85
// interrupts (DSRs), etc.
86
//
87
// SPL_INTERNAL is used when running the pseudo-DSR in timeout.c - this
88
// runs what should really be the network interface device's DSR, and any
89
// timeout routines that are scheduled.  (They are broken out into a thread
90
// to isolate the network locking from the rest of the system)
91
//
92
// NB a thread in this state can tsleep(); see below.  Tsleep releases and
// reclaims the locks and so on.  This is necessary because of the possible
94
// conflict where
95
//     I splsoft
96
//     I tsleep
97
//     He runs, he is lower priority
98
//     He splsofts
99
//     He or something else awakens me
100
//     I want to run, but he has splsoft, so I wait
101
//     He runs and releases splsoft
102
//     I awaken and go.
103
 
104
// Bitmask of the "spl" levels currently asserted; 0 means the lock is free.
static volatile cyg_uint32 spl_state = 0;
#define SPL_IMP      0x01  // short-term protection of internal data structures
#define SPL_NET      0x02  // network-stack processing
#define SPL_CLOCK    0x04  // timestamp consistency (no time passes mid-read)
#define SPL_SOFTNET  0x08  // blocks all other stack processing, incl. DSRs
#define SPL_INTERNAL 0x10  // pseudo-DSR/timeout thread (see timeout.c)

// One global mutex emulates the whole BSD spl scheme; splx_thread records
// the handle of the thread currently holding it (0 == nobody).
static cyg_mutex_t splx_mutex;
static volatile cyg_handle_t splx_thread;
113
 
114
 
115
// When spl tracing is enabled, every spl entry point gains file/line
// parameters and records each call via do_sched_event(); otherwise the
// hooks compile away to nothing.
#ifdef CYGIMPL_TRACE_SPLX
#define SPLXARGS const char *file, const int line
#define SPLXMOREARGS , const char *file, const int line
#define SPLXTRACE do_sched_event(__FUNCTION__, file, line, spl_state)
#else
#define SPLXARGS void
#define SPLXMOREARGS
#define SPLXTRACE
#endif
124
 
125
 
126
// Acquire (or extend) the global splx lock with the level bit(s) in 'which'.
// If the calling thread does not already own splx_mutex it blocks to take
// it (recording ownership in splx_thread); if it does, the new bits simply
// nest by OR-ing into spl_state.
//
// Returns the previous spl_state for the matching cyg_splx() call:
// 0 means "this call newly acquired the mutex, so the final splx releases
// it"; non-zero means "lock was already held, just restore the bits".
static inline cyg_uint32
spl_any( cyg_uint32 which )
{
    cyg_uint32 old_spl = spl_state;
    if ( cyg_thread_self() != splx_thread ) {
        cyg_mutex_lock( &splx_mutex );
        old_spl = 0; // Free when we unlock this context
        CYG_ASSERT( 0 == splx_thread, "Thread still owned" );
        CYG_ASSERT( 0 == spl_state, "spl still set" );
        splx_thread = cyg_thread_self();
    }
    CYG_ASSERT( splx_mutex.locked, "spl_any: mutex not locked" );
    CYG_ASSERT( (cyg_handle_t)splx_mutex.owner == cyg_thread_self(),
                "spl_any: mutex not mine" );
    spl_state |= which;
    return old_spl;
}
143
 
144
 
145
cyg_uint32
146
cyg_splimp(SPLXARGS)
147
{
148
    SPLXTRACE;
149
    return spl_any( SPL_IMP );
150
}
151
 
152
cyg_uint32
153
cyg_splclock(SPLXARGS)
154
{
155
    SPLXTRACE;
156
    return spl_any( SPL_CLOCK );
157
}
158
 
159
cyg_uint32
160
cyg_splnet(SPLXARGS)
161
{
162
    SPLXTRACE;
163
    return spl_any( SPL_NET );
164
}
165
 
166
cyg_uint32
167
cyg_splhigh(SPLXARGS)
168
{
169
    SPLXTRACE;
170
    // splhigh did SPLSOFTNET in the contrib, so this is the same
171
    return spl_any( SPL_SOFTNET );
172
}
173
 
174
cyg_uint32
175
cyg_splsoftnet(SPLXARGS)
176
{
177
    SPLXTRACE;
178
    return spl_any( SPL_SOFTNET );
179
}
180
 
181
cyg_uint32
182
cyg_splinternal(SPLXARGS)
183
{
184
    SPLXTRACE;
185
    return spl_any( SPL_INTERNAL );
186
}
187
 
188
 
189
//
// Return to a previous interrupt state/level.
//
// 'old_state' is the value returned by the matching spl*() call.  Bits
// acquired since that call are cleared by AND-ing with the saved state;
// when spl_state drops to zero this context was the outermost acquirer,
// so it gives up ownership and releases the mutex.
//
void
cyg_splx(cyg_uint32 old_state SPLXMOREARGS)
{
    SPLXTRACE;

    CYG_ASSERT( 0 != spl_state, "No state set" );
    CYG_ASSERT( splx_mutex.locked, "splx: mutex not locked" );
    CYG_ASSERT( (cyg_handle_t)splx_mutex.owner == cyg_thread_self(),
                "splx: mutex not mine" );

    spl_state &= old_state;

    if ( 0 == spl_state ) {
        // Outermost release: clear ownership before unlocking.
        splx_thread = 0;
        cyg_mutex_unlock( &splx_mutex );
    }
}
209
 
210
//------------------ tsleep() and wakeup() emulation ---------------------------
//
// Structure used to keep track of 'tsleep' style events
//
struct wakeup_event {
    void *chan;    // BSD-style "channel" being slept on; 0 == slot free
    cyg_sem_t sem; // posted by cyg_wakeup() to rouse the sleeper
};
// Fixed pool of sleep slots; sized by the net package configuration.
static struct wakeup_event wakeup_list[CYGPKG_NET_NUM_WAKEUP_EVENTS];
219
 
220
 
221
// Called to initialize structures used by timeout functions
222
void
223
cyg_tsleep_init(void)
224
{
225
    int i;
226
    struct wakeup_event *ev;
227
    // Create list of "wakeup event" semaphores
228
    for (i = 0, ev = wakeup_list;  i < CYGPKG_NET_NUM_WAKEUP_EVENTS;  i++, ev++) {
229
        ev->chan = 0;
230
        cyg_semaphore_init(&ev->sem, 0);
231
    }
232
    // Initialize the mutex and thread id:
233
    cyg_mutex_init( &splx_mutex );
234
    splx_thread = 0;
235
}
236
 
237
 
238
//
239
// Signal an event
240
void
241
cyg_wakeup(void *chan)
242
{
243
    int i;
244
    struct wakeup_event *ev;
245
    cyg_scheduler_lock(); // Ensure scan is safe
246
    // NB this is broadcast semantics because a sleeper/wakee holds the
247
    // slot until they exit.  This avoids a race condition whereby the
248
    // semaphore can get an extra post - and then the slot is freed, so the
249
    // sem wait returns immediately, AOK, so the slot wasn't freed.
250
    for (i = 0, ev = wakeup_list;  i < CYGPKG_NET_NUM_WAKEUP_EVENTS;  i++, ev++)
251
        if (ev->chan == chan)
252
            cyg_semaphore_post(&ev->sem);
253
 
254
    cyg_scheduler_unlock();
255
}
256
 
257
// ------------------------------------------------------------------------
// Wait for an event with timeout
//   tsleep(event, priority, state, timeout)
//     event - the thing to wait for
//     priority - unused
//     state    - a descriptive message (unused here)
//     timeout  - max time (in ticks) to wait; 0 == wait forever
//   returns:
//     0         - event was "signalled"
//     ETIMEDOUT - timeout occurred
//     EINTR     - thread broken out of sleep
//
// If the caller currently owns the splx lock, it is released for the
// duration of the sleep and re-acquired (with the same level bits)
// before returning - mirroring BSD tsleep() semantics.
//
int
cyg_tsleep(void *chan, int pri, char *wmesg, int timo)
{
    int i, res = 0;
    struct wakeup_event *ev;
    cyg_tick_count_t sleep_time;
    cyg_handle_t self = cyg_thread_self();
    int old_splflags = 0; // no flags held

    cyg_scheduler_lock();

    // Safely find a free slot:
    for (i = 0, ev = wakeup_list;  i < CYGPKG_NET_NUM_WAKEUP_EVENTS;  i++, ev++) {
        if (ev->chan == 0) {
            ev->chan = chan;
            break;
        }
    }
    CYG_ASSERT( i <  CYGPKG_NET_NUM_WAKEUP_EVENTS, "no sleep slots" );
    CYG_ASSERT( 1 == cyg_scheduler_read_lock(),
                "Tsleep - called with scheduler locked" );
    // Defensive: in a release build (asserts off) fail gracefully when
    // every slot is taken rather than sleeping on a stranger's slot.
    if ( i >= CYGPKG_NET_NUM_WAKEUP_EVENTS ) {
        cyg_scheduler_unlock();
        return ETIMEDOUT;
    }

    // If we are the owner, then we must release the mutex when
    // we wait.
    if ( self == splx_thread ) {
        old_splflags = spl_state; // Keep them for restoration
        CYG_ASSERT( spl_state, "spl_state not set" );
        // Also want to assert that the mutex is locked...
        CYG_ASSERT( splx_mutex.locked, "Splx mutex not locked" );
        CYG_ASSERT( (cyg_handle_t)splx_mutex.owner == self, "Splx mutex not mine" );
        splx_thread = 0;
        spl_state = 0;
        cyg_mutex_unlock( &splx_mutex );
    }

    // Re-initialize the semaphore - it might have counted up arbitrarily
    // in the time between a prior sleeper being signalled and them
    // actually running.
    cyg_semaphore_init(&ev->sem, 0);

    // This part actually does the wait:
    // As of the new kernel, we can do this without unlocking the scheduler
    if (timo) {
        sleep_time = cyg_current_time() + timo;
        if (!cyg_semaphore_timed_wait(&ev->sem, sleep_time)) {
            // Wait failed: distinguish timeout from forcible break-out.
            if( cyg_current_time() >= sleep_time )
                res = ETIMEDOUT;
            else
                res = EINTR;
        }
    } else {
        if (!cyg_semaphore_wait(&ev->sem) ) {
            res = EINTR;
        }
    }

    ev->chan = 0;  // Free the slot - the wakeup call cannot do this.

    if ( old_splflags ) { // restore to previous state
        // As of the new kernel, we can do this with the scheduler locked
        cyg_mutex_lock( &splx_mutex ); // this might wait
        CYG_ASSERT( 0 == splx_thread, "Splx thread set in tsleep" );
        CYG_ASSERT( 0 == spl_state, "spl_state set in tsleep" );
        splx_thread = self; // got it now...
        spl_state = old_splflags;
    }

    cyg_scheduler_unlock();
    return res;
}
344
 
345
 
346
 
347
// ------------------------------------------------------------------------
// DEBUGGING ROUTINES
// Only compiled when spl tracing is enabled; replaces the scheduler lock
// entry points (un-#define'd below) with recording wrappers.
#ifdef CYGIMPL_TRACE_SPLX
#undef cyg_scheduler_lock
#undef cyg_scheduler_safe_lock
#undef cyg_scheduler_unlock

// Ring buffer of the most recent scheduler/spl events.
#define MAX_SCHED_EVENTS 256
static struct _sched_event {
    char *fun, *file;  // function recorded and caller's source file
    int line, lock;    // caller's line and scheduler-lock datum at the time
} sched_event[MAX_SCHED_EVENTS];
static int next_sched_event = 0;   // next ring slot to overwrite
static int total_sched_events = 0; // total recorded since boot (never wraps)
361
 
362
static void
363
do_sched_event(char *fun, char *file, int line, int lock)
364
{
365
    struct _sched_event *se = &sched_event[next_sched_event];
366
    if (++next_sched_event == MAX_SCHED_EVENTS) {
367
        next_sched_event = 0;
368
    }
369
    se->fun = fun;
370
    se->file = file;
371
    se->line = line;
372
    se->lock = lock;
373
    total_sched_events++;
374
}
375
 
376
static void
377
show_sched_events(void)
378
{
379
    int i;
380
    struct _sched_event *se;
381
    if (total_sched_events < MAX_SCHED_EVENTS) {
382
        i = 0;
383
    } else {
384
        i = next_sched_event + 1;
385
        if (i == MAX_SCHED_EVENTS) i = 0;
386
    }
387
    diag_printf("%d total scheduler events\n", total_sched_events);
388
    while (i != next_sched_event) {
389
        se = &sched_event[i];
390
        diag_printf("%s - lock: %d, called from %s.%d\n", se->fun, se->lock, se->file, se->line);
391
        if (++i == MAX_SCHED_EVENTS) i = 0;
392
    }
393
}
394
 
395
// Datum recorded with each trace entry: the current scheduler lock count.
#define SPLX_TRACE_DATA() cyg_scheduler_read_lock()
396
 
397
// Tracing wrapper: take the scheduler lock, then record the event.
void
_cyg_scheduler_lock(char *file, int line)
{
    int lock_datum;

    cyg_scheduler_lock();
    lock_datum = SPLX_TRACE_DATA();
    do_sched_event(__FUNCTION__, file, line, lock_datum);
}
403
 
404
// Tracing wrapper: safe-lock the scheduler, then record the event.
void
_cyg_scheduler_safe_lock(char *file, int line)
{
    int lock_datum;

    cyg_scheduler_safe_lock();
    lock_datum = SPLX_TRACE_DATA();
    do_sched_event(__FUNCTION__, file, line, lock_datum);
}
410
 
411
// Tracing wrapper: release the scheduler lock, then record the event.
void
_cyg_scheduler_unlock(char *file, int line)
{
    int lock_datum;

    cyg_scheduler_unlock();
    lock_datum = SPLX_TRACE_DATA();
    do_sched_event(__FUNCTION__, file, line, lock_datum);
}
417
#endif // CYGIMPL_TRACE_SPLX
418
 
419
// EOF synch.c

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.