OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

or1k/trunk/ecos-2.0/packages/net/tcpip/v2_0/src/ecos/synch.c - Diff between revs 1254 and 1765

//==========================================================================
//
//      ecos/synch.c
//
//      eCos wrapper and synch functions
//
//==========================================================================
//####BSDCOPYRIGHTBEGIN####
//
// -------------------------------------------
//
// Portions of this software may have been derived from OpenBSD or other sources,
// and are covered by the appropriate copyright disclaimers included herein.
//
// -------------------------------------------
//
//####BSDCOPYRIGHTEND####
//==========================================================================
//#####DESCRIPTIONBEGIN####
//
// Author(s):    gthomas, hmt
// Contributors: gthomas, hmt
// Date:         2000-01-10
// Purpose:      
// Description:  
//
//####DESCRIPTIONEND####
//
//==========================================================================


// Synch routines, etc., used by network code

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/sockio.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <net/if.h>
#include <net/route.h>
#include <net/netisr.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <arpa/inet.h>

#include <machine/cpu.h>

#include <pkgconf/net.h>

#include <cyg/infra/diag.h>
#include <cyg/hal/hal_intr.h>
#include <cyg/kernel/kapi.h>

#include <cyg/infra/cyg_ass.h>

#include <cyg/io/eth/netdev.h>
 
 
//---------------------------- splx() emulation ------------------------------
// This contains both the SPLX stuff and tsleep/wakeup - because those must
// be SPLX aware.  They release the SPLX lock when sleeping, and reclaim it
// (if needs be) at wakeup.
//
// The variable spl_state (and the associated bit patterns) is used to keep
// track of the "splx()" level.  This is an artifact of the original stack,
// based on the BSD interrupt world (interrupts and processing could be
// masked based on a level value, supported by hardware).  This is not very
// real-time, so the emulation uses proper eCos tools and techniques to
// accomplish the same result.  The key here is in the analysis of the
// various "levels", why they are used, etc.
//
// SPL_IMP is called in order to protect internal data structures
// short-term, primarily so that interrupt processing does not interfere
// with them.
//
// SPL_CLOCK is called in order to ensure that a timestamp is valid i.e. no
// time passes while the stamp is being taken (since it is a potentially
// non-idempotent data structure).
//
// SPL_SOFTNET is used to prevent all other stack processing, including
// interrupts (DSRs), etc.
//
// SPL_INTERNAL is used when running the pseudo-DSR in timeout.c - this
// runs what should really be the network interface device's DSR, and any
// timeout routines that are scheduled.  (They are broken out into a thread
// to isolate the network locking from the rest of the system.)
//
// NB a thread in this state can tsleep(); see below.  Tsleep releases and
// reclaims the locks and so on.  This is necessary because of the possible
// conflict where:
//     I splsoft
//     I tsleep
//     He runs, he is lower priority
//     He splsofts
//     He or something else awakens me
//     I want to run, but he has splsoft, so I wait
//     He runs and releases splsoft
//     I awaken and go.
//
// (A usage sketch of the spl bracket pattern follows cyg_splx() below.)
 
 
static volatile cyg_uint32 spl_state = 0;
#define SPL_IMP      0x01
#define SPL_NET      0x02
#define SPL_CLOCK    0x04
#define SPL_SOFTNET  0x08
#define SPL_INTERNAL 0x10

static cyg_mutex_t splx_mutex;
static volatile cyg_handle_t splx_thread;


#ifdef CYGIMPL_TRACE_SPLX
#define SPLXARGS const char *file, const int line
#define SPLXMOREARGS , const char *file, const int line
#define SPLXTRACE do_sched_event(__FUNCTION__, file, line, spl_state)
#else
#define SPLXARGS void
#define SPLXMOREARGS
#define SPLXTRACE
#endif
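
// NB: with CYGIMPL_TRACE_SPLX enabled, SPLXARGS/SPLXMOREARGS add (file, line)
// parameters to every cyg_spl*()/cyg_splx() routine below, and SPLXTRACE
// records each call in the scheduler-event ring buffer at the end of this
// file.  Callers are presumably mapped onto the traced signatures by macros
// in the stack's headers -- hypothetically something like:
//
//   #ifdef CYGIMPL_TRACE_SPLX
//   #define splsoftnet()  cyg_splsoftnet( __FILE__, __LINE__ )
//   #define splx( _s_ )   cyg_splx( (_s_), __FILE__, __LINE__ )
//   #else
//   #define splsoftnet()  cyg_splsoftnet()
//   #define splx( _s_ )   cyg_splx( (_s_) )
//   #endif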
 
 
 
 
static inline cyg_uint32
spl_any( cyg_uint32 which )
{
    cyg_uint32 old_spl = spl_state;
    if ( cyg_thread_self() != splx_thread ) {
        cyg_mutex_lock( &splx_mutex );
        old_spl = 0; // Free when we unlock this context
        CYG_ASSERT( 0 == splx_thread, "Thread still owned" );
        CYG_ASSERT( 0 == spl_state, "spl still set" );
        splx_thread = cyg_thread_self();
    }
    CYG_ASSERT( splx_mutex.locked, "spl_any: mutex not locked" );
    CYG_ASSERT( (cyg_handle_t)splx_mutex.owner == cyg_thread_self(),
                "spl_any: mutex not mine" );
    spl_state |= which;
    return old_spl;
}


cyg_uint32
cyg_splimp(SPLXARGS)
{
    SPLXTRACE;
    return spl_any( SPL_IMP );
}

cyg_uint32
cyg_splclock(SPLXARGS)
{
    SPLXTRACE;
    return spl_any( SPL_CLOCK );
}

cyg_uint32
cyg_splnet(SPLXARGS)
{
    SPLXTRACE;
    return spl_any( SPL_NET );
}

cyg_uint32
cyg_splhigh(SPLXARGS)
{
    SPLXTRACE;
    // splhigh did SPLSOFTNET in the contrib, so this is the same
    return spl_any( SPL_SOFTNET );
}

cyg_uint32
cyg_splsoftnet(SPLXARGS)
{
    SPLXTRACE;
    return spl_any( SPL_SOFTNET );
}

cyg_uint32
cyg_splinternal(SPLXARGS)
{
    SPLXTRACE;
    return spl_any( SPL_INTERNAL );
}


//
// Return to a previous interrupt state/level.
//
void
cyg_splx(cyg_uint32 old_state SPLXMOREARGS)
{
    SPLXTRACE;

    CYG_ASSERT( 0 != spl_state, "No state set" );
    CYG_ASSERT( splx_mutex.locked, "splx: mutex not locked" );
    CYG_ASSERT( (cyg_handle_t)splx_mutex.owner == cyg_thread_self(),
                "splx: mutex not mine" );

    spl_state &= old_state;

    if ( 0 == spl_state ) {
        splx_thread = 0;
        cyg_mutex_unlock( &splx_mutex );
    }
}
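
// ------------------------------------------------------------------------
// Usage sketch (hypothetical example, compiled out with #if 0): stack code
// brackets a protected region with one of the cyg_spl*() routines above and
// restores the previous level with cyg_splx().  Because cyg_splx() does
// "spl_state &= old_state", a nested bracket clears only the bits it added,
// and splx_mutex is released only when the outermost bracket ends.  This
// assumes CYGIMPL_TRACE_SPLX is disabled, so the routines take no arguments.
#if 0
static void
example_spl_bracket(void)
{
    cyg_uint32 s = cyg_splsoftnet();    // outermost: spl_state becomes SPL_SOFTNET
    // ... work on protected stack data ...
    {
        cyg_uint32 s2 = cyg_splimp();   // nested: spl_state is SPL_SOFTNET|SPL_IMP
        // ... short-term protection of internal structures ...
        cyg_splx(s2);                   // clears SPL_IMP only; the lock is still held
    }
    cyg_splx(s);                        // spl_state back to 0, splx_mutex released
}
#endif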
 
 
//------------------ tsleep() and wakeup() emulation ---------------------------
//
// Structure used to keep track of 'tsleep' style events
//
struct wakeup_event {
    void *chan;
    cyg_sem_t sem;
};
static struct wakeup_event wakeup_list[CYGPKG_NET_NUM_WAKEUP_EVENTS];


// Called to initialize structures used by timeout functions
void
cyg_tsleep_init(void)
{
    int i;
    struct wakeup_event *ev;
    // Create list of "wakeup event" semaphores
    for (i = 0, ev = wakeup_list;  i < CYGPKG_NET_NUM_WAKEUP_EVENTS;  i++, ev++) {
        ev->chan = 0;
        cyg_semaphore_init(&ev->sem, 0);
    }
    // Initialize the mutex and thread id:
    cyg_mutex_init( &splx_mutex );
    splx_thread = 0;
}


//
// Signal an event
void
cyg_wakeup(void *chan)
{
    int i;
    struct wakeup_event *ev;
    cyg_scheduler_lock(); // Ensure scan is safe
    // NB this is broadcast semantics because a sleeper/wakee holds the
    // slot until they exit.  This avoids a race condition whereby the
    // semaphore could collect an extra post and the slot then be freed
    // and reused, so that the next sem wait on that slot would return
    // immediately even though no wakeup was intended for it.
    for (i = 0, ev = wakeup_list;  i < CYGPKG_NET_NUM_WAKEUP_EVENTS;  i++, ev++)
        if (ev->chan == chan)
            cyg_semaphore_post(&ev->sem);

    cyg_scheduler_unlock();
}
 
 
// ------------------------------------------------------------------------
// Wait for an event with timeout
//   tsleep(event, priority, state, timeout)
//     event - the thing to wait for
//     priority - unused
//     state    - a descriptive message
//     timeout  - max time (in ticks) to wait
//   returns:
//     0         - event was "signalled"
//     ETIMEDOUT - timeout occurred
//     EINTR     - thread broken out of sleep
//
int
cyg_tsleep(void *chan, int pri, char *wmesg, int timo)
{
    int i, res = 0;
    struct wakeup_event *ev;
    cyg_tick_count_t sleep_time;
    cyg_handle_t self = cyg_thread_self();
    int old_splflags = 0; // no flags held

    cyg_scheduler_lock();

    // Safely find a free slot:
    for (i = 0, ev = wakeup_list;  i < CYGPKG_NET_NUM_WAKEUP_EVENTS;  i++, ev++) {
        if (ev->chan == 0) {
            ev->chan = chan;
            break;
        }
    }
    CYG_ASSERT( i <  CYGPKG_NET_NUM_WAKEUP_EVENTS, "no sleep slots" );
    CYG_ASSERT( 1 == cyg_scheduler_read_lock(),
                "Tsleep - called with scheduler locked" );
    // Defensive:
    if ( i >= CYGPKG_NET_NUM_WAKEUP_EVENTS ) {
        cyg_scheduler_unlock();
        return ETIMEDOUT;
    }

    // If we are the owner, then we must release the mutex when
    // we wait.
    if ( self == splx_thread ) {
        old_splflags = spl_state; // Keep them for restoration
        CYG_ASSERT( spl_state, "spl_state not set" );
        // Also want to assert that the mutex is locked...
        CYG_ASSERT( splx_mutex.locked, "Splx mutex not locked" );
        CYG_ASSERT( (cyg_handle_t)splx_mutex.owner == self, "Splx mutex not mine" );
        splx_thread = 0;
        spl_state = 0;
        cyg_mutex_unlock( &splx_mutex );
    }

    // Re-initialize the semaphore - it might have counted up arbitrarily
    // in the time between a prior sleeper being signalled and them
    // actually running.
    cyg_semaphore_init(&ev->sem, 0);

    // This part actually does the wait:
    // As of the new kernel, we can do this without unlocking the scheduler
    if (timo) {
        sleep_time = cyg_current_time() + timo;
        if (!cyg_semaphore_timed_wait(&ev->sem, sleep_time)) {
            if( cyg_current_time() >= sleep_time )
                res = ETIMEDOUT;
            else
                res = EINTR;
        }
    } else {
        if (!cyg_semaphore_wait(&ev->sem) ) {
            res = EINTR;
        }
    }

    ev->chan = 0;  // Free the slot - the wakeup call cannot do this.

    if ( old_splflags ) { // restore to previous state
        // As of the new kernel, we can do this with the scheduler locked
        cyg_mutex_lock( &splx_mutex ); // this might wait
        CYG_ASSERT( 0 == splx_thread, "Splx thread set in tsleep" );
        CYG_ASSERT( 0 == spl_state, "spl_state set in tsleep" );
        splx_thread = self; // got it now...
        spl_state = old_splflags;
    }

    cyg_scheduler_unlock();
    return res;
}
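
// ------------------------------------------------------------------------
// Usage sketch (hypothetical example, compiled out with #if 0): a consumer
// sleeps on the address of some shared datum as the wakeup "channel", and a
// producer posts that channel.  cyg_wakeup() has broadcast semantics, so
// every thread sleeping on the channel is released.  All names below are
// made up purely for illustration.
#if 0
static volatile int example_data_ready;            // hypothetical shared flag

static int
example_consumer(void)
{
    while ( !example_data_ready ) {
        // Wait up to 100 ticks for the producer; 0 means we were woken,
        // ETIMEDOUT means the timeout expired, EINTR means the sleep was
        // broken out of.
        int res = cyg_tsleep( (void *)&example_data_ready, 0, "exwait", 100 );
        if ( res == ETIMEDOUT )
            return res;            // give up on timeout; recheck on wakeup/EINTR
    }
    return 0;                      // data is ready
}

static void
example_producer(void)
{
    example_data_ready = 1;
    cyg_wakeup( (void *)&example_data_ready ); // wake all sleepers on this channel
}
#endif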
 
 
 
 
 
 
// ------------------------------------------------------------------------
// DEBUGGING ROUTINES
#ifdef CYGIMPL_TRACE_SPLX
#undef cyg_scheduler_lock
#undef cyg_scheduler_safe_lock
#undef cyg_scheduler_unlock
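// NB: when tracing is enabled the scheduler entry points are presumably
// remapped by the stack's headers onto the traced wrappers defined below,
// hypothetically along the lines of
//   #define cyg_scheduler_lock() _cyg_scheduler_lock( __FILE__, __LINE__ )
// so the #undefs above let those wrappers call the real kernel routines.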
 
 
#define MAX_SCHED_EVENTS 256
static struct _sched_event {
    char *fun, *file;
    int line, lock;
} sched_event[MAX_SCHED_EVENTS];
static int next_sched_event = 0;
static int total_sched_events = 0;

static void
do_sched_event(char *fun, char *file, int line, int lock)
{
    struct _sched_event *se = &sched_event[next_sched_event];
    if (++next_sched_event == MAX_SCHED_EVENTS) {
        next_sched_event = 0;
    }
    se->fun = fun;
    se->file = file;
    se->line = line;
    se->lock = lock;
    total_sched_events++;
}

static void
show_sched_events(void)
{
    int i;
    struct _sched_event *se;
    if (total_sched_events < MAX_SCHED_EVENTS) {
        i = 0;
    } else {
        i = next_sched_event + 1;
        if (i == MAX_SCHED_EVENTS) i = 0;
    }
    diag_printf("%d total scheduler events\n", total_sched_events);
    while (i != next_sched_event) {
        se = &sched_event[i];
        diag_printf("%s - lock: %d, called from %s.%d\n", se->fun, se->lock, se->file, se->line);
        if (++i == MAX_SCHED_EVENTS) i = 0;
    }
}

#define SPLX_TRACE_DATA() cyg_scheduler_read_lock()

void
_cyg_scheduler_lock(char *file, int line)
{
    cyg_scheduler_lock();
    do_sched_event(__FUNCTION__, file, line, SPLX_TRACE_DATA());
}

void
_cyg_scheduler_safe_lock(char *file, int line)
{
    cyg_scheduler_safe_lock();
    do_sched_event(__FUNCTION__, file, line, SPLX_TRACE_DATA());
}

void
_cyg_scheduler_unlock(char *file, int line)
{
    cyg_scheduler_unlock();
    do_sched_event(__FUNCTION__, file, line, SPLX_TRACE_DATA());
}
#endif // CYGIMPL_TRACE_SPLX

// EOF synch.c
 
 
