URL
https://opencores.org/ocsvn/openrisc/openrisc/trunk
Subversion Repositories openrisc
Compare Revisions
- This comparison shows the changes necessary to convert path
/openrisc/trunk/gnu-dev/or1k-gcc/libjava/sysdep
- from Rev 753 to Rev 764
- ↔ Reverse comparison
Rev 753 → Rev 764
/arm/locks.h
0,0 → 1,133
// locks.h - Thread synchronization primitives. ARM implementation. |
|
/* Copyright (C) 2007 Free Software Foundation |
|
This file is part of libgcj. |
|
This software is copyrighted work licensed under the terms of the |
Libgcj License. Please consult the file "LIBGCJ_LICENSE" for |
details. */ |
|
#ifndef __SYSDEP_LOCKS_H__ |
#define __SYSDEP_LOCKS_H__ |
|
typedef size_t obj_addr_t; /* Integer type big enough for object */ |
/* address. */ |
#if (__ARM_EABI__ && __linux) |
|
// Atomically replace *addr by new_val if it was initially equal to old. |
// Return true if the comparison succeeded. |
// Assumed to have acquire semantics, i.e. later memory operations |
// cannot execute before the compare_and_swap finishes. |
inline static bool |
compare_and_swap(volatile obj_addr_t *addr, |
obj_addr_t old, |
obj_addr_t new_val) |
{ |
return __sync_bool_compare_and_swap(addr, old, new_val); |
} |
|
// Set *addr to new_val with release semantics, i.e. making sure |
// that prior loads and stores complete before this |
// assignment. |
inline static void |
release_set(volatile obj_addr_t *addr, obj_addr_t new_val) |
{ |
__sync_synchronize(); |
*(addr) = new_val; |
} |
|
// Compare_and_swap with release semantics instead of acquire semantics. |
// On many architecture, the operation makes both guarantees, so the |
// implementation can be the same. |
inline static bool |
compare_and_swap_release(volatile obj_addr_t *addr, |
obj_addr_t old, |
obj_addr_t new_val) |
{ |
return __sync_bool_compare_and_swap(addr, old, new_val); |
} |
|
// Ensure that subsequent instructions do not execute on stale
// data that was loaded from memory before the barrier.
// (The old comment here referred to X86; this is the ARM port.
// __sync_synchronize() expands to a full hardware memory barrier.)
inline static void
read_barrier()
{
  __sync_synchronize();
}

// Ensure that prior stores to memory are completed with respect to other
// processors.
inline static void
write_barrier()
{
  __sync_synchronize();
}
|
#else |
|
/* Atomic compare and exchange.  These sequences are not actually
   atomic; there is a race if *ADDR != OLD_VAL and we are preempted
   between the two swaps.  However, they are very close to atomic, and
   are the best that a pre-ARMv6 implementation can do without
   operating system support.  LinuxThreads has been using these
   sequences for many years.  */

// Pseudo-atomic CAS built from SWP (pre-ARMv6 has no LDREX/STREX):
// load *addr; if it differs from old_val, fail with 0.  Otherwise SWP
// new_val in; if the value SWP returned is not the one we loaded,
// another thread intervened, so SWP the observed value back and retry.
inline static bool
compare_and_swap(volatile obj_addr_t *addr,
		 obj_addr_t old_val,
		 obj_addr_t new_val)
{
  volatile obj_addr_t result, tmp;
  __asm__ ("\n"
	   "0:	ldr	%[tmp],[%[addr]]\n"
	   "	cmp	%[tmp],%[old_val]\n"
	   "	movne	%[result],#0\n"
	   "	bne	1f\n"
	   "	swp	%[result],%[new_val],[%[addr]]\n"
	   "	cmp	%[tmp],%[result]\n"
	   "	swpne	%[tmp],%[result],[%[addr]]\n"
	   "	bne	0b\n"
	   "	mov	%[result],#1\n"
	   "1:"
	   : [result] "=&r" (result), [tmp] "=&r" (tmp)
	   : [addr] "r" (addr), [new_val] "r" (new_val), [old_val] "r" (old_val)
	   : "cc", "memory");

  return result;
}
|
inline static void |
release_set(volatile obj_addr_t *addr, obj_addr_t new_val) |
{ |
__asm__ __volatile__("" : : : "memory"); |
*(addr) = new_val; |
} |
|
inline static bool |
compare_and_swap_release(volatile obj_addr_t *addr, |
obj_addr_t old, |
obj_addr_t new_val) |
{ |
return compare_and_swap(addr, old, new_val); |
} |
|
// Ensure that subsequent instructions do not execute on stale
// data that was loaded from memory before the barrier.
// Compiler barrier only — presumably sufficient for the pre-ARMv6
// cores this branch targets; verify if SMP support is added.
inline static void
read_barrier()
{
  __asm__ __volatile__("" : : : "memory");
}

// Ensure that prior stores to memory are completed with respect to other
// processors.
inline static void
write_barrier()
{
  __asm__ __volatile__("" : : : "memory");
}
|
#endif |
#endif |
/arm/backtrace.h
0,0 → 1,35
// backtrace.h - Fallback backtrace implementation. ARM implementation. |
|
/* Copyright (C) 2005, 2006 Free Software Foundation |
|
This file is part of libgcj. |
|
This software is copyrighted work licensed under the terms of the |
Libgcj License. Please consult the file "LIBGCJ_LICENSE" for |
details. */ |
|
#ifndef __SYSDEP_BACKTRACE_H__ |
#define __SYSDEP_BACKTRACE_H__ |
|
#include <java-stack.h> |
|
extern "C"
{
/* Unwind through the call stack calling TRACE_FN with STATE for every stack
   frame.  Returns the reason why the unwinding was stopped.

   NOTE(review): on ARM this is a stub — it visits no frames and
   immediately reports _URC_NO_REASON.  */
#ifdef __ARM_EABI_UNWINDER__

// The EABI unwinder provides no _Unwind_FindEnclosingFunction;
// map it to the identity so callers simply get the PC back.
#define _Unwind_FindEnclosingFunction(PC) \
  (PC)

_Unwind_Reason_Code
fallback_backtrace (_Unwind_Reason_Code (*)(struct _Unwind_Context*, void*), _Jv_UnwindState *)
#else
_Unwind_Reason_Code
fallback_backtrace (_Unwind_Trace_Fn, _Jv_UnwindState *)
#endif
{
  return _URC_NO_REASON;
}
}
#endif |
/powerpc/locks.h
0,0 → 1,97
// locks.h - Thread synchronization primitives. PowerPC implementation. |
|
/* Copyright (C) 2002,2008 Free Software Foundation |
|
This file is part of libgcj. |
|
This software is copyrighted work licensed under the terms of the |
Libgcj License. Please consult the file "LIBGCJ_LICENSE" for |
details. */ |
|
#ifndef __SYSDEP_LOCKS_H__ |
#define __SYSDEP_LOCKS_H__ |
|
#ifdef __LP64__ |
#define _LARX "ldarx " |
#define _STCX "stdcx. " |
#else |
#define _LARX "lwarx " |
#ifdef __PPC405__ |
#define _STCX "sync; stwcx. " |
#else |
#define _STCX "stwcx. " |
#endif |
#endif |
|
typedef size_t obj_addr_t; /* Integer type big enough for object */ |
/* address. */ |
|
// Atomic CAS via the PowerPC load-reserve/store-conditional loop.
// Returns true iff *addr was OLD and has been replaced by NEW_VAL.
inline static bool
compare_and_swap (volatile obj_addr_t *addr, obj_addr_t old,
		  obj_addr_t new_val)
{
  obj_addr_t ret;

  /* larx loads with reservation; xor. compares against OLD (sets cr0);
     on mismatch "bne $+12" skips the store, and "bne- $-16" restarts
     the sequence when the store-conditional lost the reservation.
     RET is 0 exactly when the swap succeeded.  */
  __asm__ __volatile__ (
	   " " _LARX "%0,0,%1 \n"
	   " xor. %0,%3,%0\n"
	   " bne $+12\n"
	   " " _STCX "%2,0,%1\n"
	   " bne- $-16\n"
	  : "=&r" (ret)
	  : "r" (addr), "r" (new_val), "r" (old)
	  : "cr0", "memory");

  /* This version of __compare_and_swap is to be used when acquiring
     a lock, so we don't need to worry about whether other memory
     operations have completed, but we do need to be sure that any loads
     after this point really occur after we have acquired the lock.
     The trailing "isync" provides that acquire fence.  */
  __asm__ __volatile__ ("isync" : : : "memory");
  return ret == 0;
}
|
// Publish NEW_VAL with release semantics: the full "sync" first makes
// every earlier load and store visible before the assignment.
inline static void
release_set (volatile obj_addr_t *addr, obj_addr_t new_val)
{
  __asm__ __volatile__ ("sync" : : : "memory");
  *addr = new_val;
}
|
// CAS with release semantics: the leading "sync" orders all earlier
// accesses before the swap.  No trailing isync — unlike the acquire
// variant above, no guarantee is made about later loads.
inline static bool
compare_and_swap_release (volatile obj_addr_t *addr, obj_addr_t old,
			  obj_addr_t new_val)
{
  obj_addr_t ret;

  __asm__ __volatile__ ("sync" : : : "memory");

  /* Same load-reserve/store-conditional loop as compare_and_swap.  */
  __asm__ __volatile__ (
	   " " _LARX "%0,0,%1 \n"
	   " xor. %0,%3,%0\n"
	   " bne $+12\n"
	   " " _STCX "%2,0,%1\n"
	   " bne- $-16\n"
	  : "=&r" (ret)
	  : "r" (addr), "r" (new_val), "r" (old)
	  : "cr0", "memory");

  return ret == 0;
}

// Ensure that subsequent instructions do not execute on stale
// data that was loaded from memory before the barrier ("isync"
// discards any speculatively executed instructions).
inline static void
read_barrier ()
{
  __asm__ __volatile__ ("isync" : : : "memory");
}

// Ensure that prior stores to memory are completed with respect to other
// processors ("sync" is a full barrier).
inline static void
write_barrier ()
{
  __asm__ __volatile__ ("sync" : : : "memory");
}
|
#endif |
/powerpc/descriptor.h
0,0 → 1,9
// Given a function pointer, return the code address.

#ifdef _CALL_AIX
// Under the AIX calling convention a function pointer refers to a
// function descriptor.  The descriptor is actually multiple words,
// but we don't care about anything except the first, which holds
// the code address.
# define UNWRAP_FUNCTION_DESCRIPTOR(X) (*(void **)(X))
#else
// Elsewhere the function pointer already is the code address.
# define UNWRAP_FUNCTION_DESCRIPTOR(X) (X)
#endif
/descriptor-n.h
0,0 → 1,3
// Given a function pointer, return the code address.
// On this target a function pointer is the code address directly;
// no descriptor indirection is needed.
#define UNWRAP_FUNCTION_DESCRIPTOR(X) (X)
/pa/locks.h
0,0 → 1,110
// locks.h - Thread synchronization primitives. PA-RISC implementation. |
|
/* Copyright (C) 2002, 2005 Free Software Foundation |
|
This file is part of libgcj. |
|
This software is copyrighted work licensed under the terms of the |
Libgcj License. Please consult the file "LIBGCJ_LICENSE" for |
details. */ |
|
#ifndef __SYSDEP_LOCKS_H__ |
#define __SYSDEP_LOCKS_H__ |
|
// Integer type big enough for object address. |
typedef size_t obj_addr_t; |
|
// Global spinlock word used to simulate CAS on PA-RISC.  Initialized
// to 1 ("free"): the ldcw instruction used below atomically reads the
// word and stores 0, and requires 16-byte alignment — hence the
// attribute.  Wrapped in a template so the definition can live in a
// header without multiple-definition errors.
template<int _Inst>
struct _pa_jv_cas_lock
{
  static volatile int _S_pa_jv_cas_lock;
};

template<int _Inst>
volatile int
_pa_jv_cas_lock<_Inst>::_S_pa_jv_cas_lock __attribute__ ((aligned (16))) = 1;

// Because of the lack of weak support when using the hpux som
// linker, we explicitly instantiate the atomicity lock.
template volatile int _pa_jv_cas_lock<0>::_S_pa_jv_cas_lock;
|
// Atomically replace *addr by new_val if it was initially equal to old_val.
// Return true if the comparison is successful.
// Assumed to have acquire semantics, i.e. later memory operations
// cannot execute before the compare_and_swap finishes.
// The following implementation is atomic but it can deadlock
// (e.g., if a thread dies holding the lock).
inline static bool
__attribute__ ((__unused__))
compare_and_swap(volatile obj_addr_t *addr,
		 obj_addr_t old_val,
		 obj_addr_t new_val)
{
  bool result;
  int tmp;
  volatile int& lock = _pa_jv_cas_lock<0>::_S_pa_jv_cas_lock;

  // Spin until we own the global lock: ldcw atomically fetches the
  // lock word and stores 0; a non-zero result means we acquired it.
  // Otherwise spin on plain loads until the word goes non-zero,
  // then branch back and retry the ldcw.
  __asm__ __volatile__ ("ldcw 0(%1),%0\n\t"
			"cmpib,<>,n 0,%0,.+20\n\t"
			"ldw 0(%1),%0\n\t"
			"cmpib,= 0,%0,.-4\n\t"
			"nop\n\t"
			"b,n .-20"
			: "=&r" (tmp)
			: "r" (&lock)
			: "memory");

  // With the lock held, the compare-and-swap itself is plain C.
  if (*addr != old_val)
    result = false;
  else
    {
      *addr = new_val;
      result = true;
    }

  /* Reset lock with PA 2.0 "ordered" store.  */
  __asm__ __volatile__ ("stw,ma %1,0(%0)"
			: : "r" (&lock), "r" (tmp) : "memory");

  return result;
}
|
// Set *addr to new_val with release semantics, i.e. making sure |
// that prior loads and stores complete before this |
// assignment. |
inline static void |
release_set(volatile obj_addr_t *addr, obj_addr_t new_val) |
{ |
__asm__ __volatile__(" " : : : "memory"); |
*(addr) = new_val; |
} |
|
// Compare_and_swap with release semantics instead of acquire semantics. |
// On many architecture, the operation makes both guarantees, so the |
// implementation can be the same. |
inline static bool |
compare_and_swap_release(volatile obj_addr_t *addr, |
obj_addr_t old, |
obj_addr_t new_val) |
{ |
return compare_and_swap(addr, old, new_val); |
} |
|
// Ensure that subsequent instructions do not execute on stale
// data that was loaded from memory before the barrier.
// Compiler-only barrier on this port.
inline static void
read_barrier()
{
  __asm__ __volatile__(" " : : : "memory");
}

// Ensure that prior stores to memory are completed with respect to other
// processors.  Compiler-only barrier on this port.
inline static void
write_barrier()
{
  __asm__ __volatile__(" " : : : "memory");
}
|
#endif |
|
/pa/descriptor.h
0,0 → 1,7
// Given a function pointer, return the code address.
// If the plabel bit (bit 1, tested with "& 2") is set, mask the low
// bits off and return the code address from the first word of the
// function descriptor.  Otherwise, the function pointer is the code
// address.

#define UNWRAP_FUNCTION_DESCRIPTOR(X) \
  (((unsigned int)(X)) & 2 ? *(void **)(((unsigned int)(X)) & ~3) : (X))
/pa/descriptor-pa32-hpux.h
0,0 → 1,91
/* descriptor-pa32-hpux.h - Given a function pointer, extract and return the |
actual code address of the corresponding function. |
|
This is done by checking if the plabel bit is set. If it's not set, |
return the function pointer. If it's set, mask it off and extract |
the address from the function descriptor. This address may point |
to an export stub. If so, extract the branch target from the stub |
and return it. Otherwise, the address from the function descriptor |
is returned. |
|
Copyright (C) 2006 Free Software Foundation |
|
This file is part of libgcj. |
|
This software is copyrighted work licensed under the terms of the |
Libgcj License. Please consult the file "LIBGCJ_LICENSE" for |
details. */ |
|
#define UNWRAP_FUNCTION_DESCRIPTOR pa_unwrap_function_descriptor |
|
#ifdef __cplusplus |
extern "C" { |
#endif |
|
/* Extract bit field from word using HP's numbering (MSB = 0):
   bits FROM..TO inclusive, right-justified in the result.  */
#define GET_FIELD(X, FROM, TO) \
  ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
|
/* Sign-extend the low LEN bits of X to a full int.  (v ^ s) - s,
   where s is the sign bit, maps [0, 2^LEN) onto
   [-2^(LEN-1), 2^(LEN-1)).  */
static inline int
sign_extend (int x, int len)
{
  int sign = 1 << (len - 1);
  int value = x & ((sign << 1) - 1);
  return (value ^ sign) - sign;
}
|
/* Extract a 17-bit signed constant from branch instructions,
   reassembling the scattered displacement fields before
   sign-extending.  */
static inline int
extract_17 (unsigned word)
{
  int bits = GET_FIELD (word, 19, 28);
  bits |= GET_FIELD (word, 29, 29) << 10;
  bits |= GET_FIELD (word, 11, 15) << 11;
  bits |= (word & 0x1) << 16;
  return sign_extend (bits, 17);
}
|
/* Extract a 22-bit signed constant from branch instructions,
   reassembling the scattered displacement fields before
   sign-extending.  */
static inline int
extract_22 (unsigned word)
{
  int bits = GET_FIELD (word, 19, 28);
  bits |= GET_FIELD (word, 29, 29) << 10;
  bits |= GET_FIELD (word, 11, 15) << 11;
  bits |= GET_FIELD (word, 6, 10) << 16;
  bits |= (word & 0x1) << 21;
  return sign_extend (bits, 22);
}
|
/* Return the code address for function pointer ADDR, resolving PA32
   HP-UX plabels and export stubs (see the file header comment).  */
static void *
pa_unwrap_function_descriptor (void *addr)
{
  unsigned int *tmp_addr;

  /* Check if plabel bit is set in function pointer.  If not, the
     pointer already is the code address.  */
  if (!((unsigned int) addr & 2))
    return addr;

  /* First word of the descriptor holds the code address.  */
  tmp_addr = *(unsigned int **) ((unsigned int) addr & ~3);

  /* If TMP_ADDR points to an export stub, adjust it so that it points
     to the branch target of the stub.  The two patterns below match
     the canonical PA 1.x and PA 2.0 export stub sequences.  */
  if ((*tmp_addr & 0xffe0e002) == 0xe8400000	/* bl x,r2 */
      && *(tmp_addr + 1) == 0x08000240		/* nop */
      && *(tmp_addr + 2) == 0x4bc23fd1		/* ldw -18(sp),rp */
      && *(tmp_addr + 3) == 0x004010a1		/* ldsid (rp),r1 */
      && *(tmp_addr + 4) == 0x00011820		/* mtsp r1,sr0 */
      && *(tmp_addr + 5) == 0xe0400002)		/* be,n 0(sr0,rp) */
    /* Extract target address from PA 1.x 17-bit branch.  */
    tmp_addr += extract_17 (*tmp_addr) + 2;
  else if ((*tmp_addr & 0xfc00e002) == 0xe800a000	/* b,l x,r2 */
	   && *(tmp_addr + 1) == 0x08000240		/* nop */
	   && *(tmp_addr + 2) == 0x4bc23fd1		/* ldw -18(sp),rp */
	   && *(tmp_addr + 3) == 0xe840d002)		/* bve,n (rp) */
    /* Extract target address from PA 2.0 22-bit branch.  */
    tmp_addr += extract_22 (*tmp_addr) + 2;

  return (void *) tmp_addr;
}
|
#ifdef __cplusplus |
} |
#endif |
/pa/descriptor-pa64-hpux.h
0,0 → 1,6
// Given a function pointer, return the code address.
// On PA64/HP-UX a function pointer unconditionally refers to a
// function descriptor; the code address lives at offset 16 within it.

#define UNWRAP_FUNCTION_DESCRIPTOR(X) *(void **)((unsigned long) (X) + 16)
/ia64/locks.h
0,0 → 1,61
// locks.h - Thread synchronization primitives. IA64 implementation. |
|
/* Copyright (C) 2002 Free Software Foundation |
|
This file is part of libgcj. |
|
This software is copyrighted work licensed under the terms of the |
Libgcj License. Please consult the file "LIBGCJ_LICENSE" for |
details. */ |
|
#ifndef __SYSDEP_LOCKS_H__ |
#define __SYSDEP_LOCKS_H__ |
|
#include <ia64intrin.h> |
|
typedef size_t obj_addr_t; /* Integer type big enough for object */ |
/* address. */ |
|
inline static bool |
compare_and_swap(volatile obj_addr_t *addr, |
obj_addr_t old, |
obj_addr_t new_val) |
{ |
return __sync_bool_compare_and_swap (addr, old, new_val); |
} |
|
// The fact that *addr is volatile should cause the compiler to |
// automatically generate an st8.rel. |
inline static void |
release_set(volatile obj_addr_t *addr, obj_addr_t new_val) |
{ |
__asm__ __volatile__("" : : : "memory"); |
*(addr) = new_val; |
} |
|
// CAS with release semantics via cmpxchg8.rel: ar.ccv holds the value
// to compare against; the old contents of *addr come back in OUT, so
// success means OUT == OLD.
inline static bool
compare_and_swap_release(volatile obj_addr_t *addr,
			 obj_addr_t old,
			 obj_addr_t new_val)
{
  register unsigned long ar_ccv __asm__("ar.ccv") = old;
  unsigned long out;
  __asm__ __volatile__("cmpxchg8.rel %0=%1,%2,%4"
		       : "=r"(out), "=m"(*addr)
		       : "r"(new_val), "m"(*addr), "d"(ar_ccv) : "memory");
  return (out == old);
}
|
// Ensure that subsequent instructions do not execute on stale
// data that was loaded from memory before the barrier.
inline static void
read_barrier()
{
  __sync_synchronize ();
}

// Ensure that prior stores to memory are completed with respect to
// other processors.
inline static void
write_barrier()
{
  __sync_synchronize ();
}
|
#endif |
/alpha/locks.h
0,0 → 1,66
// locks.h - Thread synchronization primitives. Alpha implementation. |
|
/* Copyright (C) 2002, 2011 Free Software Foundation |
|
This file is part of libgcj. |
|
This software is copyrighted work licensed under the terms of the |
Libgcj License. Please consult the file "LIBGCJ_LICENSE" for |
details. */ |
|
#ifndef __SYSDEP_LOCKS_H__ |
#define __SYSDEP_LOCKS_H__ |
|
/* Integer type big enough for object address. */ |
typedef size_t obj_addr_t; |
|
// Atomically replace *addr by new_val if it was initially equal to old. |
// Return true if the comparison succeeded. |
// Assumed to have acquire semantics, i.e. later memory operations |
// cannot execute before the compare_and_swap finishes. |
inline static bool |
compare_and_swap(volatile obj_addr_t *addr, |
obj_addr_t old, |
obj_addr_t new_val) |
{ |
return __sync_bool_compare_and_swap(addr, old, new_val); |
} |
|
// Set *addr to new_val with release semantics, i.e. making sure |
// that prior loads and stores complete before this |
// assignment. |
inline static void |
release_set(volatile obj_addr_t *addr, obj_addr_t new_val) |
{ |
__sync_synchronize(); |
*(addr) = new_val; |
} |
|
// Compare_and_swap with release semantics instead of acquire semantics. |
// On many architecture, the operation makes both guarantees, so the |
// implementation can be the same. |
inline static bool |
compare_and_swap_release(volatile obj_addr_t *addr, |
obj_addr_t old, |
obj_addr_t new_val) |
{ |
return compare_and_swap(addr, old, new_val); |
} |
|
// Ensure that subsequent instructions do not execute on stale
// data that was loaded from memory before the barrier
// ("mb" is the Alpha full memory barrier).
inline static void
read_barrier()
{
  __asm__ __volatile__("mb" : : : "memory");
}

// Ensure that prior stores to memory are completed with respect to other
// processors ("wmb" orders stores only).
inline static void
write_barrier()
{
  __asm__ __volatile__("wmb" : : : "memory");
}
|
#endif |
/m68k/locks.h
0,0 → 1,72
// locks.h - Thread synchronization primitives. m68k implementation. |
|
/* Copyright (C) 2006 Free Software Foundation |
|
This file is part of libgcj. |
|
This software is copyrighted work licensed under the terms of the |
Libgcj License. Please consult the file "LIBGCJ_LICENSE" for |
details. */ |
|
#ifndef __SYSDEP_LOCKS_H__ |
#define __SYSDEP_LOCKS_H__ |
|
/* Integer type big enough for object address. */ |
typedef size_t obj_addr_t __attribute__ ((aligned (4))); |
|
// Atomically replace *addr by new_val if it was initially equal to old.
// Return true if the comparison succeeded.
// Assumed to have acquire semantics, i.e. later memory operations
// cannot execute before the compare_and_swap finishes.
static inline bool
compare_and_swap(volatile obj_addr_t *addr,
		 obj_addr_t old, obj_addr_t new_val)
{
  char result;
  // cas.l compares *addr with OLD: on match it stores NEW_VAL,
  // otherwise it loads the current value into OLD (hence "+d").
  // seq then captures the comparison result as 0/1 in RESULT.
  __asm__ __volatile__("cas.l %2,%3,%0; seq %1"
		       : "+m" (*addr), "=d" (result), "+d" (old)
		       : "d" (new_val)
		       : "memory");
  return (bool) result;
}
|
// Set *addr to new_val with release semantics, i.e. making sure |
// that prior loads and stores complete before this |
// assignment. |
// On m68k, the hardware shouldn't reorder reads and writes, |
// so we just have to convince gcc not to do it either. |
static inline void |
release_set(volatile obj_addr_t *addr, obj_addr_t new_val) |
{ |
__asm__ __volatile__(" " : : : "memory"); |
*(addr) = new_val; |
} |
|
// Compare_and_swap with release semantics instead of acquire semantics. |
// On many architecture, the operation makes both guarantees, so the |
// implementation can be the same. |
static inline bool |
compare_and_swap_release(volatile obj_addr_t *addr, |
obj_addr_t old, |
obj_addr_t new_val) |
{ |
return compare_and_swap(addr, old, new_val); |
} |
|
// Ensure that subsequent instructions do not execute on stale
// data that was loaded from memory before the barrier.
// On m68k, the hardware ensures that reads are properly ordered,
// so this is intentionally a no-op.
static inline void
read_barrier(void)
{
}

// Ensure that prior stores to memory are completed with respect to other
// processors.
static inline void
write_barrier(void)
{
  // m68k does not reorder writes.  We just need to ensure that gcc
  // doesn't either.
  __asm__ __volatile__(" " : : : "memory");
}
#endif |
/mips/locks.h
0,0 → 1,68
// locks.h - Thread synchronization primitives. MIPS implementation. |
|
/* Copyright (C) 2003 Free Software Foundation |
|
This file is part of libgcj. |
|
This software is copyrighted work licensed under the terms of the |
Libgcj License. Please consult the file "LIBGCJ_LICENSE" for |
details. */ |
|
#ifndef __SYSDEP_LOCKS_H__ |
#define __SYSDEP_LOCKS_H__ |
|
/* Integer type big enough for object address. */ |
typedef unsigned obj_addr_t __attribute__((__mode__(__pointer__))); |
|
|
// Atomically replace *addr by new_val if it was initially equal to old. |
// Return true if the comparison succeeded. |
// Assumed to have acquire semantics, i.e. later memory operations |
// cannot execute before the compare_and_swap finishes. |
inline static bool |
compare_and_swap(volatile obj_addr_t *addr, |
obj_addr_t old, |
obj_addr_t new_val) |
{ |
return __sync_bool_compare_and_swap(addr, old, new_val); |
} |
|
// Set *addr to new_val with release semantics, i.e. making sure |
// that prior loads and stores complete before this |
// assignment. |
inline static void |
release_set(volatile obj_addr_t *addr, obj_addr_t new_val) |
{ |
__sync_synchronize(); |
*(addr) = new_val; |
} |
|
// Compare_and_swap with release semantics instead of acquire semantics. |
// On many architecture, the operation makes both guarantees, so the |
// implementation can be the same. |
inline static bool |
compare_and_swap_release(volatile obj_addr_t *addr, |
obj_addr_t old, |
obj_addr_t new_val) |
{ |
return __sync_bool_compare_and_swap(addr, old, new_val); |
} |
|
// Ensure that subsequent instructions do not execute on stale
// data that was loaded from memory before the barrier.
// (The old comment here referred to X86; this is the MIPS port and
// a real hardware barrier is emitted.)
inline static void
read_barrier()
{
  __sync_synchronize();
}

// Ensure that prior stores to memory are completed with respect to other
// processors.
inline static void
write_barrier()
{
  __sync_synchronize();
}
|
#endif // __SYSDEP_LOCKS_H__ |
/sparc/locks.h
0,0 → 1,142
// locks.h - Thread synchronization primitives. Sparc implementation. |
|
/* Copyright (C) 2002, 2007 Free Software Foundation |
|
This file is part of libgcj. |
|
This software is copyrighted work licensed under the terms of the |
Libgcj License. Please consult the file "LIBGCJ_LICENSE" for |
details. */ |
|
#ifndef __SYSDEP_LOCKS_H__ |
#define __SYSDEP_LOCKS_H__ |
|
typedef size_t obj_addr_t; /* Integer type big enough for object */ |
/* address. */ |
|
#ifdef __arch64__
/* Sparc64 implementation, use cas instruction.  */
// casx compares *addr with OLD and, on match, stores NEW_VAL while
// returning the previous contents in the first operand; the trailing
// membar keeps later stores from moving above the swap.  Success is
// detected by the returned value equalling OLD.
inline static bool
compare_and_swap(volatile obj_addr_t *addr,
		 obj_addr_t old,
		 obj_addr_t new_val)
{
  __asm__ __volatile__("casx [%2], %3, %0\n\t"
		       "membar #StoreLoad | #StoreStore"
		       : "=&r" (new_val)
		       : "0" (new_val), "r" (addr), "r" (old)
		       : "memory");

  return (new_val == old) ? true : false;
}
|
// Publish NEW_VAL with release semantics: the membar orders all prior
// stores and loads ahead of the assignment.
inline static void
release_set(volatile obj_addr_t *addr, obj_addr_t new_val)
{
  __asm__ __volatile__("membar #StoreStore | #LoadStore" : : : "memory");
  *(addr) = new_val;
}
|
inline static bool |
compare_and_swap_release(volatile obj_addr_t *addr, obj_addr_t old, |
obj_addr_t new_val) |
{ |
return compare_and_swap(addr, old, new_val); |
} |
|
// Ensure that subsequent instructions do not execute on stale
// data that was loaded from memory before the barrier.
inline static void
read_barrier()
{
  __asm__ __volatile__("membar #LoadLoad | #LoadStore" : : : "memory");
}

// Ensure that prior stores to memory are completed with respect to
// other processors.
inline static void
write_barrier()
{
  __asm__ __volatile__("membar #StoreLoad | #StoreStore" : : : "memory");
}
#else |
/* Sparc32 implementation, use a spinlock.  */
// Lock byte guarding the simulated CAS below; 0 means "free".
static unsigned char __cas_lock = 0;

// Acquire the spinlock: ldstub atomically reads the lock byte and
// sets it to 0xff.  A zero result means we got it; otherwise spin on
// plain loads until the byte clears, then retry the ldstub.
inline static void
__cas_start_atomic(void)
{
  unsigned int tmp;
  __asm__ __volatile__(
	"1:	ldstub	[%1], %0\n"
	"	orcc	%0, 0x0, %%g0\n"
	"	be	3f\n"
	"	 nop\n"
	"2:	ldub	[%1], %0\n"
	"	orcc	%0, 0x0, %%g0\n"
	"	bne	2b\n"
	"	 nop\n"
	"3:" : "=&r" (tmp)
	    : "r" (&__cas_lock)
	    : "memory", "cc");
}

// Release the spinlock by storing zero back into the lock byte.
inline static void
__cas_end_atomic(void)
{
  __asm__ __volatile__(
  "stb %%g0, [%0]"
  : /* no outputs */
  : "r" (&__cas_lock)
  : "memory");
}
|
inline static bool |
compare_and_swap(volatile obj_addr_t *addr, |
obj_addr_t old, |
obj_addr_t new_val) |
{ |
bool ret; |
|
__cas_start_atomic (); |
if (*addr != old) |
{ |
ret = false; |
} |
else |
{ |
*addr = new_val; |
ret = true; |
} |
__cas_end_atomic (); |
|
return ret; |
} |
|
inline static void |
release_set(volatile obj_addr_t *addr, obj_addr_t new_val) |
{ |
/* Technically stbar would be needed here but no sparc32 |
system actually requires it. Also the stbar would mean |
this code would not work on sparcv7 chips. */ |
__asm__ __volatile__("" : : : "memory"); |
*(addr) = new_val; |
} |
|
inline static bool |
compare_and_swap_release(volatile obj_addr_t *addr, obj_addr_t old, |
obj_addr_t new_val) |
{ |
return compare_and_swap(addr, old, new_val); |
} |
|
// Ensure that subsequent instructions do not execute on stale data
// loaded before the barrier.  Compiler-only barrier on sparc32.
inline static void
read_barrier()
{
  __asm__ __volatile__ ("" : : : "memory");
}

// Ensure that prior stores are completed with respect to other
// processors.  Compiler-only barrier on sparc32.
inline static void
write_barrier()
{
  __asm__ __volatile__ ("" : : : "memory");
}
#endif /* __arch64__ */ |
|
#endif /* ! __SYSDEP_LOCKS_H__ */ |
/i386/locks.h
0,0 → 1,69
/* locks.h - Thread synchronization primitives. X86/x86-64 implementation. |
|
Copyright (C) 2002, 2011 Free Software Foundation |
|
This file is part of libgcj. |
|
This software is copyrighted work licensed under the terms of the |
Libgcj License. Please consult the file "LIBGCJ_LICENSE" for |
details. */ |
|
#ifndef __SYSDEP_LOCKS_H__ |
#define __SYSDEP_LOCKS_H__ |
|
typedef size_t obj_addr_t; /* Integer type big enough for object */ |
/* address. */ |
|
// Atomically replace *addr by new_val if it was initially equal to old. |
// Return true if the comparison succeeded. |
// Assumed to have acquire semantics, i.e. later memory operations |
// cannot execute before the compare_and_swap finishes. |
inline static bool |
compare_and_swap(volatile obj_addr_t *addr, |
obj_addr_t old, |
obj_addr_t new_val) |
{ |
return __sync_bool_compare_and_swap (addr, old, new_val); |
} |
|
// Ensure that subsequent instructions do not execute on stale
// data that was loaded from memory before the barrier.
// On X86/x86-64, the hardware ensures that reads are properly
// ordered, so this is intentionally a no-op.
inline static void
read_barrier()
{
}

// Ensure that prior stores to memory are completed with respect to other
// processors.
inline static void
write_barrier()
{
  /* x86-64/X86 does not reorder writes.  We just need to ensure that
     gcc also doesn't.  */
  __asm__ __volatile__(" " : : : "memory");
}
|
// Set *addr to new_val with release semantics, i.e. making sure |
// that prior loads and stores complete before this |
// assignment. |
// On X86/x86-64, the hardware shouldn't reorder reads and writes, |
// so we just have to convince gcc not to do it either. |
inline static void |
release_set(volatile obj_addr_t *addr, obj_addr_t new_val) |
{ |
write_barrier (); |
*(addr) = new_val; |
} |
|
// Compare_and_swap with release semantics instead of acquire semantics. |
// On many architecture, the operation makes both guarantees, so the |
// implementation can be the same. |
inline static bool |
compare_and_swap_release(volatile obj_addr_t *addr, |
obj_addr_t old, |
obj_addr_t new_val) |
{ |
return compare_and_swap(addr, old, new_val); |
} |
#endif |
/i386/backtrace.h
0,0 → 1,123
// backtrace.h - Fallback backtrace implementation. i386 implementation. |
|
/* Copyright (C) 2005, 2006 Free Software Foundation |
|
This file is part of libgcj. |
|
This software is copyrighted work licensed under the terms of the |
Libgcj License. Please consult the file "LIBGCJ_LICENSE" for |
details. */ |
|
#ifndef __SYSDEP_BACKTRACE_H__ |
#define __SYSDEP_BACKTRACE_H__ |
|
#include <java-stack.h> |
|
// Pick the outermost interesting entry point per platform; unwinding
// stops once a frame at or near MAIN_FUNC is reached.
#ifdef __CYGWIN__
/* To allow this to link as a DLL.  */
#define MAIN_FUNC dll_crt0__FP11per_process
extern "C" int MAIN_FUNC () __declspec(dllimport);
#elif defined (_WIN32)
#define MAIN_FUNC DllMain
extern "C" int __stdcall MAIN_FUNC (void *, unsigned long, void *);
#else /* !__CYGWIN__ && !_WIN32 */
#define MAIN_FUNC main
extern int MAIN_FUNC (int, char **);
#endif /* ?__CYGWIN__ */

/* The context used to keep track of our position while unwinding through
   the call stack.  */
struct _Unwind_Context
{
  /* The starting address of the method.  */
  _Jv_uintptr_t meth_addr;

  /* The return address in the method.  */
  _Jv_uintptr_t ret_addr;
};

#ifdef SJLJ_EXCEPTIONS

// With setjmp/longjmp exceptions there is no unwinder; route the
// standard _Unwind_* entry points at our frame-pointer walker below.
#undef _Unwind_GetIPInfo
#define _Unwind_GetIPInfo(ctx,ip_before_insn) \
  (*(ip_before_insn) = 1, (ctx)->ret_addr)

#undef _Unwind_GetRegionStart
#define _Unwind_GetRegionStart(ctx) \
  ((ctx)->meth_addr)

#undef _Unwind_Backtrace
#define _Unwind_Backtrace(trace_fn,state_ptr) \
  (fallback_backtrace (trace_fn, state_ptr))

#endif /* SJLJ_EXCEPTIONS */
|
/* Unwind through the call stack calling TRACE_FN with STATE for each stack
   frame.  Returns the reason why the unwinding was stopped.

   NOTE(review): this walks the %ebp frame-pointer chain, so it only
   works for code compiled with frame pointers; the prologue scan
   below is heuristic and can misattribute method start addresses.  */
_Unwind_Reason_Code
fallback_backtrace (_Unwind_Trace_Fn trace_fn, _Jv_UnwindState *state)
{
  register _Jv_uintptr_t *_ebp __asm__ ("ebp");
  register _Jv_uintptr_t _esp __asm__ ("esp");
  _Jv_uintptr_t rfp;
  _Unwind_Context ctx;

  for (rfp = *_ebp; rfp; rfp = *(_Jv_uintptr_t *)rfp)
    {
      /* Sanity checks to eliminate dubious-looking frame pointer chains.
	 The frame pointer should be a 32-bit word-aligned stack address.
	 Since the stack grows downwards on x86, the frame pointer must have
	 a value greater than the current value of the stack pointer, it
	 should not be below the supposed next frame pointer and it should
	 not be too far off from the supposed next frame pointer.  */
      int diff = *(_Jv_uintptr_t *)rfp - rfp;
      if ((rfp & 0x00000003) != 0 || rfp < _esp
	  || diff > 4 * 1024 || diff < 0)
	break;

      /* Get the return address in the calling function.  This is stored on
	 the stack just before the value of the old frame pointer.  */
      ctx.ret_addr = *(_Jv_uintptr_t *)(rfp + sizeof (_Jv_uintptr_t));

      /* Try to locate a "pushl %ebp; movl %esp, %ebp" function prologue
	 by scanning backwards at even addresses below the return address.
	 This instruction sequence is encoded either as 0x55 0x89 0xE5 or as
	 0x55 0x8B 0xEC.  We give up if we do not find this sequence even
	 after scanning 1024K of memory.
	 FIXME: This is not robust and will probably give us false positives,
	 but this is about the best we can do if we do not have DWARF-2 unwind
	 information based exception handling.  */
      ctx.meth_addr = (_Jv_uintptr_t)NULL;
      _Jv_uintptr_t scan_addr = (ctx.ret_addr & 0xFFFFFFFE) - 2;
      _Jv_uintptr_t limit_addr
	= (scan_addr > 1024 * 1024) ? (scan_addr - 1024 * 1024) : 2;
      for ( ; scan_addr >= limit_addr; scan_addr -= 2)
	{
	  unsigned char *scan_bytes = (unsigned char *)scan_addr;
	  if (scan_bytes[0] == 0x55
	      && ((scan_bytes[1] == 0x89 && scan_bytes[2] == 0xE5)
		  || (scan_bytes[1] == 0x8B && scan_bytes[2] == 0xEC)))
	    {
	      ctx.meth_addr = scan_addr;
	      break;
	    }
	}

      /* Now call the unwinder callback function.  */
      if (trace_fn != NULL)
	(*trace_fn) (&ctx, state);

      /* No need to unwind beyond _Jv_RunMain(), _Jv_ThreadStart or
	 main().  */
      void *jv_runmain
	= (void *)(void (*)(JvVMInitArgs *, jclass, const char *, int,
			    const char **, bool))_Jv_RunMain;
      if (ctx.meth_addr == (_Jv_uintptr_t)jv_runmain
	  || ctx.meth_addr == (_Jv_uintptr_t)_Jv_ThreadStart
	  || (ctx.meth_addr - (_Jv_uintptr_t)MAIN_FUNC) < 16)
	break;
    }

  return _URC_NO_REASON;
}
/sh/locks.h
0,0 → 1,84
// locks.h - Thread synchronization primitives. SuperH implementation. |
|
/* Copyright (C) 2002, 2007 Free Software Foundation |
|
This file is part of libgcj. |
|
This software is copyrighted work licensed under the terms of the |
Libgcj License. Please consult the file "LIBGCJ_LICENSE" for |
details. */ |
|
#ifndef __SYSDEP_LOCKS_H__ |
#define __SYSDEP_LOCKS_H__ |
|
typedef size_t obj_addr_t; /* Integer type big enough for object */ |
/* address. */ |
|
// Global spin-lock byte protecting the emulated compare-and-swap below.
// Classic SuperH has no native CAS instruction, only the atomic
// test-and-set instruction tas.b, so CAS is built from a spin lock.
static unsigned char __cas_lock = 0;

// Acquire the global CAS spin lock.
// tas.b atomically reads the byte at __cas_lock, sets the T flag to 1
// if the byte was zero, and sets the byte's most significant bit; movt
// then copies T into VAL.  val == 1 therefore means the lock was free
// and is now ours; spin while val == 0 (another thread holds it).
inline static void
__cas_start_atomic (void)
{
  unsigned int val;

  do
    __asm__ __volatile__ ("tas.b @%1; movt %0"
			  : "=r" (val)
			  : "r" (&__cas_lock)
			  : "memory");
  while (val == 0);
}

// Release the global CAS spin lock.  The empty asm with a "memory"
// clobber is a compiler barrier only: it keeps the stores made inside
// the critical section from being reordered past the unlocking store.
inline static void
__cas_end_atomic (void)
{
  __asm__ __volatile__ (" " : : : "memory");
  __cas_lock = 0;
}
|
inline static bool |
compare_and_swap (volatile obj_addr_t *addr, obj_addr_t old, |
obj_addr_t new_val) |
{ |
bool ret; |
|
__cas_start_atomic (); |
if (*addr != old) |
ret = false; |
else |
{ |
*addr = new_val; |
ret = true; |
} |
__cas_end_atomic (); |
|
return ret; |
} |
|
// Store new_val into *addr with release semantics: the compiler
// barrier keeps prior loads and stores from being reordered after the
// assignment.  No hardware fence is issued — NOTE(review): this
// matches the compiler-only read/write barriers below and presumably
// relies on in-order/uniprocessor SuperH memory behavior; confirm for
// any SMP target.
inline static void
release_set (volatile obj_addr_t *addr, obj_addr_t new_val)
{
  __asm__ __volatile__ (" " : : : "memory");
  *(addr) = new_val;
}
|
// Compare-and-swap with release instead of acquire semantics.  The
// lock-based implementation above orders memory on both sides of the
// operation, so the acquire flavour is reused unchanged.
inline static bool
compare_and_swap_release (volatile obj_addr_t *addr, obj_addr_t old,
			  obj_addr_t new_val)
{
  return compare_and_swap (addr, old, new_val);
}
|
// Keep subsequent loads from being moved above this point by the
// compiler.  Compiler barrier only; no fence instruction is emitted.
inline static void
read_barrier()
{
  __asm__ __volatile__(" " : : : "memory");
}
|
// Keep prior stores from being moved below this point by the
// compiler.  Compiler barrier only; no fence instruction is emitted.
inline static void
write_barrier()
{
  __asm__ __volatile__(" " : : : "memory");
}
|
#endif /* ! __SYSDEP_LOCKS_H__ */ |
/descriptor-y.h
0,0 → 1,5
// Given a function pointer, return the code address.

// On this target a function "pointer" really points at a function
// descriptor.  The descriptor is several words long, but only its
// first word — the actual code address — matters here, so simply
// dereference the descriptor once.
#define UNWRAP_FUNCTION_DESCRIPTOR(X) (*(void **)(X))
/generic/locks.h
0,0 → 1,11
// locks.h - Thread synchronization primitives. Generic implementation. |
|
/* Copyright (C) 2002 Free Software Foundation |
|
This file is part of libgcj. |
|
This software is copyrighted work licensed under the terms of the |
Libgcj License. Please consult the file "LIBGCJ_LICENSE" for |
details. */ |
|
#error Thread synchronization primitives not implemented for this platform. |
/generic/backtrace.h
0,0 → 1,23
// backtrace.h - Fallback backtrace implementation. default implementation. |
|
/* Copyright (C) 2005, 2006 Free Software Foundation |
|
This file is part of libgcj. |
|
This software is copyrighted work licensed under the terms of the |
Libgcj License. Please consult the file "LIBGCJ_LICENSE" for |
details. */ |
|
#ifndef __SYSDEP_BACKTRACE_H__ |
#define __SYSDEP_BACKTRACE_H__ |
|
#include <java-stack.h> |
|
/* Unwind through the call stack calling TRACE_FN with STATE for every stack |
frame. Returns the reason why the unwinding was stopped. */ |
// Default stub: this platform has no fallback unwinder, so perform no
// unwinding at all — the trace function is never invoked and success
// is reported immediately.
_Unwind_Reason_Code
fallback_backtrace (_Unwind_Trace_Fn, _Jv_UnwindState *)
{
  return _URC_NO_REASON;
}
#endif |
/s390/locks.h
0,0 → 1,77
// locks.h - Thread synchronization primitives. S/390 implementation. |
|
/* Copyright (C) 2002 Free Software Foundation |
|
This file is part of libgcj. |
|
This software is copyrighted work licensed under the terms of the |
Libgcj License. Please consult the file "LIBGCJ_LICENSE" for |
details. */ |
|
#ifndef __SYSDEP_LOCKS_H__ |
#define __SYSDEP_LOCKS_H__ |
|
typedef size_t obj_addr_t; /* Integer type big enough for object */ |
/* address. */ |
|
// Atomically replace *addr by new_val if it was initially equal to old.
// Return true if the comparison succeeded.
// Assumed to have acquire semantics, i.e. later memory operations
// cannot execute before the compare_and_swap finishes.
inline static bool
compare_and_swap(volatile obj_addr_t *addr,
                 obj_addr_t old, obj_addr_t new_val)
{
  int result;

  __asm__ __volatile__ (
        // CS (32-bit) / CSG (64-bit) compares OLD with *ADDR and, on
        // equality, stores NEW_VAL; the condition code records the
        // outcome (CC 0 = swap performed, CC 1 = values differed).
#ifndef __s390x__
        " cs %1,%2,0(%3)\n"
#else
        " csg %1,%2,0(%3)\n"
#endif
        // IPM inserts the condition code into the high bits of %0;
        // SRL by 28 leaves just the CC value (0-3) in the register.
        " ipm %0\n"
        " srl %0,28\n"
        : "=&d" (result), "+d" (old)
        : "d" (new_val), "a" (addr)
        : "cc", "memory");

  return result == 0;  // CC 0 means the swap took place.
}
|
// Set *addr to new_val with release semantics, i.e. making sure
// that prior loads and stores complete before this assignment.
// "bcr 15,0" is a no-op branch (all-ones mask, target register 0)
// that acts as a serializing instruction on S/390, i.e. a full
// hardware memory barrier.
inline static void
release_set(volatile obj_addr_t *addr, obj_addr_t new_val)
{
  __asm__ __volatile__("bcr 15,0" : : : "memory");
  *(addr) = new_val;
}
|
// Compare_and_swap with release semantics instead of acquire semantics.
// On many architectures, the operation makes both guarantees, so the
// implementation can be the same; on S/390 the interlocked CS/CSG
// above already orders memory on both sides.
inline static bool
compare_and_swap_release(volatile obj_addr_t *addr,
                         obj_addr_t old, obj_addr_t new_val)
{
  return compare_and_swap(addr, old, new_val);
}
|
// Ensure that subsequent instructions do not execute on stale
// data that was loaded from memory before the barrier.
// Implemented with "bcr 15,0", the S/390 serializing no-op branch,
// which acts as a full hardware memory barrier.
inline static void
read_barrier()
{
  __asm__ __volatile__("bcr 15,0" : : : "memory");
}
|
// Ensure that prior stores to memory are completed with respect to other
// processors.  Uses the same "bcr 15,0" serializing no-op as
// read_barrier above, which is a full barrier on S/390.
inline static void
write_barrier()
{
  __asm__ __volatile__("bcr 15,0" : : : "memory");
}
#endif |