URL: https://opencores.org/ocsvn/openrisc/openrisc/trunk
Subversion Repositories: openrisc
Compare Revisions
This comparison shows the changes necessary to convert path
/openrisc/tags/gnu-dev/fsf-gcc-snapshot-1-mar-12/or1k-gcc/libgo/runtime
from Rev 747 to Rev 783.
Rev 747 → Rev 783
/go-unsafe-pointer.c
0,0 → 1,101
/* go-unsafe-pointer.c -- unsafe.Pointer type descriptor for Go. |
|
Copyright 2009 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include <stddef.h> |
|
#include "go-string.h" |
#include "go-type.h" |
|
/* This file provides the type descriptor for the unsafe.Pointer type. |
The unsafe package is defined by the compiler itself, which means |
that there is no package to compile to define the type |
descriptor. */ |
|
extern const struct __go_type_descriptor unsafe_Pointer |
asm ("__go_tdn_libgo_unsafe.unsafe.Pointer"); |
|
/* Used to determine the field alignment. */ |
struct field_align |
{ |
char c; |
void *p; |
}; |
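/* A note on the trick above: in a struct { char c; void *p; }, the |
   compiler places p at the smallest offset satisfying the alignment |
   of void *, so offsetof (struct field_align, p) equals that |
   alignment.  The descriptors below store the value minus one, |
   matching the __field_align convention in go-type.h.  For example, |
   on a typical 64-bit target (numbers illustrative, ABI-dependent): |
 |
     offsetof (struct field_align, p) == 8  =>  __field_align == 7  */ |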
|
/* The reflection string. */ |
#define REFLECTION "unsafe.Pointer" |
static const struct __go_string reflection_string = |
{ |
(const unsigned char *) REFLECTION, |
sizeof REFLECTION - 1 |
}; |
|
const struct __go_type_descriptor unsafe_Pointer = |
{ |
/* __code */ |
GO_UNSAFE_POINTER, |
/* __align */ |
__alignof (void *), |
/* __field_align */ |
offsetof (struct field_align, p) - 1, |
/* __size */ |
sizeof (void *), |
/* __hash */ |
78501163U, |
/* __hashfn */ |
__go_type_hash_identity, |
/* __equalfn */ |
__go_type_equal_identity, |
/* __reflection */ |
&reflection_string, |
/* __uncommon */ |
NULL, |
/* __pointer_to_this */ |
NULL |
}; |
|
/* We also need the type descriptor for the pointer to unsafe.Pointer, |
since any package which refers to that type descriptor will expect |
it to be defined elsewhere. */ |
|
extern const struct __go_ptr_type pointer_unsafe_Pointer |
asm ("__go_td_pN27_libgo_unsafe.unsafe.Pointer"); |
|
/* The reflection string. */ |
#define PREFLECTION "*unsafe.Pointer" |
static const struct __go_string preflection_string = |
{ |
(const unsigned char *) PREFLECTION, |
sizeof PREFLECTION - 1, |
}; |
|
const struct __go_ptr_type pointer_unsafe_Pointer = |
{ |
/* __common */ |
{ |
/* __code */ |
GO_PTR, |
/* __align */ |
__alignof (void *), |
/* __field_align */ |
offsetof (struct field_align, p) - 1, |
/* __size */ |
sizeof (void *), |
/* __hash */ |
1256018616U, |
/* __hashfn */ |
__go_type_hash_identity, |
/* __equalfn */ |
__go_type_equal_identity, |
/* __reflection */ |
&preflection_string, |
/* __uncommon */ |
NULL, |
/* __pointer_to_this */ |
NULL |
}, |
/* __element_type */ |
&unsafe_Pointer |
}; |
/go-construct-map.c
0,0 → 1,34
/* go-construct-map.c -- construct a map from an initializer. |
|
Copyright 2009 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include <stddef.h> |
#include <stdint.h> |
#include <stdlib.h> |
|
#include "map.h" |
|
struct __go_map * |
__go_construct_map (const struct __go_map_descriptor *descriptor, |
uintptr_t count, uintptr_t entry_size, |
uintptr_t val_offset, uintptr_t val_size, |
const void *ventries) |
{ |
struct __go_map *ret; |
const unsigned char *entries; |
uintptr_t i; |
|
ret = __go_new_map (descriptor, count); |
|
entries = (const unsigned char *) ventries; |
for (i = 0; i < count; ++i) |
{ |
void *val = __go_map_index (ret, entries, 1); |
__builtin_memcpy (val, entries + val_offset, val_size); |
entries += entry_size; |
} |
|
return ret; |
} |
/mcentral.c
0,0 → 1,201
// Copyright 2009 The Go Authors. All rights reserved. |
// Use of this source code is governed by a BSD-style |
// license that can be found in the LICENSE file. |
|
// Central free lists. |
// |
// See malloc.h for an overview. |
// |
// The MCentral doesn't actually contain the list of free objects; the MSpan does. |
// Each MCentral is two lists of MSpans: those with free objects (c->nonempty) |
// and those that are completely allocated (c->empty). |
// |
// TODO(rsc): tcmalloc uses a "transfer cache" to split the list |
// into sections of class_to_transfercount[sizeclass] objects |
// so that it is faster to move those lists between MCaches and MCentrals. |
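// |
// A sketch of the flow (assuming the structures in malloc.h): an |
// MCache asks its MCentral for objects; the MCentral pops them from |
// a span on c->nonempty, moving the span to c->empty once that |
// span's freelist drains.  Frees run in reverse, and a span whose |
// reference count drops to zero is handed back to the MHeap (see |
// MCentral_Free below). |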
|
#include "runtime.h" |
#include "arch.h" |
#include "malloc.h" |
|
static bool MCentral_Grow(MCentral *c); |
static void* MCentral_Alloc(MCentral *c); |
static void MCentral_Free(MCentral *c, void *v); |
|
// Initialize a single central free list. |
void |
runtime_MCentral_Init(MCentral *c, int32 sizeclass) |
{ |
c->sizeclass = sizeclass; |
runtime_MSpanList_Init(&c->nonempty); |
runtime_MSpanList_Init(&c->empty); |
} |
|
// Allocate up to n objects from the central free list. |
// Return the number of objects allocated. |
// The objects are linked together by their first words. |
// On return, *pfirst points at the first object. |
int32 |
runtime_MCentral_AllocList(MCentral *c, int32 n, MLink **pfirst) |
{ |
MLink *first, *last, *v; |
int32 i; |
|
runtime_lock(c); |
// Replenish central list if empty. |
if(runtime_MSpanList_IsEmpty(&c->nonempty)) { |
if(!MCentral_Grow(c)) { |
runtime_unlock(c); |
*pfirst = nil; |
return 0; |
} |
} |
|
// Copy from list, up to n. |
// First one is guaranteed to work, because we just grew the list. |
first = MCentral_Alloc(c); |
last = first; |
for(i=1; i<n && (v = MCentral_Alloc(c)) != nil; i++) { |
last->next = v; |
last = v; |
} |
last->next = nil; |
c->nfree -= i; |
|
runtime_unlock(c); |
*pfirst = first; |
return i; |
} |
|
// Helper: allocate one object from the central free list. |
static void* |
MCentral_Alloc(MCentral *c) |
{ |
MSpan *s; |
MLink *v; |
|
if(runtime_MSpanList_IsEmpty(&c->nonempty)) |
return nil; |
s = c->nonempty.next; |
s->ref++; |
v = s->freelist; |
s->freelist = v->next; |
if(s->freelist == nil) { |
runtime_MSpanList_Remove(s); |
runtime_MSpanList_Insert(&c->empty, s); |
} |
return v; |
} |
|
// Free n objects back into the central free list. |
// The objects are linked together by their first words, |
// with start pointing at the first object in the list. |
void |
runtime_MCentral_FreeList(MCentral *c, int32 n, MLink *start) |
{ |
MLink *v, *next; |
|
// Assume next == nil marks end of list. |
// n and end would be useful if we implemented |
// the transfer cache optimization in the TODO above. |
USED(n); |
|
runtime_lock(c); |
for(v=start; v; v=next) { |
next = v->next; |
MCentral_Free(c, v); |
} |
runtime_unlock(c); |
} |
|
// Helper: free one object back into the central free list. |
static void |
MCentral_Free(MCentral *c, void *v) |
{ |
MSpan *s; |
MLink *p; |
int32 size; |
|
// Find span for v. |
s = runtime_MHeap_Lookup(&runtime_mheap, v); |
if(s == nil || s->ref == 0) |
runtime_throw("invalid free"); |
|
// Move to nonempty if necessary. |
if(s->freelist == nil) { |
runtime_MSpanList_Remove(s); |
runtime_MSpanList_Insert(&c->nonempty, s); |
} |
|
// Add v back to s's free list. |
p = v; |
p->next = s->freelist; |
s->freelist = p; |
c->nfree++; |
|
// If s is completely freed, return it to the heap. |
if(--s->ref == 0) { |
size = runtime_class_to_size[c->sizeclass]; |
runtime_MSpanList_Remove(s); |
runtime_unmarkspan((byte*)(s->start<<PageShift), s->npages<<PageShift); |
*(uintptr*)(s->start<<PageShift) = 1; // needs zeroing |
s->freelist = nil; |
c->nfree -= (s->npages << PageShift) / size; |
runtime_unlock(c); |
runtime_MHeap_Free(&runtime_mheap, s, 0); |
runtime_lock(c); |
} |
} |
|
void |
runtime_MGetSizeClassInfo(int32 sizeclass, uintptr *sizep, int32 *npagesp, int32 *nobj) |
{ |
int32 size; |
int32 npages; |
|
npages = runtime_class_to_allocnpages[sizeclass]; |
size = runtime_class_to_size[sizeclass]; |
*npagesp = npages; |
*sizep = size; |
*nobj = (npages << PageShift) / size; |
} |
|
// Fetch a new span from the heap and |
// carve into objects for the free list. |
static bool |
MCentral_Grow(MCentral *c) |
{ |
int32 i, n, npages; |
uintptr size; |
MLink **tailp, *v; |
byte *p; |
MSpan *s; |
|
runtime_unlock(c); |
runtime_MGetSizeClassInfo(c->sizeclass, &size, &npages, &n); |
s = runtime_MHeap_Alloc(&runtime_mheap, npages, c->sizeclass, 0); |
if(s == nil) { |
// TODO(rsc): Log out of memory |
runtime_lock(c); |
return false; |
} |
|
// Carve span into sequence of blocks. |
tailp = &s->freelist; |
p = (byte*)(s->start << PageShift); |
s->limit = p + size*n; |
for(i=0; i<n; i++) { |
v = (MLink*)p; |
*tailp = v; |
tailp = &v->next; |
p += size; |
} |
*tailp = nil; |
runtime_markspan((byte*)(s->start<<PageShift), size, n, size*n < (s->npages<<PageShift)); |
|
runtime_lock(c); |
c->nfree += n; |
runtime_MSpanList_Insert(&c->nonempty, s); |
return true; |
} |
/interface.h
0,0 → 1,57
/* interface.h -- the interface type for Go. |
|
Copyright 2009 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#ifndef LIBGO_INTERFACE_H |
#define LIBGO_INTERFACE_H |
|
#include "go-type.h" |
|
/* A variable of interface type is an instance of this struct, if the |
interface has any methods. */ |
|
struct __go_interface |
{ |
/* A pointer to the interface method table. The first pointer is |
the type descriptor of the object. Subsequent pointers are |
pointers to functions. This is effectively the vtable for this |
interface. The function pointers are in the same order as the |
list in the internal representation of the interface, which sorts |
them by name. */ |
const void **__methods; |
|
/* The object. If the object is a pointer--if the type descriptor |
code is GO_PTR or GO_UNSAFE_POINTER--then this field is the value |
of the object itself. Otherwise this is a pointer to memory |
which holds the value. */ |
void *__object; |
}; |
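 |
/* As an illustration (not part of the ABI definition above): for a |
   two-method interface { M(); N() } holding a value of type T, the |
   table pointed to by __methods would look like |
 |
     __methods[0]: type descriptor for T |
     __methods[1]: pointer to T's M (first in sorted method order) |
     __methods[2]: pointer to T's N |
 |
   so a call i.N() compiles to roughly |
   (*(void (*)(void *)) i.__methods[2]) (i.__object). */ |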
|
/* A variable of an empty interface type is an instance of this |
struct. */ |
|
struct __go_empty_interface |
{ |
/* The type descriptor of the object. */ |
const struct __go_type_descriptor *__type_descriptor; |
|
/* The object. This is the same as __go_interface above. */ |
void *__object; |
}; |
|
extern void * |
__go_convert_interface (const struct __go_type_descriptor *, |
const struct __go_type_descriptor *); |
|
extern void * |
__go_convert_interface_2 (const struct __go_type_descriptor *, |
const struct __go_type_descriptor *, |
_Bool may_fail); |
|
extern _Bool |
__go_can_convert_to_interface(const struct __go_type_descriptor *, |
const struct __go_type_descriptor *); |
|
#endif /* !defined(LIBGO_INTERFACE_H) */ |
/go-now.c
0,0 → 1,31
// Copyright 2011 The Go Authors. All rights reserved. |
// Use of this source code is governed by a BSD-style |
// license that can be found in the LICENSE file. |
|
#include <stddef.h> |
#include <stdint.h> |
#include <sys/time.h> |
|
// Return current time. This is the implementation of time.now(). |
|
struct time_now_ret |
{ |
int64_t sec; |
int32_t nsec; |
}; |
|
struct time_now_ret now() |
__asm__ ("libgo_time.time.now") |
__attribute__ ((no_split_stack)); |
|
struct time_now_ret |
now() |
{ |
struct timeval tv; |
struct time_now_ret ret; |
|
gettimeofday (&tv, NULL); |
ret.sec = tv.tv_sec; |
ret.nsec = tv.tv_usec * 1000; |
return ret; |
} |
/go-unwind.c
0,0 → 1,440
/* go-unwind.c -- unwind the stack for panic/recover. |
|
Copyright 2010 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include "config.h" |
|
#include <stdlib.h> |
#include <unistd.h> |
|
#include "unwind.h" |
#define NO_SIZE_OF_ENCODED_VALUE |
#include "unwind-pe.h" |
|
#include "runtime.h" |
#include "go-alloc.h" |
#include "go-defer.h" |
#include "go-panic.h" |
|
/* The code for a Go exception. */ |
|
#ifdef __ARM_EABI_UNWINDER__ |
static const _Unwind_Exception_Class __go_exception_class = |
{ 'G', 'N', 'U', 'C', 'G', 'O', '\0', '\0' }; |
#else |
static const _Unwind_Exception_Class __go_exception_class = |
((((((((_Unwind_Exception_Class) 'G' |
<< 8 | (_Unwind_Exception_Class) 'N') |
<< 8 | (_Unwind_Exception_Class) 'U') |
<< 8 | (_Unwind_Exception_Class) 'C') |
<< 8 | (_Unwind_Exception_Class) 'G') |
<< 8 | (_Unwind_Exception_Class) 'O') |
<< 8 | (_Unwind_Exception_Class) '\0') |
<< 8 | (_Unwind_Exception_Class) '\0'); |
#endif |
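 |
/* Either way the class spells out the eight bytes "GNUCGO\0\0"; on |
   the non-ARM path they are packed into the integer-typed |
   _Unwind_Exception_Class so that 'G' lands in the most significant |
   byte.  Foreign unwinders compare this value to recognize (or skip) |
   Go exceptions. */ |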
|
|
/* This function is called by exception handlers used when unwinding |
the stack after a recovered panic. The exception handler looks |
like this: |
__go_check_defer (frame); |
return; |
If we have not yet reached the frame we are looking for, we |
continue unwinding. */ |
|
void |
__go_check_defer (_Bool *frame) |
{ |
G *g; |
struct _Unwind_Exception *hdr; |
|
g = runtime_g (); |
|
if (g == NULL) |
{ |
/* Some other language has thrown an exception. We know there |
are no defer handlers, so there is nothing to do. */ |
} |
else if (g->is_foreign) |
{ |
struct __go_panic_stack *n; |
_Bool was_recovered; |
|
/* Some other language has thrown an exception. We need to run |
the local defer handlers. If they call recover, we stop |
unwinding the stack here. */ |
|
n = ((struct __go_panic_stack *) |
__go_alloc (sizeof (struct __go_panic_stack))); |
|
n->__arg.__type_descriptor = NULL; |
n->__arg.__object = NULL; |
n->__was_recovered = 0; |
n->__is_foreign = 1; |
n->__next = g->panic; |
g->panic = n; |
|
while (1) |
{ |
struct __go_defer_stack *d; |
void (*pfn) (void *); |
|
d = g->defer; |
if (d == NULL || d->__frame != frame || d->__pfn == NULL) |
break; |
|
pfn = d->__pfn; |
g->defer = d->__next; |
|
(*pfn) (d->__arg); |
|
__go_free (d); |
|
if (n->__was_recovered) |
{ |
/* The recover function caught the panic thrown by some |
other language. */ |
break; |
} |
} |
|
was_recovered = n->__was_recovered; |
g->panic = n->__next; |
__go_free (n); |
|
if (was_recovered) |
{ |
/* Just return and continue executing Go code. */ |
*frame = 1; |
return; |
} |
|
/* We are panicking through this function. */ |
*frame = 0; |
} |
else if (g->defer != NULL |
&& g->defer->__pfn == NULL |
&& g->defer->__frame == frame) |
{ |
struct __go_defer_stack *d; |
|
/* This is the defer function which called recover. Simply |
return to stop the stack unwind, and let the Go code continue |
to execute. */ |
d = g->defer; |
g->defer = d->__next; |
__go_free (d); |
|
/* We are returning from this function. */ |
*frame = 1; |
|
return; |
} |
|
/* This is some other defer function. It was already run by the |
call to panic, or just above. Rethrow the exception. */ |
|
hdr = (struct _Unwind_Exception *) g->exception; |
|
#ifdef LIBGO_SJLJ_EXCEPTIONS |
_Unwind_SjLj_Resume_or_Rethrow (hdr); |
#else |
#if defined(_LIBUNWIND_STD_ABI) |
_Unwind_RaiseException (hdr); |
#else |
_Unwind_Resume_or_Rethrow (hdr); |
#endif |
#endif |
|
/* Rethrowing the exception should not return. */ |
abort(); |
} |
|
/* Unwind function calls until we reach the one which used a defer |
function which called recover. Each function which uses a defer |
statement will have an exception handler, as shown above. */ |
|
void |
__go_unwind_stack () |
{ |
struct _Unwind_Exception *hdr; |
|
hdr = ((struct _Unwind_Exception *) |
__go_alloc (sizeof (struct _Unwind_Exception))); |
__builtin_memcpy (&hdr->exception_class, &__go_exception_class, |
sizeof hdr->exception_class); |
hdr->exception_cleanup = NULL; |
|
runtime_g ()->exception = hdr; |
|
#ifdef __USING_SJLJ_EXCEPTIONS__ |
_Unwind_SjLj_RaiseException (hdr); |
#else |
_Unwind_RaiseException (hdr); |
#endif |
|
/* Raising an exception should not return. */ |
abort (); |
} |
|
/* The rest of this code is really similar to gcc/unwind-c.c and |
libjava/exception.cc. */ |
|
typedef struct |
{ |
_Unwind_Ptr Start; |
_Unwind_Ptr LPStart; |
_Unwind_Ptr ttype_base; |
const unsigned char *TType; |
const unsigned char *action_table; |
unsigned char ttype_encoding; |
unsigned char call_site_encoding; |
} lsda_header_info; |
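 |
/* For reference, parse_lsda_header below consumes the standard |
   DWARF-EH LSDA header, which is laid out as: |
 |
     1 byte    encoding of @LPStart (DW_EH_PE_omit => use region start) |
     [value]   @LPStart, present only if not omitted |
     1 byte    encoding of the @TType entries |
     [uleb128] self-relative offset to the end of the type table, |
               present only if TType is not omitted |
     1 byte    encoding of the call-site table entries |
     [uleb128] length of the call-site table |
 |
   The action table begins immediately after the call-site table. */ |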
|
static const unsigned char * |
parse_lsda_header (struct _Unwind_Context *context, const unsigned char *p, |
lsda_header_info *info) |
{ |
_uleb128_t tmp; |
unsigned char lpstart_encoding; |
|
info->Start = (context ? _Unwind_GetRegionStart (context) : 0); |
|
/* Find @LPStart, the base to which landing pad offsets are relative. */ |
lpstart_encoding = *p++; |
if (lpstart_encoding != DW_EH_PE_omit) |
p = read_encoded_value (context, lpstart_encoding, p, &info->LPStart); |
else |
info->LPStart = info->Start; |
|
/* Find @TType, the base of the handler and exception spec type data. */ |
info->ttype_encoding = *p++; |
if (info->ttype_encoding != DW_EH_PE_omit) |
{ |
p = read_uleb128 (p, &tmp); |
info->TType = p + tmp; |
} |
else |
info->TType = 0; |
|
/* The encoding and length of the call-site table; the action table |
immediately follows. */ |
info->call_site_encoding = *p++; |
p = read_uleb128 (p, &tmp); |
info->action_table = p + tmp; |
|
return p; |
} |
|
/* The personality function is invoked when unwinding the stack due to |
a panic. Its job is to find the cleanup and exception handlers to |
run. We can't split the stack here, because we won't be able to |
unwind from that split. */ |
|
#ifdef __ARM_EABI_UNWINDER__ |
/* ARM EABI personality routines must also unwind the stack. */ |
#define CONTINUE_UNWINDING \ |
do \ |
{ \ |
if (__gnu_unwind_frame (ue_header, context) != _URC_OK) \ |
return _URC_FAILURE; \ |
return _URC_CONTINUE_UNWIND; \ |
} \ |
while (0) |
#else |
#define CONTINUE_UNWINDING return _URC_CONTINUE_UNWIND |
#endif |
|
#ifdef __USING_SJLJ_EXCEPTIONS__ |
#define PERSONALITY_FUNCTION __gccgo_personality_sj0 |
#define __builtin_eh_return_data_regno(x) x |
#else |
#define PERSONALITY_FUNCTION __gccgo_personality_v0 |
#endif |
|
#ifdef __ARM_EABI_UNWINDER__ |
_Unwind_Reason_Code |
PERSONALITY_FUNCTION (_Unwind_State, struct _Unwind_Exception *, |
struct _Unwind_Context *) |
__attribute__ ((no_split_stack, flatten)); |
|
_Unwind_Reason_Code |
PERSONALITY_FUNCTION (_Unwind_State state, |
struct _Unwind_Exception * ue_header, |
struct _Unwind_Context * context) |
#else |
_Unwind_Reason_Code |
PERSONALITY_FUNCTION (int, _Unwind_Action, _Unwind_Exception_Class, |
struct _Unwind_Exception *, struct _Unwind_Context *) |
__attribute__ ((no_split_stack, flatten)); |
|
_Unwind_Reason_Code |
PERSONALITY_FUNCTION (int version, |
_Unwind_Action actions, |
_Unwind_Exception_Class exception_class, |
struct _Unwind_Exception *ue_header, |
struct _Unwind_Context *context) |
#endif |
{ |
lsda_header_info info; |
const unsigned char *language_specific_data, *p, *action_record; |
_Unwind_Ptr landing_pad, ip; |
int ip_before_insn = 0; |
_Bool is_foreign; |
G *g; |
|
#ifdef __ARM_EABI_UNWINDER__ |
_Unwind_Action actions; |
|
switch (state & _US_ACTION_MASK) |
{ |
case _US_VIRTUAL_UNWIND_FRAME: |
actions = _UA_SEARCH_PHASE; |
break; |
|
case _US_UNWIND_FRAME_STARTING: |
actions = _UA_CLEANUP_PHASE; |
if (!(state & _US_FORCE_UNWIND) |
&& ue_header->barrier_cache.sp == _Unwind_GetGR(context, 13)) |
actions |= _UA_HANDLER_FRAME; |
break; |
|
case _US_UNWIND_FRAME_RESUME: |
CONTINUE_UNWINDING; |
break; |
|
default: |
abort(); |
} |
actions |= state & _US_FORCE_UNWIND; |
|
is_foreign = 0; |
|
/* The dwarf unwinder assumes the context structure holds things like the |
function and LSDA pointers. The ARM implementation caches these in |
the exception header (UCB). To avoid rewriting everything we make the |
virtual IP register point at the UCB. */ |
ip = (_Unwind_Ptr) ue_header; |
_Unwind_SetGR (context, 12, ip); |
#else |
if (version != 1) |
return _URC_FATAL_PHASE1_ERROR; |
|
is_foreign = exception_class != __go_exception_class; |
#endif |
|
language_specific_data = (const unsigned char *) |
_Unwind_GetLanguageSpecificData (context); |
|
/* If no LSDA, then there are no handlers or cleanups. */ |
if (! language_specific_data) |
CONTINUE_UNWINDING; |
|
/* Parse the LSDA header. */ |
p = parse_lsda_header (context, language_specific_data, &info); |
#ifdef HAVE_GETIPINFO |
ip = _Unwind_GetIPInfo (context, &ip_before_insn); |
#else |
ip = _Unwind_GetIP (context); |
#endif |
if (! ip_before_insn) |
--ip; |
landing_pad = 0; |
action_record = NULL; |
|
#ifdef __USING_SJLJ_EXCEPTIONS__ |
/* The given "IP" is an index into the call-site table, with two |
exceptions -- -1 means no-action, and 0 means terminate. But |
since we're using uleb128 values, we've not got random access |
to the array. */ |
if ((int) ip <= 0) |
return _URC_CONTINUE_UNWIND; |
else |
{ |
_uleb128_t cs_lp, cs_action; |
do |
{ |
p = read_uleb128 (p, &cs_lp); |
p = read_uleb128 (p, &cs_action); |
} |
while (--ip); |
|
/* Can never have null landing pad for sjlj -- that would have |
been indicated by a -1 call site index. */ |
landing_pad = (_Unwind_Ptr)cs_lp + 1; |
if (cs_action) |
action_record = info.action_table + cs_action - 1; |
goto found_something; |
} |
#else |
/* Search the call-site table for the action associated with this IP. */ |
while (p < info.action_table) |
{ |
_Unwind_Ptr cs_start, cs_len, cs_lp; |
_uleb128_t cs_action; |
|
/* Note that all call-site encodings are "absolute" displacements. */ |
p = read_encoded_value (0, info.call_site_encoding, p, &cs_start); |
p = read_encoded_value (0, info.call_site_encoding, p, &cs_len); |
p = read_encoded_value (0, info.call_site_encoding, p, &cs_lp); |
p = read_uleb128 (p, &cs_action); |
|
/* The table is sorted, so if we've passed the ip, stop. */ |
if (ip < info.Start + cs_start) |
p = info.action_table; |
else if (ip < info.Start + cs_start + cs_len) |
{ |
if (cs_lp) |
landing_pad = info.LPStart + cs_lp; |
if (cs_action) |
action_record = info.action_table + cs_action - 1; |
goto found_something; |
} |
} |
#endif |
|
/* IP is not in table. No associated cleanups. */ |
CONTINUE_UNWINDING; |
|
found_something: |
if (landing_pad == 0) |
{ |
/* IP is present, but has a null landing pad. |
No handler to be run. */ |
CONTINUE_UNWINDING; |
} |
|
if (actions & _UA_SEARCH_PHASE) |
{ |
if (action_record == 0) |
{ |
/* This indicates a cleanup rather than an exception |
handler. */ |
CONTINUE_UNWINDING; |
} |
|
return _URC_HANDLER_FOUND; |
} |
|
/* It's possible for g to be NULL here for an exception thrown by a |
language other than Go. */ |
g = runtime_g (); |
if (g == NULL) |
{ |
if (!is_foreign) |
abort (); |
} |
else |
{ |
g->exception = ue_header; |
g->is_foreign = is_foreign; |
} |
|
_Unwind_SetGR (context, __builtin_eh_return_data_regno (0), |
(_Unwind_Ptr) ue_header); |
_Unwind_SetGR (context, __builtin_eh_return_data_regno (1), 0); |
_Unwind_SetIP (context, landing_pad); |
return _URC_INSTALL_CONTEXT; |
} |
/go-append.c
0,0 → 1,68
/* go-append.c -- the go builtin append function. |
|
Copyright 2010 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include "go-type.h" |
#include "go-panic.h" |
#include "array.h" |
#include "runtime.h" |
#include "arch.h" |
#include "malloc.h" |
|
/* We should be OK if we don't split the stack here, since the only |
libc functions we call are memcpy and memmove. If we don't do |
this, we will always split the stack, because of memcpy and |
memmove. */ |
extern struct __go_open_array |
__go_append (struct __go_open_array, void *, uintptr_t, uintptr_t) |
__attribute__ ((no_split_stack)); |
|
struct __go_open_array |
__go_append (struct __go_open_array a, void *bvalues, uintptr_t bcount, |
uintptr_t element_size) |
{ |
uintptr_t ucount; |
int count; |
|
if (bvalues == NULL || bcount == 0) |
return a; |
|
ucount = (uintptr_t) a.__count + bcount; |
count = (int) ucount; |
if ((uintptr_t) count != ucount || count <= a.__count) |
runtime_panicstring ("append: slice overflow"); |
|
if (count > a.__capacity) |
{ |
int m; |
void *n; |
|
m = a.__capacity; |
if (m == 0) |
m = (int) bcount; |
else |
{ |
do |
{ |
if (a.__count < 1024) |
m += m; |
else |
m += m / 4; |
} |
while (m < count); |
} |
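 |
      /* To make the policy above concrete (numbers illustrative): |
	 appending one element to a slice with count 512, capacity 512 |
	 doubles m to 1024; with count 4096, capacity 4096, m grows by |
	 a quarter at a time, 4096 -> 5120.  The test reads the old |
	 a.__count, so a slice whose count is already 1024 takes the |
	 m += m / 4 branch. */ |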
|
n = __go_alloc (m * element_size); |
__builtin_memcpy (n, a.__values, a.__count * element_size); |
|
a.__values = n; |
a.__capacity = m; |
} |
|
__builtin_memmove ((char *) a.__values + a.__count * element_size, |
bvalues, bcount * element_size); |
a.__count = count; |
return a; |
} |
/go-getgoroot.c
0,0 → 1,26
/* go-getgoroot.c -- getgoroot function for runtime package. |
|
Copyright 2010 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include <stdlib.h> |
|
#include "go-string.h" |
|
struct __go_string getgoroot (void) asm ("libgo_runtime.runtime.getgoroot"); |
|
struct __go_string |
getgoroot () |
{ |
const char *p; |
struct __go_string ret; |
|
p = getenv ("GOROOT"); |
ret.__data = (const unsigned char *) p; |
if (ret.__data == NULL) |
ret.__length = 0; |
else |
ret.__length = __builtin_strlen (p); |
return ret; |
} |
/go-trampoline.c
0,0 → 1,53
/* go-trampoline.c -- allocate a trampoline for a nested function. |
|
Copyright 2009 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include "config.h" |
|
#include <stddef.h> |
#include <stdint.h> |
#include <unistd.h> |
|
#ifdef HAVE_SYS_MMAN_H |
#include <sys/mman.h> |
#endif |
|
#include "go-alloc.h" |
#include "go-assert.h" |
|
/* In order to build a trampoline we need space which is both writable |
and executable. We currently just allocate a whole page. This |
needs to be more system dependent. */ |
|
void * |
__go_allocate_trampoline (uintptr_t size, void *closure) |
{ |
unsigned int page_size; |
void *ret; |
size_t off; |
|
page_size = getpagesize (); |
__go_assert (page_size >= size); |
ret = __go_alloc (2 * page_size - 1); |
ret = (void *) (((uintptr_t) ret + page_size - 1) |
& ~ ((uintptr_t) page_size - 1)); |
|
/* Because the garbage collector only looks at correct address |
offsets, we need to ensure that it will see the closure |
address. */ |
off = ((size + sizeof (void *) - 1) / sizeof (void *)) * sizeof (void *); |
__go_assert (size + off + sizeof (void *) <= page_size); |
__builtin_memcpy (ret + off, &closure, sizeof (void *)); |
|
#ifdef HAVE_SYS_MMAN_H |
{ |
int i; |
i = mprotect (ret, size, PROT_READ | PROT_WRITE | PROT_EXEC); |
__go_assert (i == 0); |
} |
#endif |
|
return ret; |
} |
/go-copy.c
0,0 → 1,22
/* go-copy.c -- the go builtin copy function. |
|
Copyright 2010 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include <stddef.h> |
#include <stdint.h> |
|
/* We should be OK if we don't split the stack here, since we are just |
calling memmove which shouldn't need much stack. If we don't do |
this we will always split the stack, because of memmove. */ |
|
extern void |
__go_copy (void *, void *, uintptr_t) |
__attribute__ ((no_split_stack)); |
|
void |
__go_copy (void *a, void *b, uintptr_t len) |
{ |
__builtin_memmove (a, b, len); |
} |
/go-strplus.c
0,0 → 1,31
/* go-strplus.c -- the go string append function. |
|
Copyright 2009 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include "go-string.h" |
#include "runtime.h" |
#include "arch.h" |
#include "malloc.h" |
|
struct __go_string |
__go_string_plus (struct __go_string s1, struct __go_string s2) |
{ |
int len; |
unsigned char *retdata; |
struct __go_string ret; |
|
if (s1.__length == 0) |
return s2; |
else if (s2.__length == 0) |
return s1; |
|
len = s1.__length + s2.__length; |
retdata = runtime_mallocgc (len, FlagNoPointers, 1, 0); |
__builtin_memcpy (retdata, s1.__data, s1.__length); |
__builtin_memcpy (retdata + s1.__length, s2.__data, s2.__length); |
ret.__data = retdata; |
ret.__length = len; |
return ret; |
} |
/go-signal.c
0,0 → 1,496
/* go-signal.c -- signal handling for Go. |
|
Copyright 2009 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include <signal.h> |
#include <stdlib.h> |
#include <unistd.h> |
#include <sys/time.h> |
|
#include "runtime.h" |
#include "go-assert.h" |
#include "go-panic.h" |
|
#ifndef SA_RESTART |
#define SA_RESTART 0 |
#endif |
|
#ifdef USING_SPLIT_STACK |
|
extern void __splitstack_getcontext(void *context[10]); |
|
extern void __splitstack_setcontext(void *context[10]); |
|
#endif |
|
#define C SigCatch |
#define I SigIgnore |
#define R SigRestart |
#define Q SigQueue |
#define P SigPanic |
|
/* Signal actions. This collects the sigtab tables for several |
different targets from the master library. SIGKILL, SIGCONT, and |
SIGSTOP are not listed, as we don't want to set signal handlers for |
them. */ |
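 |
/* Each entry combines the flag letters defined above.  For example, |
   { SIGSEGV, C + P } means the signal is caught and turned into a Go |
   panic, while { SIGCHLD, Q + I + R } means it is queued for the Go |
   signal handler, ignored when nothing is listening, and interrupted |
   system calls are restarted (SA_RESTART). */ |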
|
SigTab runtime_sigtab[] = { |
#ifdef SIGHUP |
{ SIGHUP, Q + R }, |
#endif |
#ifdef SIGINT |
{ SIGINT, Q + R }, |
#endif |
#ifdef SIGQUIT |
{ SIGQUIT, C }, |
#endif |
#ifdef SIGILL |
{ SIGILL, C }, |
#endif |
#ifdef SIGTRAP |
{ SIGTRAP, C }, |
#endif |
#ifdef SIGABRT |
{ SIGABRT, C }, |
#endif |
#ifdef SIGBUS |
{ SIGBUS, C + P }, |
#endif |
#ifdef SIGFPE |
{ SIGFPE, C + P }, |
#endif |
#ifdef SIGUSR1 |
{ SIGUSR1, Q + I + R }, |
#endif |
#ifdef SIGSEGV |
{ SIGSEGV, C + P }, |
#endif |
#ifdef SIGUSR2 |
{ SIGUSR2, Q + I + R }, |
#endif |
#ifdef SIGPIPE |
{ SIGPIPE, I }, |
#endif |
#ifdef SIGALRM |
{ SIGALRM, Q + I + R }, |
#endif |
#ifdef SIGTERM |
{ SIGTERM, Q + R }, |
#endif |
#ifdef SIGSTKFLT |
{ SIGSTKFLT, C }, |
#endif |
#ifdef SIGCHLD |
{ SIGCHLD, Q + I + R }, |
#endif |
#ifdef SIGTSTP |
{ SIGTSTP, Q + I + R }, |
#endif |
#ifdef SIGTTIN |
{ SIGTTIN, Q + I + R }, |
#endif |
#ifdef SIGTTOU |
{ SIGTTOU, Q + I + R }, |
#endif |
#ifdef SIGURG |
{ SIGURG, Q + I + R }, |
#endif |
#ifdef SIGXCPU |
{ SIGXCPU, Q + I + R }, |
#endif |
#ifdef SIGXFSZ |
{ SIGXFSZ, Q + I + R }, |
#endif |
#ifdef SIGVTALRM |
{ SIGVTALRM, Q + I + R }, |
#endif |
#ifdef SIGPROF |
{ SIGPROF, Q + I + R }, |
#endif |
#ifdef SIGWINCH |
{ SIGWINCH, Q + I + R }, |
#endif |
#ifdef SIGIO |
{ SIGIO, Q + I + R }, |
#endif |
#ifdef SIGPWR |
{ SIGPWR, Q + I + R }, |
#endif |
#ifdef SIGSYS |
{ SIGSYS, C }, |
#endif |
#ifdef SIGEMT |
{ SIGEMT, C }, |
#endif |
#ifdef SIGINFO |
{ SIGINFO, Q + I + R }, |
#endif |
#ifdef SIGTHR |
{ SIGTHR, Q + I + R }, |
#endif |
{ -1, 0 } |
}; |
#undef C |
#undef I |
#undef R |
#undef Q |
#undef P |
|
/* Handle a signal, for cases where we don't panic. We can split the |
stack here. */ |
|
static void |
sig_handler (int sig) |
{ |
int i; |
|
#ifdef SIGPROF |
if (sig == SIGPROF) |
{ |
/* FIXME. */ |
runtime_sigprof (0, 0, nil, nil); |
return; |
} |
#endif |
|
for (i = 0; runtime_sigtab[i].sig != -1; ++i) |
{ |
struct sigaction sa; |
|
if (runtime_sigtab[i].sig != sig) |
continue; |
|
if ((runtime_sigtab[i].flags & SigQueue) != 0) |
{ |
if (__go_sigsend (sig) |
|| (runtime_sigtab[i].flags & SigIgnore) != 0) |
return; |
runtime_exit (2); // SIGINT, SIGTERM, etc |
} |
|
if (runtime_panicking) |
runtime_exit (2); |
runtime_panicking = 1; |
|
/* We should do a stack backtrace here. Until we can do that, |
we reraise the signal in order to get a slightly better |
report from the shell. */ |
|
memset (&sa, 0, sizeof sa); |
|
sa.sa_handler = SIG_DFL; |
|
i = sigemptyset (&sa.sa_mask); |
__go_assert (i == 0); |
|
if (sigaction (sig, &sa, NULL) != 0) |
abort (); |
|
raise (sig); |
|
runtime_exit (2); |
} |
|
__builtin_unreachable (); |
} |
|
/* The start of handling a signal which panics. */ |
|
static void |
sig_panic_leadin (int sig) |
{ |
int i; |
sigset_t clear; |
|
if (runtime_m ()->mallocing) |
{ |
runtime_printf ("caught signal while mallocing: %d\n", sig); |
runtime_throw ("caught signal while mallocing"); |
} |
|
/* The signal handler blocked signals; unblock them. */ |
i = sigfillset (&clear); |
__go_assert (i == 0); |
i = sigprocmask (SIG_UNBLOCK, &clear, NULL); |
__go_assert (i == 0); |
} |
|
#ifdef SA_SIGINFO |
|
/* Signal dispatch for signals which panic, on systems which support |
SA_SIGINFO. This is called on the thread stack, and as such it is |
permitted to split the stack. */ |
|
static void |
sig_panic_info_handler (int sig, siginfo_t *info, |
void *context __attribute__ ((unused))) |
{ |
if (runtime_g () == NULL) |
{ |
sig_handler (sig); |
return; |
} |
|
sig_panic_leadin (sig); |
|
switch (sig) |
{ |
#ifdef SIGBUS |
case SIGBUS: |
if (info->si_code == BUS_ADRERR && (uintptr_t) info->si_addr < 0x1000) |
runtime_panicstring ("invalid memory address or " |
"nil pointer dereference"); |
runtime_printf ("unexpected fault address %p\n", info->si_addr); |
runtime_throw ("fault"); |
#endif |
|
#ifdef SIGSEGV |
case SIGSEGV: |
if ((info->si_code == 0 |
|| info->si_code == SEGV_MAPERR |
|| info->si_code == SEGV_ACCERR) |
&& (uintptr_t) info->si_addr < 0x1000) |
runtime_panicstring ("invalid memory address or " |
"nil pointer dereference"); |
runtime_printf ("unexpected fault address %p\n", info->si_addr); |
runtime_throw ("fault"); |
#endif |
|
#ifdef SIGFPE |
case SIGFPE: |
switch (info->si_code) |
{ |
case FPE_INTDIV: |
runtime_panicstring ("integer divide by zero"); |
case FPE_INTOVF: |
runtime_panicstring ("integer overflow"); |
} |
runtime_panicstring ("floating point error"); |
#endif |
} |
|
/* All signals with SigPanic should be in cases above, and this |
handler should only be invoked for those signals. */ |
__builtin_unreachable (); |
} |
|
#else /* !defined (SA_SIGINFO) */ |
|
static void |
sig_panic_handler (int sig) |
{ |
if (runtime_g () == NULL) |
{ |
sig_handler (sig); |
return; |
} |
|
sig_panic_leadin (sig); |
|
switch (sig) |
{ |
#ifdef SIGBUS |
case SIGBUS: |
runtime_panicstring ("invalid memory address or " |
"nil pointer dereference"); |
#endif |
|
#ifdef SIGSEGV |
case SIGSEGV: |
runtime_panicstring ("invalid memory address or " |
"nil pointer dereference"); |
#endif |
|
#ifdef SIGFPE |
case SIGFPE: |
runtime_panicstring ("integer divide by zero or floating point error"); |
#endif |
} |
|
/* All signals with SigPanic should be in cases above, and this |
handler should only be invoked for those signals. */ |
__builtin_unreachable (); |
} |
|
#endif /* !defined (SA_SIGINFO) */ |
|
/* Ignore a signal. This is called on the alternate signal stack so |
it may not split the stack. */ |
|
static void sig_ignore (int) __attribute__ ((no_split_stack)); |
|
static void |
sig_ignore (int sig __attribute__ ((unused))) |
{ |
} |
|
/* A signal handler used for signals which are not going to panic. |
This is called on the alternate signal stack so it may not split |
the stack. */ |
|
static void |
sig_tramp (int) __attribute__ ((no_split_stack)); |
|
static void |
sig_tramp (int sig) |
{ |
G *gp; |
M *mp; |
|
/* We are now running on the stack registered via sigaltstack. |
(Actually there is a small span of time between runtime_siginit |
and sigaltstack when the program starts.) */ |
gp = runtime_g (); |
mp = runtime_m (); |
|
if (gp != NULL) |
{ |
#ifdef USING_SPLIT_STACK |
__splitstack_getcontext (&gp->stack_context[0]); |
#endif |
} |
|
if (gp != NULL && mp->gsignal != NULL) |
{ |
/* We are running on the signal stack. Set the split stack |
context so that the stack guards are checked correctly. */ |
#ifdef USING_SPLIT_STACK |
__splitstack_setcontext (&mp->gsignal->stack_context[0]); |
#endif |
} |
|
sig_handler (sig); |
|
/* We are going to return back to the signal trampoline and then to |
whatever we were doing before we got the signal. Restore the |
split stack context so that stack guards are checked |
correctly. */ |
|
if (gp != NULL) |
{ |
#ifdef USING_SPLIT_STACK |
__splitstack_setcontext (&gp->stack_context[0]); |
#endif |
} |
} |
|
/* Initialize signal handling for Go. This is called when the program |
starts. */ |
|
void |
runtime_initsig (int32 queue) |
{ |
struct sigaction sa; |
int i; |
|
siginit (); |
|
memset (&sa, 0, sizeof sa); |
|
i = sigfillset (&sa.sa_mask); |
__go_assert (i == 0); |
|
for (i = 0; runtime_sigtab[i].sig != -1; ++i) |
{ |
if (runtime_sigtab[i].flags == 0) |
continue; |
if ((runtime_sigtab[i].flags & SigQueue) != queue) |
continue; |
|
if ((runtime_sigtab[i].flags & (SigCatch | SigQueue)) != 0) |
{ |
if ((runtime_sigtab[i].flags & SigPanic) == 0) |
{ |
sa.sa_flags = SA_ONSTACK; |
sa.sa_handler = sig_tramp; |
} |
else |
{ |
#ifdef SA_SIGINFO |
sa.sa_flags = SA_SIGINFO; |
sa.sa_sigaction = sig_panic_info_handler; |
#else |
sa.sa_flags = 0; |
sa.sa_handler = sig_panic_handler; |
#endif |
} |
} |
else |
{ |
sa.sa_flags = SA_ONSTACK; |
sa.sa_handler = sig_ignore; |
} |
|
if ((runtime_sigtab[i].flags & SigRestart) != 0) |
sa.sa_flags |= SA_RESTART; |
|
if (sigaction (runtime_sigtab[i].sig, &sa, NULL) != 0) |
__go_assert (0); |
} |
} |
|
void |
runtime_resetcpuprofiler(int32 hz) |
{ |
#ifdef SIGPROF |
struct itimerval it; |
struct sigaction sa; |
int i; |
|
memset (&it, 0, sizeof it); |
|
memset (&sa, 0, sizeof sa); |
i = sigfillset (&sa.sa_mask); |
__go_assert (i == 0); |
|
if (hz == 0) |
{ |
i = setitimer (ITIMER_PROF, &it, NULL); |
__go_assert (i == 0); |
|
sa.sa_handler = SIG_IGN; |
i = sigaction (SIGPROF, &sa, NULL); |
__go_assert (i == 0); |
} |
else |
{ |
sa.sa_handler = sig_handler; |
sa.sa_flags = SA_RESTART; |
i = sigaction (SIGPROF, &sa, NULL); |
__go_assert (i == 0); |
|
it.it_interval.tv_sec = 0; |
it.it_interval.tv_usec = 1000000 / hz; |
it.it_value = it.it_interval; |
i = setitimer (ITIMER_PROF, &it, NULL); |
__go_assert (i == 0); |
} |
#endif |
|
runtime_m()->profilehz = hz; |
} |
|
/* Used by the os package to raise SIGPIPE. */ |
|
void os_sigpipe (void) __asm__ ("libgo_os.os.sigpipe"); |
|
void |
os_sigpipe (void) |
{ |
struct sigaction sa; |
int i; |
|
memset (&sa, 0, sizeof sa); |
|
sa.sa_handler = SIG_DFL; |
|
i = sigemptyset (&sa.sa_mask); |
__go_assert (i == 0); |
|
if (sigaction (SIGPIPE, &sa, NULL) != 0) |
abort (); |
|
raise (SIGPIPE); |
} |
/array.h
0,0 → 1,28
/* array.h -- the open array type for Go. |
|
Copyright 2009 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#ifndef LIBGO_ARRAY_H |
#define LIBGO_ARRAY_H |
|
/* An open array is an instance of this structure. */ |
|
struct __go_open_array |
{ |
/* The elements of the array. In use in the compiler this is a |
pointer to the element type. */ |
void* __values; |
/* The number of elements in the array. Note that this is "int", |
not "size_t". The language definition says that "int" is large |
enough to hold the size of any allocated object. Using "int" |
saves 8 bytes per slice header on a 64-bit system with 32-bit |
ints. */ |
int __count; |
/* The capacity of the array--the number of elements that can fit in |
the __VALUES field. */ |
int __capacity; |
}; |
|
#endif /* !defined(LIBGO_ARRAY_H) */ |
/msize.c
0,0 → 1,169
// Copyright 2009 The Go Authors. All rights reserved. |
// Use of this source code is governed by a BSD-style |
// license that can be found in the LICENSE file. |
|
// Malloc small size classes. |
// |
// See malloc.h for overview. |
// |
// The size classes are chosen so that rounding an allocation |
// request up to the next size class wastes at most 12.5% (1.125x). |
// |
// Each size class has its own page count that gets allocated |
// and chopped up when new objects of the size class are needed. |
// That page count is chosen so that chopping up the run of |
// pages into objects of the given size wastes at most 12.5% (1.125x) |
// of the memory. It is not necessary that the cutoff here be |
// the same as above. |
// |
// The two sources of waste multiply, so the worst possible case |
// for the above constraints would be that allocations of some |
// size might have a 26.6% (1.266x) overhead. |
// In practice, only one of the wastes comes into play for a |
// given size (sizes < 512 waste mainly on the round-up, |
// sizes > 512 waste mainly on the page chopping). |
// |
// TODO(rsc): Compute max waste for any given size. |
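// |
// Worked example of the bound above: each waste source is at most |
// 1.125x, and the two compound multiplicatively, so the worst case |
// is 1.125 * 1.125 = 1.265625, i.e. about 26.6% overhead. |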
|
#include "runtime.h" |
#include "arch.h" |
#include "malloc.h" |
|
int32 runtime_class_to_size[NumSizeClasses]; |
int32 runtime_class_to_allocnpages[NumSizeClasses]; |
int32 runtime_class_to_transfercount[NumSizeClasses]; |
|
// The SizeToClass lookup is implemented using two arrays, |
// one mapping sizes <= 1024 to their class and one mapping |
// sizes >= 1024 and <= MaxSmallSize to their class. |
// All objects are 8-aligned, so the first array is indexed by |
// the size divided by 8 (rounded up). Objects >= 1024 bytes |
// are 128-aligned, so the second array is indexed by the |
// size divided by 128 (rounded up). The arrays are filled in |
// by InitSizes. |
|
static int32 size_to_class8[1024/8 + 1]; |
static int32 size_to_class128[(MaxSmallSize-1024)/128 + 1]; |
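 |
// Example lookups (assuming the tables are filled in by InitSizes): |
// size 513 falls in the small table, size_to_class8[(513+7)>>3] = |
// size_to_class8[65]; size 2000 uses the second table, |
// size_to_class128[(2000-1024+127)>>7] = size_to_class128[8]. |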
|
int32 |
runtime_SizeToClass(int32 size) |
{ |
if(size > MaxSmallSize) |
runtime_throw("SizeToClass - invalid size"); |
if(size > 1024-8) |
return size_to_class128[(size-1024+127) >> 7]; |
return size_to_class8[(size+7)>>3]; |
} |
|
void |
runtime_InitSizes(void) |
{ |
int32 align, sizeclass, size, nextsize, n; |
uint32 i; |
uintptr allocsize, npages; |
|
// Initialize the runtime_class_to_size table (and choose class sizes in the process). |
runtime_class_to_size[0] = 0; |
sizeclass = 1; // 0 means no class |
align = 8; |
for(size = align; size <= MaxSmallSize; size += align) { |
if((size&(size-1)) == 0) { // bump alignment once in a while |
if(size >= 2048) |
align = 256; |
else if(size >= 128) |
align = size / 8; |
else if(size >= 16) |
align = 16; // required for x86 SSE instructions, if we want to use them |
} |
if((align&(align-1)) != 0) |
runtime_throw("InitSizes - bug"); |
|
// Make the allocnpages big enough that |
// the leftover is less than 1/8 of the total, |
// so wasted space is at most 12.5%. |
allocsize = PageSize; |
while(allocsize%size > allocsize/8) |
allocsize += PageSize; |
npages = allocsize >> PageShift; |
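 |
		// For instance, with 4096-byte pages and size 1152: one page |
		// leaves 4096 % 1152 = 640 bytes unused, which exceeds |
		// 4096/8 = 512, so allocsize becomes 8192, leaving only |
		// 8192 % 1152 = 128 bytes (7 objects per 2-page run). |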
|
// If the previous sizeclass chose the same |
// allocation size and fit the same number of |
// objects into the page, we might as well |
// use just this size instead of having two |
// different sizes. |
if(sizeclass > 1 |
&& (int32)npages == runtime_class_to_allocnpages[sizeclass-1] |
&& allocsize/size == allocsize/runtime_class_to_size[sizeclass-1]) { |
runtime_class_to_size[sizeclass-1] = size; |
continue; |
} |
|
runtime_class_to_allocnpages[sizeclass] = npages; |
runtime_class_to_size[sizeclass] = size; |
sizeclass++; |
} |
if(sizeclass != NumSizeClasses) { |
// runtime_printf("sizeclass=%d NumSizeClasses=%d\n", sizeclass, NumSizeClasses); |
runtime_throw("InitSizes - bad NumSizeClasses"); |
} |
|
// Initialize the size_to_class tables. |
nextsize = 0; |
for (sizeclass = 1; sizeclass < NumSizeClasses; sizeclass++) { |
for(; nextsize < 1024 && nextsize <= runtime_class_to_size[sizeclass]; nextsize+=8) |
size_to_class8[nextsize/8] = sizeclass; |
if(nextsize >= 1024) |
for(; nextsize <= runtime_class_to_size[sizeclass]; nextsize += 128) |
size_to_class128[(nextsize-1024)/128] = sizeclass; |
} |
|
// Double-check SizeToClass. |
if(0) { |
for(n=0; n < MaxSmallSize; n++) { |
sizeclass = runtime_SizeToClass(n); |
if(sizeclass < 1 || sizeclass >= NumSizeClasses || runtime_class_to_size[sizeclass] < n) { |
// runtime_printf("size=%d sizeclass=%d runtime_class_to_size=%d\n", n, sizeclass, runtime_class_to_size[sizeclass]); |
// runtime_printf("incorrect SizeToClass"); |
goto dump; |
} |
if(sizeclass > 1 && runtime_class_to_size[sizeclass-1] >= n) { |
// runtime_printf("size=%d sizeclass=%d runtime_class_to_size=%d\n", n, sizeclass, runtime_class_to_size[sizeclass]); |
// runtime_printf("SizeToClass too big"); |
goto dump; |
} |
} |
} |
|
// Copy out for statistics table. |
for(i=0; i<nelem(runtime_class_to_size); i++) |
mstats.by_size[i].size = runtime_class_to_size[i]; |
|
// Initialize the runtime_class_to_transfercount table. |
for(sizeclass = 1; sizeclass < NumSizeClasses; sizeclass++) { |
n = 64*1024 / runtime_class_to_size[sizeclass]; |
if(n < 2) |
n = 2; |
if(n > 32) |
n = 32; |
runtime_class_to_transfercount[sizeclass] = n; |
} |
return; |
|
dump: |
if(1){ |
runtime_printf("NumSizeClasses=%d\n", NumSizeClasses); |
runtime_printf("runtime_class_to_size:"); |
for(sizeclass=0; sizeclass<NumSizeClasses; sizeclass++) |
runtime_printf(" %d", runtime_class_to_size[sizeclass]); |
runtime_printf("\n\n"); |
runtime_printf("size_to_class8:"); |
for(i=0; i<nelem(size_to_class8); i++) |
runtime_printf(" %d=>%d(%d)\n", i*8, size_to_class8[i], runtime_class_to_size[size_to_class8[i]]); |
runtime_printf("\n"); |
runtime_printf("size_to_class128:"); |
for(i=0; i<nelem(size_to_class128); i++) |
runtime_printf(" %d=>%d(%d)\n", i*128, size_to_class128[i], runtime_class_to_size[size_to_class128[i]]); |
runtime_printf("\n"); |
} |
runtime_throw("InitSizes failed"); |
} |
/go-defer.c
0,0 → 1,77
/* go-defer.c -- manage the defer stack. |
|
Copyright 2009 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include <stddef.h> |
|
#include "runtime.h" |
#include "go-alloc.h" |
#include "go-panic.h" |
#include "go-defer.h" |
|
/* This function is called each time we need to defer a call. */ |
|
void |
__go_defer (_Bool *frame, void (*pfn) (void *), void *arg) |
{ |
G *g; |
struct __go_defer_stack *n; |
|
g = runtime_g (); |
n = (struct __go_defer_stack *) __go_alloc (sizeof (struct __go_defer_stack)); |
n->__next = g->defer; |
n->__frame = frame; |
n->__panic = g->panic; |
n->__pfn = pfn; |
n->__arg = arg; |
n->__retaddr = NULL; |
g->defer = n; |
} |
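 |
/* For illustration (the exact lowering is up to the compiler front |
   end): a statement like `defer f(x)` in a function F is compiled to |
   roughly `__go_defer (&frame, thunk, a)`, where `a` holds a copy of |
   x, `thunk` invokes f on it, and `frame` is a boolean in F's frame |
   recording whether F is returning normally (see __go_undefer |
   below). */ |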
|
/* This function is called when we want to undefer the stack. */ |
|
void |
__go_undefer (_Bool *frame) |
{ |
G *g; |
|
g = runtime_g (); |
while (g->defer != NULL && g->defer->__frame == frame) |
{ |
struct __go_defer_stack *d; |
void (*pfn) (void *); |
|
d = g->defer; |
pfn = d->__pfn; |
d->__pfn = NULL; |
|
if (pfn != NULL) |
(*pfn) (d->__arg); |
|
g->defer = d->__next; |
__go_free (d); |
|
/* Since we are executing a defer function here, we know we are |
returning from the calling function. If the calling |
function, or one of its callees, panicked, then the defer |
functions would be executed by __go_panic. */ |
*frame = 1; |
} |
} |
|
/* This function is called to record the address to which the deferred |
function returns. This may in turn be checked by __go_can_recover. |
The frontend relies on this function returning false. */ |
|
_Bool |
__go_set_defer_retaddr (void *retaddr) |
{ |
G *g; |
|
g = runtime_g (); |
if (g->defer != NULL) |
g->defer->__retaddr = retaddr; |
return 0; |
} |
/go-typedesc-equal.c
0,0 → 1,38
/* go-typedesc-equal.c -- return whether two type descriptors are equal. |
|
Copyright 2009 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include "go-string.h" |
#include "go-type.h" |
|
/* Compare type descriptors for equality. This is necessary because |
types may have different descriptors in different shared libraries. |
Also, unnamed types may have multiple type descriptors even in a |
single shared library. */ |
|
_Bool |
__go_type_descriptors_equal (const struct __go_type_descriptor *td1, |
const struct __go_type_descriptor *td2) |
{ |
if (td1 == td2) |
return 1; |
/* In a type switch we can get a NULL descriptor. */ |
if (td1 == NULL || td2 == NULL) |
return 0; |
if (td1->__code != td2->__code || td1->__hash != td2->__hash) |
return 0; |
if (td1->__uncommon != NULL && td1->__uncommon->__name != NULL) |
{ |
if (td2->__uncommon == NULL || td2->__uncommon->__name == NULL) |
return 0; |
return (__go_ptr_strings_equal (td1->__uncommon->__name, |
td2->__uncommon->__name) |
&& __go_ptr_strings_equal (td1->__uncommon->__pkg_path, |
td2->__uncommon->__pkg_path)); |
} |
if (td2->__uncommon != NULL && td2->__uncommon->__name != NULL) |
return 0; |
return __go_ptr_strings_equal (td1->__reflection, td2->__reflection); |
} |
/go-int-array-to-string.c
0,0 → 1,86
/* go-int-array-to-string.c -- convert an array of ints to a string in Go. |
|
Copyright 2009 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include "go-assert.h" |
#include "go-string.h" |
#include "runtime.h" |
#include "arch.h" |
#include "malloc.h" |
|
struct __go_string |
__go_int_array_to_string (const void* p, int len) |
{ |
const int *ints; |
int slen; |
int i; |
unsigned char *retdata; |
struct __go_string ret; |
unsigned char *s; |
|
ints = (const int *) p; |
|
slen = 0; |
for (i = 0; i < len; ++i) |
{ |
int v; |
|
v = ints[i]; |
|
if (v > 0x10ffff) |
v = 0xfffd; |
|
if (v <= 0x7f) |
slen += 1; |
else if (v <= 0x7ff) |
slen += 2; |
else if (v <= 0xffff) |
slen += 3; |
else |
slen += 4; |
} |
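 |
  /* Example of the sizing above: v = 0x2603 falls in the <= 0xffff |
     range, so it counts 3 bytes and is later emitted as 0xe2 0x98 |
     0x83; anything above 0x10ffff was already replaced by 0xfffd |
     (the Unicode replacement character), which itself encodes in 3 |
     bytes. */ |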
|
retdata = runtime_mallocgc (slen, FlagNoPointers, 1, 0); |
ret.__data = retdata; |
ret.__length = slen; |
|
s = retdata; |
for (i = 0; i < len; ++i) |
{ |
int v; |
|
v = ints[i]; |
|
/* If V is out of range for UTF-8, substitute the replacement |
character. */ |
if (v > 0x10ffff) |
v = 0xfffd; |
|
if (v <= 0x7f) |
*s++ = v; |
else if (v <= 0x7ff) |
{ |
*s++ = 0xc0 | ((v >> 6) & 0x1f); |
*s++ = 0x80 | (v & 0x3f); |
} |
else if (v <= 0xffff) |
{ |
*s++ = 0xe0 | ((v >> 12) & 0xf); |
*s++ = 0x80 | ((v >> 6) & 0x3f); |
*s++ = 0x80 | (v & 0x3f); |
} |
else |
{ |
*s++ = 0xf0 | ((v >> 18) & 0x7); |
*s++ = 0x80 | ((v >> 12) & 0x3f); |
*s++ = 0x80 | ((v >> 6) & 0x3f); |
*s++ = 0x80 | (v & 0x3f); |
} |
} |
|
__go_assert (s - retdata == slen); |
|
return ret; |
} |
/iface.goc
0,0 → 1,138
// Copyright 2010 The Go Authors. All rights reserved. |
// Use of this source code is governed by a BSD-style |
// license that can be found in the LICENSE file. |
|
package runtime |
#include "runtime.h" |
#include "go-type.h" |
#include "interface.h" |
|
typedef struct __go_type_descriptor descriptor; |
typedef const struct __go_type_descriptor const_descriptor; |
typedef struct __go_interface interface; |
typedef struct __go_empty_interface empty_interface; |
|
// Compare two type descriptors. |
func ifacetypeeq(a *descriptor, b *descriptor) (eq bool) { |
eq = __go_type_descriptors_equal(a, b); |
} |
|
// Return the descriptor for an empty interface type. |
func efacetype(e empty_interface) (d *const_descriptor) { |
return e.__type_descriptor; |
} |
|
// Return the descriptor for a non-empty interface type. |
func ifacetype(i interface) (d *const_descriptor) { |
if (i.__methods == nil) { |
return nil; |
} |
d = i.__methods[0]; |
} |
|
// Convert an empty interface to an empty interface. |
func ifaceE2E2(e empty_interface) (ret empty_interface, ok bool) { |
if(((uintptr_t)e.__type_descriptor&reflectFlags) != 0) |
runtime_panicstring("invalid interface value"); |
ret = e; |
ok = ret.__type_descriptor != nil; |
} |
|
// Convert a non-empty interface to an empty interface. |
func ifaceI2E2(i interface) (ret empty_interface, ok bool) { |
if (i.__methods == nil) { |
ret.__type_descriptor = nil; |
ret.__object = nil; |
ok = 0; |
} else { |
ret.__type_descriptor = i.__methods[0]; |
ret.__object = i.__object; |
ok = 1; |
} |
} |
|
// Convert an empty interface to a non-empty interface. |
func ifaceE2I2(inter *descriptor, e empty_interface) (ret interface, ok bool) { |
if(((uintptr_t)e.__type_descriptor&reflectFlags) != 0) |
runtime_panicstring("invalid interface value"); |
if (e.__type_descriptor == nil) { |
ret.__methods = nil; |
ret.__object = nil; |
ok = 0; |
} else { |
ret.__methods = __go_convert_interface_2(inter, |
e.__type_descriptor, |
1); |
ret.__object = e.__object; |
ok = ret.__methods != nil; |
} |
} |
|
// Convert a non-empty interface to a non-empty interface. |
func ifaceI2I2(inter *descriptor, i interface) (ret interface, ok bool) { |
if (i.__methods == nil) { |
ret.__methods = nil; |
ret.__object = nil; |
ok = 0; |
} else { |
ret.__methods = __go_convert_interface_2(inter, |
i.__methods[0], 1); |
ret.__object = i.__object; |
ok = ret.__methods != nil; |
} |
} |
|
// Convert an empty interface to a pointer type. |
func ifaceE2T2P(inter *descriptor, e empty_interface) (ret *void, ok bool) { |
if(((uintptr_t)e.__type_descriptor&reflectFlags) != 0) |
runtime_panicstring("invalid interface value"); |
if (!__go_type_descriptors_equal(inter, e.__type_descriptor)) { |
ret = nil; |
ok = 0; |
} else { |
ret = e.__object; |
ok = 1; |
} |
} |
|
// Convert a non-empty interface to a pointer type. |
func ifaceI2T2P(inter *descriptor, i interface) (ret *void, ok bool) { |
if (i.__methods == nil |
|| !__go_type_descriptors_equal(inter, i.__methods[0])) { |
ret = nil; |
ok = 0; |
} else { |
ret = i.__object; |
ok = 1; |
} |
} |
|
// Convert an empty interface to a non-pointer type. |
func ifaceE2T2(inter *descriptor, e empty_interface, ret *void) (ok bool) { |
if(((uintptr_t)e.__type_descriptor&reflectFlags) != 0) |
runtime_panicstring("invalid interface value"); |
if (!__go_type_descriptors_equal(inter, e.__type_descriptor)) { |
__builtin_memset(ret, 0, inter->__size); |
ok = 0; |
} else { |
__builtin_memcpy(ret, e.__object, inter->__size); |
ok = 1; |
} |
} |
|
// Convert a non-empty interface to a non-pointer type. |
func ifaceI2T2(inter *descriptor, i interface, ret *void) (ok bool) { |
if (i.__methods == nil |
|| !__go_type_descriptors_equal(inter, i.__methods[0])) { |
__builtin_memset(ret, 0, inter->__size); |
ok = 0; |
} else { |
__builtin_memcpy(ret, i.__object, inter->__size); |
ok = 1; |
} |
} |
|
// Return whether we can convert an interface to a type. |
func ifaceI2Tp(to *descriptor, from *descriptor) (ok bool) { |
ok = __go_can_convert_to_interface(to, from); |
} |
/go-string-to-int-array.c
0,0 → 1,51
/* go-string-to-int-array.c -- convert a string to an array of ints in Go. |
|
Copyright 2010 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include "go-alloc.h" |
#include "go-string.h" |
#include "array.h" |
#include "runtime.h" |
#include "arch.h" |
#include "malloc.h" |
|
struct __go_open_array |
__go_string_to_int_array (struct __go_string str) |
{ |
size_t c; |
const unsigned char *p; |
const unsigned char *pend; |
uint32_t *data; |
uint32_t *pd; |
struct __go_open_array ret; |
|
c = 0; |
p = str.__data; |
pend = p + str.__length; |
while (p < pend) |
{ |
int rune; |
|
++c; |
p += __go_get_rune (p, pend - p, &rune); |
} |
|
data = (uint32_t *) runtime_mallocgc (c * sizeof (uint32_t), FlagNoPointers, |
1, 0); |
p = str.__data; |
pd = data; |
while (p < pend) |
{ |
int rune; |
|
p += __go_get_rune (p, pend - p, &rune); |
*pd++ = rune; |
} |
|
ret.__values = (void *) data; |
ret.__count = c; |
ret.__capacity = c; |
return ret; |
} |
/go-defer.h
0,0 → 1,37
/* go-defer.h -- the defer stack. |
|
Copyright 2010 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
struct __go_panic_stack; |
|
/* The defer stack is a list of these structures. */ |
|
struct __go_defer_stack |
{ |
/* The next entry in the stack. */ |
struct __go_defer_stack *__next; |
|
/* The stack variable for the function which called this defer |
statement. This is set to 1 if we are returning from that |
function, 0 if we are panicking through it.  */ |
_Bool *__frame; |
|
/* The value of the panic stack when this function is deferred. |
This function can not recover this value from the panic stack. |
This can happen if a deferred function uses its own defer |
statement. */ |
struct __go_panic_stack *__panic; |
|
/* The function to call. */ |
void (*__pfn) (void *); |
|
/* The argument to pass to the function. */ |
void *__arg; |
|
/* The return address that a recover thunk matches against. This is |
set by __go_set_defer_retaddr which is called by the thunks |
created by defer statements. */ |
const void *__retaddr; |
}; |
/go-reflect-call.c
0,0 → 1,523
/* go-reflect-call.c -- call reflection support for Go. |
|
Copyright 2009 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include <stdio.h> |
#include <stdint.h> |
#include <stdlib.h> |
|
#include "config.h" |
|
#include "go-alloc.h" |
#include "go-assert.h" |
#include "go-type.h" |
#include "runtime.h" |
|
#ifdef USE_LIBFFI |
|
#include "ffi.h" |
|
/* The functions in this file are only called from reflect_call. As |
reflect_call calls a libffi function, which will be compiled |
without -fsplit-stack, it will always run with a large stack. */ |
|
static ffi_type *go_array_to_ffi (const struct __go_array_type *) |
__attribute__ ((no_split_stack)); |
static ffi_type *go_slice_to_ffi (const struct __go_slice_type *) |
__attribute__ ((no_split_stack)); |
static ffi_type *go_struct_to_ffi (const struct __go_struct_type *) |
__attribute__ ((no_split_stack)); |
static ffi_type *go_string_to_ffi (void) __attribute__ ((no_split_stack)); |
static ffi_type *go_interface_to_ffi (void) __attribute__ ((no_split_stack)); |
static ffi_type *go_complex_to_ffi (ffi_type *) |
__attribute__ ((no_split_stack)); |
static ffi_type *go_type_to_ffi (const struct __go_type_descriptor *) |
__attribute__ ((no_split_stack)); |
static ffi_type *go_func_return_ffi (const struct __go_func_type *) |
__attribute__ ((no_split_stack)); |
static void go_func_to_cif (const struct __go_func_type *, _Bool, _Bool, |
ffi_cif *) |
__attribute__ ((no_split_stack)); |
static size_t go_results_size (const struct __go_func_type *) |
__attribute__ ((no_split_stack)); |
static void go_set_results (const struct __go_func_type *, unsigned char *, |
void **) |
__attribute__ ((no_split_stack)); |
|
/* Return an ffi_type for a Go array type. The libffi library does |
not have any builtin support for passing arrays as values. We work |
around this by pretending that the array is a struct. */ |
|
static ffi_type * |
go_array_to_ffi (const struct __go_array_type *descriptor) |
{ |
ffi_type *ret; |
uintptr_t len; |
ffi_type *element; |
uintptr_t i; |
|
ret = (ffi_type *) __go_alloc (sizeof (ffi_type)); |
ret->type = FFI_TYPE_STRUCT; |
len = descriptor->__len; |
ret->elements = (ffi_type **) __go_alloc ((len + 1) * sizeof (ffi_type *)); |
element = go_type_to_ffi (descriptor->__element_type); |
for (i = 0; i < len; ++i) |
ret->elements[i] = element; |
ret->elements[len] = NULL; |
return ret; |
} |
|
/* Return an ffi_type for a Go slice type. This describes the |
   __go_open_array type defined in array.h. */ |
|
static ffi_type * |
go_slice_to_ffi ( |
const struct __go_slice_type *descriptor __attribute__ ((unused))) |
{ |
ffi_type *ret; |
|
ret = (ffi_type *) __go_alloc (sizeof (ffi_type)); |
ret->type = FFI_TYPE_STRUCT; |
ret->elements = (ffi_type **) __go_alloc (4 * sizeof (ffi_type *)); |
ret->elements[0] = &ffi_type_pointer; |
ret->elements[1] = &ffi_type_sint; |
ret->elements[2] = &ffi_type_sint; |
ret->elements[3] = NULL; |
return ret; |
} |
|
/* Return an ffi_type for a Go struct type. */ |
|
static ffi_type * |
go_struct_to_ffi (const struct __go_struct_type *descriptor) |
{ |
ffi_type *ret; |
int field_count; |
const struct __go_struct_field *fields; |
int i; |
|
ret = (ffi_type *) __go_alloc (sizeof (ffi_type)); |
ret->type = FFI_TYPE_STRUCT; |
field_count = descriptor->__fields.__count; |
fields = (const struct __go_struct_field *) descriptor->__fields.__values; |
ret->elements = (ffi_type **) __go_alloc ((field_count + 1) |
* sizeof (ffi_type *)); |
for (i = 0; i < field_count; ++i) |
ret->elements[i] = go_type_to_ffi (fields[i].__type); |
ret->elements[field_count] = NULL; |
return ret; |
} |
|
/* Return an ffi_type for a Go string type. This describes the |
__go_string struct. */ |
|
static ffi_type * |
go_string_to_ffi (void) |
{ |
ffi_type *ret; |
|
ret = (ffi_type *) __go_alloc (sizeof (ffi_type)); |
ret->type = FFI_TYPE_STRUCT; |
ret->elements = (ffi_type **) __go_alloc (3 * sizeof (ffi_type *)); |
ret->elements[0] = &ffi_type_pointer; |
ret->elements[1] = &ffi_type_sint; |
ret->elements[2] = NULL; |
return ret; |
} |
|
/* Return an ffi_type for a Go interface type. This describes the |
__go_interface and __go_empty_interface structs. */ |
|
static ffi_type * |
go_interface_to_ffi (void) |
{ |
ffi_type *ret; |
|
ret = (ffi_type *) __go_alloc (sizeof (ffi_type)); |
ret->type = FFI_TYPE_STRUCT; |
ret->elements = (ffi_type **) __go_alloc (3 * sizeof (ffi_type *)); |
ret->elements[0] = &ffi_type_pointer; |
ret->elements[1] = &ffi_type_pointer; |
ret->elements[2] = NULL; |
return ret; |
} |
|
/* Return an ffi_type for a Go complex type. */ |
|
static ffi_type * |
go_complex_to_ffi (ffi_type *float_type) |
{ |
ffi_type *ret; |
|
ret = (ffi_type *) __go_alloc (sizeof (ffi_type)); |
ret->type = FFI_TYPE_STRUCT; |
ret->elements = (ffi_type **) __go_alloc (3 * sizeof (ffi_type *)); |
ret->elements[0] = float_type; |
ret->elements[1] = float_type; |
ret->elements[2] = NULL; |
return ret; |
} |
|
/* Return an ffi_type for a type described by a |
__go_type_descriptor. */ |
|
static ffi_type * |
go_type_to_ffi (const struct __go_type_descriptor *descriptor) |
{ |
switch (descriptor->__code & GO_CODE_MASK) |
{ |
case GO_BOOL: |
if (sizeof (_Bool) == 1) |
return &ffi_type_uint8; |
else if (sizeof (_Bool) == sizeof (int)) |
return &ffi_type_uint; |
abort (); |
case GO_FLOAT32: |
if (sizeof (float) == 4) |
return &ffi_type_float; |
abort (); |
case GO_FLOAT64: |
if (sizeof (double) == 8) |
return &ffi_type_double; |
abort (); |
case GO_COMPLEX64: |
if (sizeof (float) == 4) |
return go_complex_to_ffi (&ffi_type_float); |
abort (); |
case GO_COMPLEX128: |
if (sizeof (double) == 8) |
return go_complex_to_ffi (&ffi_type_double); |
abort (); |
case GO_INT16: |
return &ffi_type_sint16; |
case GO_INT32: |
return &ffi_type_sint32; |
case GO_INT64: |
return &ffi_type_sint64; |
case GO_INT8: |
return &ffi_type_sint8; |
case GO_INT: |
return &ffi_type_sint; |
case GO_UINT16: |
return &ffi_type_uint16; |
case GO_UINT32: |
return &ffi_type_uint32; |
case GO_UINT64: |
return &ffi_type_uint64; |
case GO_UINT8: |
return &ffi_type_uint8; |
case GO_UINT: |
return &ffi_type_uint; |
case GO_UINTPTR: |
if (sizeof (void *) == 2) |
return &ffi_type_uint16; |
else if (sizeof (void *) == 4) |
return &ffi_type_uint32; |
else if (sizeof (void *) == 8) |
return &ffi_type_uint64; |
abort (); |
case GO_ARRAY: |
return go_array_to_ffi ((const struct __go_array_type *) descriptor); |
case GO_SLICE: |
return go_slice_to_ffi ((const struct __go_slice_type *) descriptor); |
case GO_STRUCT: |
return go_struct_to_ffi ((const struct __go_struct_type *) descriptor); |
case GO_STRING: |
return go_string_to_ffi (); |
case GO_INTERFACE: |
return go_interface_to_ffi (); |
case GO_CHAN: |
case GO_FUNC: |
case GO_MAP: |
case GO_PTR: |
case GO_UNSAFE_POINTER: |
/* These types are always pointers, and for FFI purposes nothing |
else matters. */ |
return &ffi_type_pointer; |
default: |
abort (); |
} |
} |
|
/* Return the return type for a function, given the number of out |
parameters and their types. */ |
|
static ffi_type * |
go_func_return_ffi (const struct __go_func_type *func) |
{ |
int count; |
const struct __go_type_descriptor **types; |
ffi_type *ret; |
int i; |
|
count = func->__out.__count; |
if (count == 0) |
return &ffi_type_void; |
|
types = (const struct __go_type_descriptor **) func->__out.__values; |
|
if (count == 1) |
return go_type_to_ffi (types[0]); |
|
ret = (ffi_type *) __go_alloc (sizeof (ffi_type)); |
ret->type = FFI_TYPE_STRUCT; |
ret->elements = (ffi_type **) __go_alloc ((count + 1) * sizeof (ffi_type *)); |
for (i = 0; i < count; ++i) |
ret->elements[i] = go_type_to_ffi (types[i]); |
ret->elements[count] = NULL; |
return ret; |
} |
|
/* Build an ffi_cif structure for a function described by a |
__go_func_type structure. */ |
|
static void |
go_func_to_cif (const struct __go_func_type *func, _Bool is_interface, |
_Bool is_method, ffi_cif *cif) |
{ |
int num_params; |
const struct __go_type_descriptor **in_types; |
size_t num_args; |
ffi_type **args; |
int off; |
int i; |
ffi_type *rettype; |
ffi_status status; |
|
num_params = func->__in.__count; |
in_types = ((const struct __go_type_descriptor **) |
func->__in.__values); |
|
num_args = num_params + (is_interface ? 1 : 0); |
args = (ffi_type **) __go_alloc (num_args * sizeof (ffi_type *)); |
i = 0; |
off = 0; |
if (is_interface) |
{ |
args[0] = &ffi_type_pointer; |
off = 1; |
} |
else if (is_method) |
{ |
args[0] = &ffi_type_pointer; |
i = 1; |
} |
for (; i < num_params; ++i) |
args[i + off] = go_type_to_ffi (in_types[i]); |
|
rettype = go_func_return_ffi (func); |
|
status = ffi_prep_cif (cif, FFI_DEFAULT_ABI, num_args, rettype, args); |
__go_assert (status == FFI_OK); |
} |
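 |
/* Editor's note: a self-contained sketch of the libffi calling |
   sequence that go_func_to_cif and reflect_call build up to -- |
   describe the signature once with ffi_prep_cif, then invoke with |
   ffi_call.  Illustrative only; compile with -lffi.  Note the |
   result is an ffi_arg: small integer returns are widened, which |
   is why go_results_size below reserves a full word for them.  */ |
 |
#include <ffi.h> |
#include <stdio.h> |
 |
static int |
add (int a, int b) |
{ |
  return a + b; |
} |
 |
int |
main (void) |
{ |
  ffi_cif cif; |
  ffi_type *args[2]; |
  void *values[2]; |
  int a = 2, b = 40; |
  ffi_arg result;		/* small int results are widened */ |
 |
  args[0] = &ffi_type_sint; |
  args[1] = &ffi_type_sint; |
  values[0] = &a; |
  values[1] = &b; |
 |
  if (ffi_prep_cif (&cif, FFI_DEFAULT_ABI, 2, &ffi_type_sint, args) |
      != FFI_OK) |
    return 1; |
 |
  ffi_call (&cif, FFI_FN (add), &result, values); |
  printf ("add(2, 40) = %d\n", (int) result); |
  return 0; |
} |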
|
/* Get the total size required for the result parameters of a |
function. */ |
|
static size_t |
go_results_size (const struct __go_func_type *func) |
{ |
int count; |
const struct __go_type_descriptor **types; |
size_t off; |
size_t maxalign; |
int i; |
|
count = func->__out.__count; |
if (count == 0) |
return 0; |
|
types = (const struct __go_type_descriptor **) func->__out.__values; |
|
/* A single integer return value is always promoted to a full |
word. */ |
if (count == 1) |
{ |
switch (types[0]->__code & GO_CODE_MASK) |
{ |
case GO_BOOL: |
case GO_INT8: |
case GO_INT16: |
case GO_INT32: |
case GO_UINT8: |
case GO_UINT16: |
case GO_UINT32: |
case GO_INT: |
case GO_UINT: |
return sizeof (ffi_arg); |
|
default: |
break; |
} |
} |
|
off = 0; |
maxalign = 0; |
for (i = 0; i < count; ++i) |
{ |
size_t align; |
|
align = types[i]->__field_align; |
if (align > maxalign) |
maxalign = align; |
off = (off + align - 1) & ~ (align - 1); |
off += types[i]->__size; |
} |
|
off = (off + maxalign - 1) & ~ (maxalign - 1); |
|
return off; |
} |
|
/* Copy the results of calling a function via FFI from CALL_RESULT |
into the addresses in RESULTS. */ |
|
static void |
go_set_results (const struct __go_func_type *func, unsigned char *call_result, |
void **results) |
{ |
int count; |
const struct __go_type_descriptor **types; |
size_t off; |
int i; |
|
count = func->__out.__count; |
if (count == 0) |
return; |
|
types = (const struct __go_type_descriptor **) func->__out.__values; |
|
/* A single integer return value is always promoted to a full |
word. */ |
if (count == 1) |
{ |
switch (types[0]->__code & GO_CODE_MASK) |
{ |
case GO_BOOL: |
case GO_INT8: |
case GO_INT16: |
case GO_INT32: |
case GO_UINT8: |
case GO_UINT16: |
case GO_UINT32: |
case GO_INT: |
case GO_UINT: |
{ |
union |
{ |
unsigned char buf[sizeof (ffi_arg)]; |
ffi_arg v; |
} u; |
ffi_arg v; |
|
__builtin_memcpy (&u.buf, call_result, sizeof (ffi_arg)); |
v = u.v; |
|
switch (types[0]->__size) |
{ |
case 1: |
{ |
uint8_t b; |
|
b = (uint8_t) v; |
__builtin_memcpy (results[0], &b, 1); |
} |
break; |
|
case 2: |
{ |
uint16_t s; |
|
s = (uint16_t) v; |
__builtin_memcpy (results[0], &s, 2); |
} |
break; |
|
case 4: |
{ |
uint32_t w; |
|
w = (uint32_t) v; |
__builtin_memcpy (results[0], &w, 4); |
} |
break; |
|
case 8: |
{ |
uint64_t d; |
|
d = (uint64_t) v; |
__builtin_memcpy (results[0], &d, 8); |
} |
break; |
|
default: |
abort (); |
} |
} |
return; |
|
default: |
break; |
} |
} |
|
off = 0; |
for (i = 0; i < count; ++i) |
{ |
size_t align; |
size_t size; |
|
align = types[i]->__field_align; |
size = types[i]->__size; |
off = (off + align - 1) & ~ (align - 1); |
__builtin_memcpy (results[i], call_result + off, size); |
off += size; |
} |
} |
|
/* Call a function. The type of the function is FUNC_TYPE, and the |
address is FUNC_ADDR. PARAMS is an array of parameter addresses. |
RESULTS is an array of result addresses. */ |
|
void |
reflect_call (const struct __go_func_type *func_type, const void *func_addr, |
_Bool is_interface, _Bool is_method, void **params, |
void **results) |
{ |
ffi_cif cif; |
unsigned char *call_result; |
|
__go_assert ((func_type->__common.__code & GO_CODE_MASK) == GO_FUNC); |
go_func_to_cif (func_type, is_interface, is_method, &cif); |
|
call_result = (unsigned char *) malloc (go_results_size (func_type)); |
|
ffi_call (&cif, func_addr, call_result, params); |
|
/* Some day we may need to free result values if RESULTS is |
NULL. */ |
if (results != NULL) |
go_set_results (func_type, call_result, results); |
|
free (call_result); |
} |
|
#else /* !defined(USE_LIBFFI) */ |
|
void |
reflect_call (const struct __go_func_type *func_type __attribute__ ((unused)), |
const void *func_addr __attribute__ ((unused)), |
_Bool is_interface __attribute__ ((unused)), |
_Bool is_method __attribute__ ((unused)), |
void **params __attribute__ ((unused)), |
void **results __attribute__ ((unused))) |
{ |
/* Without FFI there is nothing we can do. */ |
runtime_throw("libgo built without FFI does not support " |
"reflect.Call or runtime.SetFinalizer"); |
} |
|
#endif /* !defined(USE_LIBFFI) */ |
/go-reflect-map.c
0,0 → 1,240
/* go-reflect-map.c -- map reflection support for Go. |
|
Copyright 2009, 2010 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include <stdlib.h> |
#include <stdint.h> |
|
#include "runtime.h" |
#include "go-alloc.h" |
#include "go-assert.h" |
#include "go-type.h" |
#include "map.h" |
|
/* This file implements support for reflection on maps. These |
functions are called from reflect/value.go. */ |
|
struct mapaccess_ret |
{ |
uintptr_t val; |
_Bool pres; |
}; |
|
extern struct mapaccess_ret mapaccess (struct __go_map_type *, uintptr_t, |
uintptr_t) |
asm ("libgo_reflect.reflect.mapaccess"); |
|
struct mapaccess_ret |
mapaccess (struct __go_map_type *mt, uintptr_t m, uintptr_t key_i) |
{ |
struct __go_map *map = (struct __go_map *) m; |
void *key; |
const struct __go_type_descriptor *key_descriptor; |
void *p; |
const struct __go_type_descriptor *val_descriptor; |
struct mapaccess_ret ret; |
void *val; |
void *pv; |
|
__go_assert (mt->__common.__code == GO_MAP); |
|
key_descriptor = mt->__key_type; |
if (__go_is_pointer_type (key_descriptor)) |
key = &key_i; |
else |
key = (void *) key_i; |
|
if (map == NULL) |
p = NULL; |
else |
p = __go_map_index (map, key, 0); |
|
val_descriptor = mt->__val_type; |
if (__go_is_pointer_type (val_descriptor)) |
{ |
val = NULL; |
pv = &val; |
} |
else |
{ |
val = __go_alloc (val_descriptor->__size); |
pv = val; |
} |
|
if (p == NULL) |
ret.pres = 0; |
else |
{ |
__builtin_memcpy (pv, p, val_descriptor->__size); |
ret.pres = 1; |
} |
|
ret.val = (uintptr_t) val; |
return ret; |
} |
|
extern void mapassign (struct __go_map_type *, uintptr_t, uintptr_t, |
uintptr_t, _Bool) |
asm ("libgo_reflect.reflect.mapassign"); |
|
void |
mapassign (struct __go_map_type *mt, uintptr_t m, uintptr_t key_i, |
uintptr_t val_i, _Bool pres) |
{ |
struct __go_map *map = (struct __go_map *) m; |
const struct __go_type_descriptor *key_descriptor; |
void *key; |
|
__go_assert (mt->__common.__code == GO_MAP); |
|
if (map == NULL) |
runtime_panicstring ("assignment to entry in nil map"); |
|
key_descriptor = mt->__key_type; |
if (__go_is_pointer_type (key_descriptor)) |
key = &key_i; |
else |
key = (void *) key_i; |
|
if (!pres) |
__go_map_delete (map, key); |
else |
{ |
void *p; |
const struct __go_type_descriptor *val_descriptor; |
void *pv; |
|
p = __go_map_index (map, key, 1); |
|
val_descriptor = mt->__val_type; |
if (__go_is_pointer_type (val_descriptor)) |
pv = &val_i; |
else |
pv = (void *) val_i; |
__builtin_memcpy (p, pv, val_descriptor->__size); |
} |
} |
|
extern int32_t maplen (uintptr_t) |
asm ("libgo_reflect.reflect.maplen"); |
|
int32_t |
maplen (uintptr_t m) |
{ |
struct __go_map *map = (struct __go_map *) m; |
|
if (map == NULL) |
return 0; |
return (int32_t) map->__element_count; |
} |
|
extern unsigned char *mapiterinit (struct __go_map_type *, uintptr_t) |
asm ("libgo_reflect.reflect.mapiterinit"); |
|
unsigned char * |
mapiterinit (struct __go_map_type *mt, uintptr_t m) |
{ |
struct __go_hash_iter *it; |
|
__go_assert (mt->__common.__code == GO_MAP); |
it = __go_alloc (sizeof (struct __go_hash_iter)); |
__go_mapiterinit ((struct __go_map *) m, it); |
return (unsigned char *) it; |
} |
|
extern void mapiternext (unsigned char *) |
asm ("libgo_reflect.reflect.mapiternext"); |
|
void |
mapiternext (unsigned char *it) |
{ |
__go_mapiternext ((struct __go_hash_iter *) it); |
} |
|
struct mapiterkey_ret |
{ |
uintptr_t key; |
_Bool ok; |
}; |
|
extern struct mapiterkey_ret mapiterkey (unsigned char *) |
asm ("libgo_reflect.reflect.mapiterkey"); |
|
struct mapiterkey_ret |
mapiterkey (unsigned char *ita) |
{ |
struct __go_hash_iter *it = (struct __go_hash_iter *) ita; |
struct mapiterkey_ret ret; |
|
if (it->entry == NULL) |
{ |
ret.key = 0; |
ret.ok = 0; |
} |
else |
{ |
const struct __go_type_descriptor *key_descriptor; |
void *key; |
void *pk; |
|
key_descriptor = it->map->__descriptor->__map_descriptor->__key_type; |
if (__go_is_pointer_type (key_descriptor)) |
{ |
key = NULL; |
pk = &key; |
} |
else |
{ |
key = __go_alloc (key_descriptor->__size); |
pk = key; |
} |
|
__go_mapiter1 (it, pk); |
|
ret.key = (uintptr_t) key; |
ret.ok = 1; |
} |
|
return ret; |
} |
|
/* Make a new map. We have to build our own map descriptor. */ |
|
extern uintptr_t makemap (const struct __go_map_type *) |
asm ("libgo_reflect.reflect.makemap"); |
|
uintptr_t |
makemap (const struct __go_map_type *t) |
{ |
struct __go_map_descriptor *md; |
unsigned int o; |
const struct __go_type_descriptor *kt; |
const struct __go_type_descriptor *vt; |
struct __go_map* map; |
void *ret; |
|
/* FIXME: Reference count. */ |
md = (struct __go_map_descriptor *) __go_alloc (sizeof (*md)); |
md->__map_descriptor = t; |
o = sizeof (void *); |
kt = t->__key_type; |
o = (o + kt->__field_align - 1) & ~ (kt->__field_align - 1); |
md->__key_offset = o; |
o += kt->__size; |
vt = t->__val_type; |
o = (o + vt->__field_align - 1) & ~ (vt->__field_align - 1); |
md->__val_offset = o; |
o += vt->__size; |
o = (o + sizeof (void *) - 1) & ~ (sizeof (void *) - 1); |
o = (o + kt->__field_align - 1) & ~ (kt->__field_align - 1); |
o = (o + vt->__field_align - 1) & ~ (vt->__field_align - 1); |
md->__entry_size = o; |
|
map = __go_new_map (md, 0); |
|
ret = __go_alloc (sizeof (void *)); |
__builtin_memcpy (ret, &map, sizeof (void *)); |
return (uintptr_t) ret; |
} |
/rtems-task-variable-add.c
0,0 → 1,24
/* rtems-task-variable-add.c -- adding a task specific variable in RTEMS OS. |
|
Copyright 2010 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include <rtems/error.h> |
#include <rtems/system.h> |
#include <rtems/rtems/tasks.h> |
|
#include "go-assert.h" |
|
/* RTEMS does not support GNU TLS extension __thread. */ |
void |
__wrap_rtems_task_variable_add (void **var) |
{ |
rtems_status_code sc = rtems_task_variable_add (RTEMS_SELF, var, NULL); |
if (sc != RTEMS_SUCCESSFUL) |
{ |
rtems_error (sc, "rtems_task_variable_add failed"); |
__go_assert (0); |
} |
} |
|
/thread-linux.c
0,0 → 1,111
// Copyright 2009 The Go Authors. All rights reserved. |
// Use of this source code is governed by a BSD-style |
// license that can be found in the LICENSE file. |
|
#include "runtime.h" |
|
#include <errno.h> |
#include <string.h> |
#include <time.h> |
#include <sys/types.h> |
#include <sys/stat.h> |
#include <fcntl.h> |
#include <unistd.h> |
#include <syscall.h> |
#include <linux/futex.h> |
|
typedef struct timespec Timespec; |
|
// Atomically, |
// if(*addr == val) sleep |
// Might be woken up spuriously; that's allowed. |
// Don't sleep longer than ns; ns < 0 means forever. |
void |
runtime_futexsleep(uint32 *addr, uint32 val, int64 ns) |
{ |
Timespec ts, *tsp; |
|
if(ns < 0) |
tsp = nil; |
else { |
ts.tv_sec = ns/1000000000LL; |
ts.tv_nsec = ns%1000000000LL; |
// Avoid overflow |
if(ts.tv_sec > 1<<30) |
ts.tv_sec = 1<<30; |
tsp = &ts; |
} |
|
// Some Linux kernels have a bug where futex of |
// FUTEX_WAIT returns an internal error code |
// as an errno. Libpthread ignores the return value |
// here, and so can we: as it says a few lines up, |
// spurious wakeups are allowed. |
syscall(__NR_futex, addr, FUTEX_WAIT, val, tsp, nil, 0); |
} |
|
// If any procs are sleeping on addr, wake up at most cnt. |
void |
runtime_futexwakeup(uint32 *addr, uint32 cnt) |
{ |
int64 ret; |
|
ret = syscall(__NR_futex, addr, FUTEX_WAKE, cnt, nil, nil, 0); |
|
if(ret >= 0) |
return; |
|
// I don't know that futex wakeup can return |
// EAGAIN or EINTR, but if it does, it would be |
// safe to loop and call futex again. |
runtime_printf("futexwakeup addr=%p returned %lld\n", addr, (long long)ret); |
	*(int32*)0x1006 = 0x1006;	// deliberate crash so the bad wakeup is loud |
} |
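 |
/* Editor's note: a minimal standalone demonstration of the futex |
   sleep/wake pairing used above -- one thread waits while the word |
   still holds the expected value, another changes it and wakes. |
   Linux-only and illustrative; error handling is elided.  Compile |
   with -pthread.  */ |
 |
#include <linux/futex.h> |
#include <pthread.h> |
#include <stdio.h> |
#include <sys/syscall.h> |
#include <unistd.h> |
 |
static int word = 0; |
 |
static void * |
waker (void *arg) |
{ |
  (void) arg; |
  sleep (1); |
  __atomic_store_n (&word, 1, __ATOMIC_SEQ_CST); |
  syscall (SYS_futex, &word, FUTEX_WAKE, 1, NULL, NULL, 0); |
  return NULL; |
} |
 |
int |
main (void) |
{ |
  pthread_t t; |
 |
  pthread_create (&t, NULL, waker, NULL); |
  /* Sleep only while word == 0; a spurious wakeup just re-checks, |
     which is exactly the looping the comments above rely on.  */ |
  while (__atomic_load_n (&word, __ATOMIC_SEQ_CST) == 0) |
    syscall (SYS_futex, &word, FUTEX_WAIT, 0, NULL, NULL, 0); |
  puts ("woken"); |
  pthread_join (t, NULL); |
  return 0; |
} |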
|
#ifndef O_CLOEXEC |
#define O_CLOEXEC 0 |
#endif |
|
static int32 |
getproccount(void) |
{ |
int32 fd, rd, cnt, cpustrlen; |
const char *cpustr; |
const byte *pos; |
byte *bufpos; |
byte buf[256]; |
|
fd = open("/proc/stat", O_RDONLY|O_CLOEXEC, 0); |
if(fd == -1) |
return 1; |
cnt = 0; |
bufpos = buf; |
cpustr = "\ncpu"; |
cpustrlen = strlen(cpustr); |
for(;;) { |
rd = read(fd, bufpos, sizeof(buf)-cpustrlen); |
if(rd == -1) |
break; |
bufpos[rd] = 0; |
for(pos=buf; (pos=(const byte*)strstr((const char*)pos, cpustr)) != nil; cnt++, pos++) { |
} |
if(rd < cpustrlen) |
break; |
memmove(buf, bufpos+rd-cpustrlen+1, cpustrlen-1); |
bufpos = buf+cpustrlen-1; |
} |
close(fd); |
return cnt ? cnt : 1; |
} |
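 |
/* Editor's note: getproccount above scans /proc/stat in fixed-size |
   chunks, and the memmove carries the last cpustrlen-1 bytes |
   forward so a "\ncpu" that straddles two reads is still counted. |
   The sketch below applies the same carry technique to an |
   in-memory "stream" with deliberately tiny chunks; illustrative |
   only.  No match can complete inside the carried tail alone, so |
   nothing is double-counted.  */ |
 |
#include <stdio.h> |
#include <string.h> |
 |
int |
main (void) |
{ |
  const char *stream = "cpu  1 2\ncpu0 3 4\ncpu1 5 6\nintr 7\n"; |
  const char *needle = "\ncpu"; |
  size_t nlen = strlen (needle); |
  size_t slen = strlen (stream); |
  char buf[8 + 1];		/* deliberately tiny chunks */ |
  size_t carry = 0, off = 0, cnt = 0; |
 |
  while (off < slen) |
    { |
      size_t rd = slen - off; |
      if (rd > sizeof (buf) - 1 - carry) |
	rd = sizeof (buf) - 1 - carry; |
      memcpy (buf + carry, stream + off, rd); |
      buf[carry + rd] = '\0'; |
      off += rd; |
 |
      for (const char *pos = buf; |
	   (pos = strstr (pos, needle)) != NULL; ++pos) |
	++cnt; |
 |
      /* Carry the tail so a match spanning chunks is not lost.  */ |
      carry = nlen - 1; |
      if (carry > strlen (buf)) |
	carry = strlen (buf); |
      memmove (buf, buf + strlen (buf) - carry, carry); |
    } |
 |
  printf ("found %zu cpus\n", cnt);	/* cpu0 and cpu1 -> 2 */ |
  return 0; |
} |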
|
void |
runtime_osinit(void) |
{ |
runtime_ncpu = getproccount(); |
} |
|
void |
runtime_goenvs(void) |
{ |
runtime_goenvs_unix(); |
} |
/go-caller.c
0,0 → 1,51
/* go-caller.c -- runtime.Caller and runtime.FuncForPC for Go. |
|
Copyright 2009 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
/* Implement runtime.Caller. */ |
|
#include <stdint.h> |
|
#include "go-string.h" |
|
/* The values returned by runtime.Caller. */ |
|
struct caller_ret |
{ |
uintptr_t pc; |
struct __go_string file; |
int line; |
_Bool ok; |
}; |
|
/* Implement runtime.Caller. */ |
|
struct caller_ret Caller (int n) asm ("libgo_runtime.runtime.Caller"); |
|
struct caller_ret |
Caller (int n __attribute__ ((unused))) |
{ |
struct caller_ret ret; |
|
/* A proper implementation needs to dig through the debugging |
information. */ |
ret.pc = (uint64_t) (uintptr_t) __builtin_return_address (0); |
ret.file.__data = NULL; |
ret.file.__length = 0; |
ret.line = 0; |
ret.ok = 0; |
|
return ret; |
} |
|
/* Implement runtime.FuncForPC. */ |
|
void *FuncForPC (uintptr_t) asm ("libgo_runtime.runtime.FuncForPC"); |
|
void * |
FuncForPC(uintptr_t pc __attribute__ ((unused))) |
{ |
return NULL; |
} |
/go-byte-array-to-string.c
0,0 → 1,25
/* go-byte-array-to-string.c -- convert an array of bytes to a string in Go. |
|
Copyright 2009 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include "go-string.h" |
#include "runtime.h" |
#include "arch.h" |
#include "malloc.h" |
|
struct __go_string |
__go_byte_array_to_string (const void* p, int len) |
{ |
const unsigned char *bytes; |
unsigned char *retdata; |
struct __go_string ret; |
|
bytes = (const unsigned char *) p; |
retdata = runtime_mallocgc (len, FlagNoPointers, 1, 0); |
__builtin_memcpy (retdata, bytes, len); |
ret.__data = retdata; |
ret.__length = len; |
return ret; |
} |
/mfinal.c
0,0 → 1,214
// Copyright 2010 The Go Authors. All rights reserved. |
// Use of this source code is governed by a BSD-style |
// license that can be found in the LICENSE file. |
|
#include "runtime.h" |
#include "arch.h" |
#include "malloc.h" |
|
enum { debug = 0 }; |
|
typedef struct Fin Fin; |
struct Fin |
{ |
void (*fn)(void*); |
const struct __go_func_type *ft; |
}; |
|
// Finalizer hash table. Direct hash, linear scan, at most 3/4 full. |
// Table size is power of 3 so that hash can be key % max. |
// Key[i] == (void*)-1 denotes free but formerly occupied entry |
// (doesn't stop the linear scan). |
// Key and val are separate tables because the garbage collector |
// must be instructed to ignore the pointers in key but follow the |
// pointers in val. |
typedef struct Fintab Fintab; |
struct Fintab |
{ |
Lock; |
void **fkey; |
Fin *val; |
int32 nkey; // number of non-nil entries in key |
int32 ndead; // number of dead (-1) entries in key |
int32 max; // size of key, val allocations |
}; |
|
#define TABSZ 17 |
#define TAB(p) (&fintab[((uintptr)(p)>>3)%TABSZ]) |
|
static struct { |
Fintab; |
uint8 pad[0 /* CacheLineSize - sizeof(Fintab) */]; |
} fintab[TABSZ]; |
|
static void |
addfintab(Fintab *t, void *k, void (*fn)(void*), const struct __go_func_type *ft) |
{ |
int32 i, j; |
|
i = (uintptr)k % (uintptr)t->max; |
for(j=0; j<t->max; j++) { |
if(t->fkey[i] == nil) { |
t->nkey++; |
goto ret; |
} |
if(t->fkey[i] == (void*)-1) { |
t->ndead--; |
goto ret; |
} |
if(++i == t->max) |
i = 0; |
} |
|
// cannot happen - table is known to be non-full |
runtime_throw("finalizer table inconsistent"); |
|
ret: |
t->fkey[i] = k; |
t->val[i].fn = fn; |
t->val[i].ft = ft; |
} |
|
static bool |
lookfintab(Fintab *t, void *k, bool del, Fin *f) |
{ |
int32 i, j; |
|
if(t->max == 0) |
return false; |
i = (uintptr)k % (uintptr)t->max; |
for(j=0; j<t->max; j++) { |
if(t->fkey[i] == nil) |
return false; |
if(t->fkey[i] == k) { |
if(f) |
*f = t->val[i]; |
if(del) { |
t->fkey[i] = (void*)-1; |
t->val[i].fn = nil; |
t->val[i].ft = nil; |
t->ndead++; |
} |
return true; |
} |
if(++i == t->max) |
i = 0; |
} |
|
// cannot happen - table is known to be non-full |
runtime_throw("finalizer table inconsistent"); |
return false; |
} |
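 |
/* Editor's note: addfintab/lookfintab above implement open |
   addressing with linear probing, where (void*)-1 marks a |
   "tombstone" -- a deleted slot that must not terminate a probe, |
   or keys inserted past it would become unreachable.  A |
   hypothetical stripped-down lookup, illustrative only; the fake |
   key values are never dereferenced, only hashed and compared.  */ |
 |
#include <stdio.h> |
 |
#define DEAD ((void *) -1) |
 |
static int |
demo_lookup (void **keys, int max, void *k) |
{ |
  int i = (int) ((unsigned long) k % (unsigned long) max); |
  int j; |
 |
  for (j = 0; j < max; j++) |
    { |
      if (keys[i] == NULL) |
	return -1;		/* empty slot ends the probe */ |
      if (keys[i] == k) |
	return i;		/* found */ |
      /* DEAD slots (and mismatches) keep the scan going.  */ |
      if (++i == max) |
	i = 0; |
    } |
  return -1;			/* table full of non-matches */ |
} |
 |
int |
main (void) |
{ |
  void *keys[8] = { NULL }; |
  void *ka = (void *) 0x10;	/* 0x10 % 8 == 0 */ |
  void *kb = (void *) 0x18;	/* 0x18 % 8 == 0: collides with ka */ |
 |
  keys[0] = ka;			/* ka sits in its home slot */ |
  keys[1] = kb;			/* kb probed one past */ |
  keys[0] = DEAD;		/* delete ka: tombstone, not NULL */ |
 |
  printf ("kb %s\n", demo_lookup (keys, 8, kb) >= 0 |
	  ? "still reachable past the tombstone" : "LOST"); |
  return 0; |
} |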
|
static void |
resizefintab(Fintab *tab) |
{ |
Fintab newtab; |
void *k; |
int32 i; |
|
runtime_memclr((byte*)&newtab, sizeof newtab); |
newtab.max = tab->max; |
if(newtab.max == 0) |
newtab.max = 3*3*3; |
else if(tab->ndead < tab->nkey/2) { |
// grow table if not many dead values. |
// otherwise just rehash into table of same size. |
newtab.max *= 3; |
} |
|
newtab.fkey = runtime_mallocgc(newtab.max*sizeof newtab.fkey[0], FlagNoPointers, 0, 1); |
newtab.val = runtime_mallocgc(newtab.max*sizeof newtab.val[0], 0, 0, 1); |
|
for(i=0; i<tab->max; i++) { |
k = tab->fkey[i]; |
if(k != nil && k != (void*)-1) |
addfintab(&newtab, k, tab->val[i].fn, tab->val[i].ft); |
} |
|
runtime_free(tab->fkey); |
runtime_free(tab->val); |
|
tab->fkey = newtab.fkey; |
tab->val = newtab.val; |
tab->nkey = newtab.nkey; |
tab->ndead = newtab.ndead; |
tab->max = newtab.max; |
} |
|
bool |
runtime_addfinalizer(void *p, void (*f)(void*), const struct __go_func_type *ft) |
{ |
Fintab *tab; |
byte *base; |
|
if(debug) { |
if(!runtime_mlookup(p, &base, nil, nil) || p != base) |
runtime_throw("addfinalizer on invalid pointer"); |
} |
|
tab = TAB(p); |
runtime_lock(tab); |
if(f == nil) { |
if(lookfintab(tab, p, true, nil)) |
runtime_setblockspecial(p, false); |
runtime_unlock(tab); |
return true; |
} |
|
if(lookfintab(tab, p, false, nil)) { |
runtime_unlock(tab); |
return false; |
} |
|
if(tab->nkey >= tab->max/2+tab->max/4) { |
// keep table at most 3/4 full: |
// allocate new table and rehash. |
resizefintab(tab); |
} |
|
addfintab(tab, p, f, ft); |
runtime_setblockspecial(p, true); |
runtime_unlock(tab); |
return true; |
} |
|
// get finalizer; if del, delete finalizer. |
// caller is responsible for updating RefHasFinalizer (special) bit. |
bool |
runtime_getfinalizer(void *p, bool del, void (**fn)(void*), const struct __go_func_type **ft) |
{ |
Fintab *tab; |
bool res; |
Fin f; |
|
tab = TAB(p); |
runtime_lock(tab); |
res = lookfintab(tab, p, del, &f); |
runtime_unlock(tab); |
if(res==false) |
return false; |
*fn = f.fn; |
*ft = f.ft; |
return true; |
} |
|
void |
runtime_walkfintab(void (*fn)(void*), void (*scan)(byte *, int64)) |
{ |
void **key; |
void **ekey; |
int32 i; |
|
for(i=0; i<TABSZ; i++) { |
runtime_lock(&fintab[i]); |
key = fintab[i].fkey; |
ekey = key + fintab[i].max; |
for(; key < ekey; key++) |
if(*key != nil && *key != ((void*)-1)) |
fn(*key); |
scan((byte*)&fintab[i].fkey, sizeof(void*)); |
scan((byte*)&fintab[i].val, sizeof(void*)); |
runtime_unlock(&fintab[i]); |
} |
} |
/go-new-map.c
0,0 → 1,141
/* go-new-map.c -- allocate a new map. |
|
Copyright 2009 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include "runtime.h" |
#include "go-alloc.h" |
#include "map.h" |
|
/* List of prime numbers, copied from libstdc++/src/hashtable.c. */ |
|
static const unsigned long prime_list[] = /* 256 + 1 or 256 + 48 + 1 */ |
{ |
2ul, 3ul, 5ul, 7ul, 11ul, 13ul, 17ul, 19ul, 23ul, 29ul, 31ul, |
37ul, 41ul, 43ul, 47ul, 53ul, 59ul, 61ul, 67ul, 71ul, 73ul, 79ul, |
83ul, 89ul, 97ul, 103ul, 109ul, 113ul, 127ul, 137ul, 139ul, 149ul, |
157ul, 167ul, 179ul, 193ul, 199ul, 211ul, 227ul, 241ul, 257ul, |
277ul, 293ul, 313ul, 337ul, 359ul, 383ul, 409ul, 439ul, 467ul, |
503ul, 541ul, 577ul, 619ul, 661ul, 709ul, 761ul, 823ul, 887ul, |
953ul, 1031ul, 1109ul, 1193ul, 1289ul, 1381ul, 1493ul, 1613ul, |
1741ul, 1879ul, 2029ul, 2179ul, 2357ul, 2549ul, 2753ul, 2971ul, |
3209ul, 3469ul, 3739ul, 4027ul, 4349ul, 4703ul, 5087ul, 5503ul, |
5953ul, 6427ul, 6949ul, 7517ul, 8123ul, 8783ul, 9497ul, 10273ul, |
11113ul, 12011ul, 12983ul, 14033ul, 15173ul, 16411ul, 17749ul, |
19183ul, 20753ul, 22447ul, 24281ul, 26267ul, 28411ul, 30727ul, |
33223ul, 35933ul, 38873ul, 42043ul, 45481ul, 49201ul, 53201ul, |
57557ul, 62233ul, 67307ul, 72817ul, 78779ul, 85229ul, 92203ul, |
99733ul, 107897ul, 116731ul, 126271ul, 136607ul, 147793ul, |
159871ul, 172933ul, 187091ul, 202409ul, 218971ul, 236897ul, |
256279ul, 277261ul, 299951ul, 324503ul, 351061ul, 379787ul, |
410857ul, 444487ul, 480881ul, 520241ul, 562841ul, 608903ul, |
658753ul, 712697ul, 771049ul, 834181ul, 902483ul, 976369ul, |
1056323ul, 1142821ul, 1236397ul, 1337629ul, 1447153ul, 1565659ul, |
1693859ul, 1832561ul, 1982627ul, 2144977ul, 2320627ul, 2510653ul, |
2716249ul, 2938679ul, 3179303ul, 3439651ul, 3721303ul, 4026031ul, |
4355707ul, 4712381ul, 5098259ul, 5515729ul, 5967347ul, 6456007ul, |
6984629ul, 7556579ul, 8175383ul, 8844859ul, 9569143ul, 10352717ul, |
11200489ul, 12117689ul, 13109983ul, 14183539ul, 15345007ul, |
16601593ul, 17961079ul, 19431899ul, 21023161ul, 22744717ul, |
24607243ul, 26622317ul, 28802401ul, 31160981ul, 33712729ul, |
36473443ul, 39460231ul, 42691603ul, 46187573ul, 49969847ul, |
54061849ul, 58488943ul, 63278561ul, 68460391ul, 74066549ul, |
80131819ul, 86693767ul, 93793069ul, 101473717ul, 109783337ul, |
118773397ul, 128499677ul, 139022417ul, 150406843ul, 162723577ul, |
176048909ul, 190465427ul, 206062531ul, 222936881ul, 241193053ul, |
260944219ul, 282312799ul, 305431229ul, 330442829ul, 357502601ul, |
386778277ul, 418451333ul, 452718089ul, 489790921ul, 529899637ul, |
573292817ul, 620239453ul, 671030513ul, 725980837ul, 785430967ul, |
849749479ul, 919334987ul, 994618837ul, 1076067617ul, 1164186217ul, |
1259520799ul, 1362662261ul, 1474249943ul, 1594975441ul, 1725587117ul, |
1866894511ul, 2019773507ul, 2185171673ul, 2364114217ul, 2557710269ul, |
2767159799ul, 2993761039ul, 3238918481ul, 3504151727ul, 3791104843ul, |
4101556399ul, 4294967291ul, |
#if __SIZEOF_LONG__ >= 8 |
6442450933ul, 8589934583ul, 12884901857ul, 17179869143ul, |
25769803693ul, 34359738337ul, 51539607367ul, 68719476731ul, |
103079215087ul, 137438953447ul, 206158430123ul, 274877906899ul, |
412316860387ul, 549755813881ul, 824633720731ul, 1099511627689ul, |
1649267441579ul, 2199023255531ul, 3298534883309ul, 4398046511093ul, |
6597069766607ul, 8796093022151ul, 13194139533241ul, 17592186044399ul, |
26388279066581ul, 35184372088777ul, 52776558133177ul, 70368744177643ul, |
105553116266399ul, 140737488355213ul, 211106232532861ul, 281474976710597ul, |
562949953421231ul, 1125899906842597ul, 2251799813685119ul, |
4503599627370449ul, 9007199254740881ul, 18014398509481951ul, |
36028797018963913ul, 72057594037927931ul, 144115188075855859ul, |
288230376151711717ul, 576460752303423433ul, |
1152921504606846883ul, 2305843009213693951ul, |
4611686018427387847ul, 9223372036854775783ul, |
18446744073709551557ul |
#endif |
}; |
|
/* Return the next number from PRIME_LIST >= N. */ |
|
uintptr_t |
__go_map_next_prime (uintptr_t n) |
{ |
size_t low; |
size_t high; |
|
low = 0; |
high = sizeof prime_list / sizeof prime_list[0]; |
while (low < high) |
{ |
size_t mid; |
|
mid = (low + high) / 2; |
|
/* Here LOW <= MID < HIGH. */ |
|
      if (prime_list[mid] < n) |
	low = mid + 1; |
      else if (prime_list[mid] > n) |
	high = mid; |
else |
return n; |
} |
if (low >= sizeof prime_list / sizeof prime_list[0]) |
return n; |
return prime_list[low]; |
} |
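 |
/* Editor's note: the loop above is a standard lower-bound binary |
   search -- when the probe is still below N the left half is |
   discarded, otherwise the right half -- so it converges on the |
   first prime >= N, falling back to N itself past the table's end. |
   A quick standalone self-check on a short table, illustrative |
   only:  */ |
 |
#include <stdio.h> |
 |
static const unsigned long demo_primes[] = { 2, 3, 5, 7, 11, 13, 17 }; |
#define NDEMO (sizeof demo_primes / sizeof demo_primes[0]) |
 |
static unsigned long |
demo_next_prime (unsigned long n) |
{ |
  size_t low = 0, high = NDEMO; |
 |
  while (low < high) |
    { |
      size_t mid = (low + high) / 2; |
      if (demo_primes[mid] < n) |
	low = mid + 1;		/* probe too small: go right */ |
      else if (demo_primes[mid] > n) |
	high = mid;		/* probe too large: go left */ |
      else |
	return n; |
    } |
  return low < NDEMO ? demo_primes[low] : n; |
} |
 |
int |
main (void) |
{ |
  unsigned long n; |
 |
  for (n = 0; n <= 18; n++) |
    printf ("next_prime(%lu) = %lu\n", n, demo_next_prime (n)); |
  return 0; |
} |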
|
/* Allocate a new map. */ |
|
struct __go_map * |
__go_new_map (const struct __go_map_descriptor *descriptor, uintptr_t entries) |
{ |
int ientries; |
struct __go_map *ret; |
|
ientries = (int) entries; |
if (ientries < 0 || (uintptr_t) ientries != entries) |
runtime_panicstring ("map size out of range"); |
|
if (entries == 0) |
entries = 5; |
else |
entries = __go_map_next_prime (entries); |
ret = (struct __go_map *) __go_alloc (sizeof (struct __go_map)); |
ret->__descriptor = descriptor; |
ret->__element_count = 0; |
ret->__bucket_count = entries; |
ret->__buckets = (void **) __go_alloc (entries * sizeof (void *)); |
__builtin_memset (ret->__buckets, 0, entries * sizeof (void *)); |
return ret; |
} |
|
/* Allocate a new map when the argument to make is a large type. */ |
|
struct __go_map * |
__go_new_map_big (const struct __go_map_descriptor *descriptor, |
uint64_t entries) |
{ |
uintptr_t sentries; |
|
sentries = (uintptr_t) entries; |
if ((uint64_t) sentries != entries) |
runtime_panicstring ("map size out of range"); |
return __go_new_map (descriptor, sentries); |
} |
/go-setenv.c
0,0 → 1,66
/* go-setenv.c -- set the C environment from Go. |
|
Copyright 2011 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include "config.h" |
|
#include <stddef.h> |
#include <stdlib.h> |
|
#include "go-alloc.h" |
#include "go-string.h" |
|
/* Set the C environment from Go. This is called by syscall.Setenv. */ |
|
void setenv_c (struct __go_string, struct __go_string) |
__asm__ ("libgo_syscall.syscall.setenv_c"); |
|
void |
setenv_c (struct __go_string k, struct __go_string v) |
{ |
const unsigned char *ks; |
unsigned char *kn; |
const unsigned char *vs; |
unsigned char *vn; |
|
ks = k.__data; |
kn = NULL; |
vs = v.__data; |
vn = NULL; |
|
#ifdef HAVE_SETENV |
|
if (ks[k.__length] != 0) |
{ |
kn = __go_alloc (k.__length + 1); |
__builtin_memcpy (kn, ks, k.__length); |
ks = kn; |
} |
|
if (vs[v.__length] != 0) |
{ |
vn = __go_alloc (v.__length + 1); |
__builtin_memcpy (vn, vs, v.__length); |
vs = vn; |
} |
|
setenv ((const char *) ks, (const char *) vs, 1); |
|
#else /* !defined(HAVE_SETENV) */ |
|
kn = malloc (k.__length + v.__length + 2); |
__builtin_memcpy (kn, ks, k.__length); |
kn[k.__length] = '='; |
__builtin_memcpy (kn + k.__length + 1, vs, v.__length); |
kn[k.__length + v.__length + 1] = '\0'; |
putenv ((char *) kn); |
|
#endif /* !defined(HAVE_SETENV) */ |
|
if (kn != NULL) |
__go_free (kn); |
if (vn != NULL) |
__go_free (vn); |
} |
/go-recover.c
0,0 → 1,80
/* go-recover.c -- support for the go recover function. |
|
Copyright 2010 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include "runtime.h" |
#include "interface.h" |
#include "go-panic.h" |
#include "go-defer.h" |
|
/* This is called by a thunk to see if the real function should be |
permitted to recover a panic value. Recovering a value is |
permitted if the thunk was called directly by defer. RETADDR is |
the return address of the function which is calling |
   __go_can_recover--that is, the thunk. */ |
|
_Bool |
__go_can_recover (const void* retaddr) |
{ |
G *g; |
struct __go_defer_stack *d; |
const char* ret; |
const char* dret; |
|
g = runtime_g (); |
|
d = g->defer; |
if (d == NULL) |
return 0; |
|
/* The panic which this function would recover is the one on the top |
of the panic stack. We do not want to recover it if that panic |
was on the top of the panic stack when this function was |
deferred. */ |
if (d->__panic == g->panic) |
return 0; |
|
/* D->__RETADDR is the address of a label immediately following the |
call to the thunk. We can recover a panic if that is the same as |
the return address of the thunk. We permit a bit of slack in |
case there is any code between the function return and the label, |
such as an instruction to adjust the stack pointer. */ |
|
ret = (const char *) retaddr; |
|
#ifdef __sparc__ |
/* On SPARC the address we get, from __builtin_return_address, is |
the address of the call instruction. Adjust forward, also |
skipping the delayed instruction following the call. */ |
ret += 8; |
#endif |
|
dret = (const char *) d->__retaddr; |
return ret <= dret && ret + 16 >= dret; |
} |
|
/* This is only called when it is valid for the caller to recover the |
value on top of the panic stack, if there is one. */ |
|
struct __go_empty_interface |
__go_recover () |
{ |
G *g; |
struct __go_panic_stack *p; |
|
g = runtime_g (); |
|
if (g->panic == NULL || g->panic->__was_recovered) |
{ |
struct __go_empty_interface ret; |
|
ret.__type_descriptor = NULL; |
ret.__object = NULL; |
return ret; |
} |
p = g->panic; |
p->__was_recovered = 1; |
return p->__arg; |
} |
/chan.c
0,0 → 1,1269
// Copyright 2009 The Go Authors. All rights reserved. |
// Use of this source code is governed by a BSD-style |
// license that can be found in the LICENSE file. |
|
#include "runtime.h" |
#include "go-type.h" |
|
#define NOSELGEN 1 |
|
static int32 debug = 0; |
|
typedef struct WaitQ WaitQ; |
typedef struct SudoG SudoG; |
typedef struct Select Select; |
typedef struct Scase Scase; |
|
typedef struct __go_type_descriptor Type; |
typedef struct __go_channel_type ChanType; |
|
struct SudoG |
{ |
G* g; // g and selgen constitute |
uint32 selgen; // a weak pointer to g |
SudoG* link; |
byte* elem; // data element |
}; |
|
struct WaitQ |
{ |
SudoG* first; |
SudoG* last; |
}; |
|
struct Hchan |
{ |
uint32 qcount; // total data in the q |
uint32 dataqsiz; // size of the circular q |
uint16 elemsize; |
bool closed; |
uint8 elemalign; |
uint32 sendx; // send index |
uint32 recvx; // receive index |
WaitQ recvq; // list of recv waiters |
WaitQ sendq; // list of send waiters |
Lock; |
}; |
|
// Buffer follows Hchan immediately in memory. |
// chanbuf(c, i) is pointer to the i'th slot in the buffer. |
#define chanbuf(c, i) ((byte*)((c)+1)+(uintptr)(c)->elemsize*(i)) |
|
enum |
{ |
// Scase.kind |
CaseRecv, |
CaseSend, |
CaseDefault, |
}; |
|
struct Scase |
{ |
SudoG sg; // must be first member (cast to Scase) |
Hchan* chan; // chan |
uint16 kind; |
uint16 index; // index to return |
bool* receivedp; // pointer to received bool (recv2) |
}; |
|
struct Select |
{ |
uint16 tcase; // total count of scase[] |
uint16 ncase; // currently filled scase[] |
uint16* pollorder; // case poll order |
Hchan** lockorder; // channel lock order |
Scase scase[1]; // one per case (in order of appearance) |
}; |
|
static void dequeueg(WaitQ*); |
static SudoG* dequeue(WaitQ*); |
static void enqueue(WaitQ*, SudoG*); |
|
Hchan* |
runtime_makechan_c(ChanType *t, int64 hint) |
{ |
Hchan *c; |
int32 n; |
const Type *elem; |
|
elem = t->__element_type; |
|
if(hint < 0 || (int32)hint != hint || (elem->__size > 0 && (uintptr)hint > ((uintptr)-1) / elem->__size)) |
runtime_panicstring("makechan: size out of range"); |
|
n = sizeof(*c); |
|
// allocate memory in one call |
c = (Hchan*)runtime_mal(n + hint*elem->__size); |
c->elemsize = elem->__size; |
c->elemalign = elem->__align; |
c->dataqsiz = hint; |
|
if(debug) |
runtime_printf("makechan: chan=%p; elemsize=%lld; elemalign=%d; dataqsiz=%d\n", |
c, (long long)elem->__size, elem->__align, c->dataqsiz); |
|
return c; |
} |
|
// For reflect |
// func makechan(typ *ChanType, size uint32) (chan) |
uintptr reflect_makechan(ChanType *, uint32) |
asm ("libgo_reflect.reflect.makechan"); |
|
uintptr |
reflect_makechan(ChanType *t, uint32 size) |
{ |
void *ret; |
Hchan *c; |
|
c = runtime_makechan_c(t, size); |
ret = runtime_mal(sizeof(void*)); |
__builtin_memcpy(ret, &c, sizeof(void*)); |
return (uintptr)ret; |
} |
|
// makechan(t *ChanType, hint int64) (hchan *chan any); |
Hchan* |
__go_new_channel(ChanType *t, uintptr hint) |
{ |
return runtime_makechan_c(t, hint); |
} |
|
Hchan* |
__go_new_channel_big(ChanType *t, uint64 hint) |
{ |
return runtime_makechan_c(t, hint); |
} |
|
/* |
* generic single channel send/recv |
* if the bool pointer is nil, |
* then the full exchange will |
* occur. if pres is not nil, |
* then the protocol will not |
* sleep but return if it could |
* not complete. |
* |
* sleep can wake up with g->param == nil |
* when a channel involved in the sleep has |
* been closed. it is easiest to loop and re-run |
* the operation; we'll see that it's now closed. |
*/ |
void |
runtime_chansend(ChanType *t, Hchan *c, byte *ep, bool *pres) |
{ |
SudoG *sg; |
SudoG mysg; |
G* gp; |
G* g; |
|
g = runtime_g(); |
|
if(c == nil) { |
USED(t); |
if(pres != nil) { |
*pres = false; |
return; |
} |
g->status = Gwaiting; |
g->waitreason = "chan send (nil chan)"; |
runtime_gosched(); |
return; // not reached |
} |
|
if(runtime_gcwaiting) |
runtime_gosched(); |
|
if(debug) { |
runtime_printf("chansend: chan=%p\n", c); |
} |
|
runtime_lock(c); |
if(c->closed) |
goto closed; |
|
if(c->dataqsiz > 0) |
goto asynch; |
|
sg = dequeue(&c->recvq); |
if(sg != nil) { |
runtime_unlock(c); |
|
gp = sg->g; |
gp->param = sg; |
if(sg->elem != nil) |
runtime_memmove(sg->elem, ep, c->elemsize); |
runtime_ready(gp); |
|
if(pres != nil) |
*pres = true; |
return; |
} |
|
if(pres != nil) { |
runtime_unlock(c); |
*pres = false; |
return; |
} |
|
mysg.elem = ep; |
mysg.g = g; |
mysg.selgen = NOSELGEN; |
g->param = nil; |
g->status = Gwaiting; |
g->waitreason = "chan send"; |
enqueue(&c->sendq, &mysg); |
runtime_unlock(c); |
runtime_gosched(); |
|
if(g->param == nil) { |
runtime_lock(c); |
if(!c->closed) |
runtime_throw("chansend: spurious wakeup"); |
goto closed; |
} |
|
return; |
|
asynch: |
if(c->closed) |
goto closed; |
|
if(c->qcount >= c->dataqsiz) { |
if(pres != nil) { |
runtime_unlock(c); |
*pres = false; |
return; |
} |
mysg.g = g; |
mysg.elem = nil; |
mysg.selgen = NOSELGEN; |
g->status = Gwaiting; |
g->waitreason = "chan send"; |
enqueue(&c->sendq, &mysg); |
runtime_unlock(c); |
runtime_gosched(); |
|
runtime_lock(c); |
goto asynch; |
} |
runtime_memmove(chanbuf(c, c->sendx), ep, c->elemsize); |
if(++c->sendx == c->dataqsiz) |
c->sendx = 0; |
c->qcount++; |
|
sg = dequeue(&c->recvq); |
if(sg != nil) { |
gp = sg->g; |
runtime_unlock(c); |
runtime_ready(gp); |
} else |
runtime_unlock(c); |
if(pres != nil) |
*pres = true; |
return; |
|
closed: |
runtime_unlock(c); |
runtime_panicstring("send on closed channel"); |
} |
|
|
void |
runtime_chanrecv(ChanType *t, Hchan* c, byte *ep, bool *selected, bool *received) |
{ |
SudoG *sg; |
SudoG mysg; |
G *gp; |
G *g; |
|
if(runtime_gcwaiting) |
runtime_gosched(); |
|
if(debug) |
runtime_printf("chanrecv: chan=%p\n", c); |
|
g = runtime_g(); |
|
if(c == nil) { |
USED(t); |
if(selected != nil) { |
*selected = false; |
return; |
} |
g->status = Gwaiting; |
g->waitreason = "chan receive (nil chan)"; |
runtime_gosched(); |
return; // not reached |
} |
|
runtime_lock(c); |
if(c->dataqsiz > 0) |
goto asynch; |
|
if(c->closed) |
goto closed; |
|
sg = dequeue(&c->sendq); |
if(sg != nil) { |
runtime_unlock(c); |
|
if(ep != nil) |
runtime_memmove(ep, sg->elem, c->elemsize); |
gp = sg->g; |
gp->param = sg; |
runtime_ready(gp); |
|
if(selected != nil) |
*selected = true; |
if(received != nil) |
*received = true; |
return; |
} |
|
if(selected != nil) { |
runtime_unlock(c); |
*selected = false; |
return; |
} |
|
mysg.elem = ep; |
mysg.g = g; |
mysg.selgen = NOSELGEN; |
g->param = nil; |
g->status = Gwaiting; |
g->waitreason = "chan receive"; |
enqueue(&c->recvq, &mysg); |
runtime_unlock(c); |
runtime_gosched(); |
|
if(g->param == nil) { |
runtime_lock(c); |
if(!c->closed) |
runtime_throw("chanrecv: spurious wakeup"); |
goto closed; |
} |
|
if(received != nil) |
*received = true; |
return; |
|
asynch: |
if(c->qcount <= 0) { |
if(c->closed) |
goto closed; |
|
if(selected != nil) { |
runtime_unlock(c); |
*selected = false; |
if(received != nil) |
*received = false; |
return; |
} |
mysg.g = g; |
mysg.elem = nil; |
mysg.selgen = NOSELGEN; |
g->status = Gwaiting; |
g->waitreason = "chan receive"; |
enqueue(&c->recvq, &mysg); |
runtime_unlock(c); |
runtime_gosched(); |
|
runtime_lock(c); |
goto asynch; |
} |
if(ep != nil) |
runtime_memmove(ep, chanbuf(c, c->recvx), c->elemsize); |
runtime_memclr(chanbuf(c, c->recvx), c->elemsize); |
if(++c->recvx == c->dataqsiz) |
c->recvx = 0; |
c->qcount--; |
|
sg = dequeue(&c->sendq); |
if(sg != nil) { |
gp = sg->g; |
runtime_unlock(c); |
runtime_ready(gp); |
} else |
runtime_unlock(c); |
|
if(selected != nil) |
*selected = true; |
if(received != nil) |
*received = true; |
return; |
|
closed: |
if(ep != nil) |
runtime_memclr(ep, c->elemsize); |
if(selected != nil) |
*selected = true; |
if(received != nil) |
*received = false; |
runtime_unlock(c); |
} |
|
// The compiler generates a call to __go_send_small to send a value 8 |
// bytes or smaller. |
void |
__go_send_small(ChanType *t, Hchan* c, uint64 val) |
{ |
union |
{ |
byte b[sizeof(uint64)]; |
uint64 v; |
} u; |
byte *p; |
|
u.v = val; |
#ifndef WORDS_BIGENDIAN |
p = u.b; |
#else |
p = u.b + sizeof(uint64) - t->__element_type->__size; |
#endif |
runtime_chansend(t, c, p, nil); |
} |
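 |
/* Editor's note: the union-plus-offset trick above exists because |
   a small value stored in a uint64 sits in the low-addressed bytes |
   on little-endian targets but in the high-addressed bytes on |
   big-endian ones.  A standalone probe of the same layout, |
   illustrative only (detecting endianness at run time instead of |
   via WORDS_BIGENDIAN):  */ |
 |
#include <stdint.h> |
#include <stdio.h> |
#include <string.h> |
 |
int |
main (void) |
{ |
  union |
  { |
    unsigned char b[sizeof (uint64_t)]; |
    uint64_t v; |
  } u; |
  unsigned char *p; |
  uint16_t elem;		/* pretend the element size is 2 */ |
 |
  u.v = 0x1234;			/* small value widened to 64 bits */ |
  if (u.b[0] == 0x34) |
    p = u.b;			/* little-endian: value at the front */ |
  else |
    p = u.b + sizeof (uint64_t) - sizeof (elem);	/* big-endian */ |
 |
  memcpy (&elem, p, sizeof (elem)); |
  printf ("recovered 0x%x\n", (unsigned) elem);	/* 0x1234 */ |
  return 0; |
} |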
|
// The compiler generates a call to __go_send_big to send a value |
// larger than 8 bytes. |
void |
__go_send_big(ChanType *t, Hchan* c, byte* p) |
{ |
runtime_chansend(t, c, p, nil); |
} |
|
// The compiler generates a call to __go_receive_small to receive a |
// value 8 bytes or smaller. |
uint64 |
__go_receive_small(ChanType *t, Hchan* c) |
{ |
union { |
byte b[sizeof(uint64)]; |
uint64 v; |
} u; |
byte *p; |
|
u.v = 0; |
#ifndef WORDS_BIGENDIAN |
p = u.b; |
#else |
p = u.b + sizeof(uint64) - t->__element_type->__size; |
#endif |
runtime_chanrecv(t, c, p, nil, nil); |
return u.v; |
} |
|
// The compiler generates a call to __go_receive_big to receive a |
// value larger than 8 bytes. |
void |
__go_receive_big(ChanType *t, Hchan* c, byte* p) |
{ |
runtime_chanrecv(t, c, p, nil, nil); |
} |
|
_Bool runtime_chanrecv2(ChanType *t, Hchan* c, byte* p) |
__asm__("runtime.chanrecv2"); |
|
_Bool |
runtime_chanrecv2(ChanType *t, Hchan* c, byte* p) |
{ |
bool received; |
|
runtime_chanrecv(t, c, p, nil, &received); |
return received; |
} |
|
// func selectnbsend(c chan any, elem any) bool |
// |
// compiler implements |
// |
// select { |
// case c <- v: |
// ... foo |
// default: |
// ... bar |
// } |
// |
// as |
// |
// if selectnbsend(c, v) { |
// ... foo |
// } else { |
// ... bar |
// } |
// |
_Bool |
runtime_selectnbsend(ChanType *t, Hchan *c, byte *p) |
{ |
bool res; |
|
runtime_chansend(t, c, p, &res); |
return res; |
} |
|
// func selectnbrecv(elem *any, c chan any) bool |
// |
// compiler implements |
// |
// select { |
// case v = <-c: |
// ... foo |
// default: |
// ... bar |
// } |
// |
// as |
// |
// if selectnbrecv(&v, c) { |
// ... foo |
// } else { |
// ... bar |
// } |
// |
_Bool |
runtime_selectnbrecv(ChanType *t, byte *v, Hchan *c) |
{ |
bool selected; |
|
runtime_chanrecv(t, c, v, &selected, nil); |
return selected; |
} |
|
// func selectnbrecv2(elem *any, ok *bool, c chan any) bool |
// |
// compiler implements |
// |
// select { |
// case v, ok = <-c: |
// ... foo |
// default: |
// ... bar |
// } |
// |
// as |
// |
// if c != nil && selectnbrecv2(&v, &ok, c) { |
// ... foo |
// } else { |
// ... bar |
// } |
// |
_Bool |
runtime_selectnbrecv2(ChanType *t, byte *v, _Bool *received, Hchan *c) |
{ |
bool selected; |
bool r; |
|
r = false; |
runtime_chanrecv(t, c, v, &selected, received == nil ? nil : &r); |
if(received != nil) |
*received = r; |
return selected; |
} |
|
// For reflect: |
// func chansend(c chan, val iword, nb bool) (selected bool) |
// where an iword is the same word an interface value would use: |
// the actual data if it fits, or else a pointer to the data. |
|
_Bool reflect_chansend(ChanType *, Hchan *, uintptr, _Bool) |
__asm__("libgo_reflect.reflect.chansend"); |
|
_Bool |
reflect_chansend(ChanType *t, Hchan *c, uintptr val, _Bool nb) |
{ |
bool selected; |
bool *sp; |
byte *vp; |
|
if(nb) { |
selected = false; |
sp = (bool*)&selected; |
} else { |
selected = true; |
sp = nil; |
} |
if(__go_is_pointer_type(t->__element_type)) |
vp = (byte*)&val; |
else |
vp = (byte*)val; |
runtime_chansend(t, c, vp, sp); |
return selected; |
} |
|
// For reflect: |
// func chanrecv(c chan, nb bool) (val iword, selected, received bool) |
// where an iword is the same word an interface value would use: |
// the actual data if it fits, or else a pointer to the data. |
|
struct chanrecv_ret |
{ |
uintptr val; |
_Bool selected; |
_Bool received; |
}; |
|
struct chanrecv_ret reflect_chanrecv(ChanType *, Hchan *, _Bool) |
__asm__("libgo_reflect.reflect.chanrecv"); |
|
struct chanrecv_ret |
reflect_chanrecv(ChanType *t, Hchan *c, _Bool nb) |
{ |
struct chanrecv_ret ret; |
byte *vp; |
bool *sp; |
bool selected; |
bool received; |
|
if(nb) { |
selected = false; |
sp = &selected; |
} else { |
ret.selected = true; |
sp = nil; |
} |
received = false; |
if(__go_is_pointer_type(t->__element_type)) { |
vp = (byte*)&ret.val; |
} else { |
vp = runtime_mal(t->__element_type->__size); |
ret.val = (uintptr)vp; |
} |
runtime_chanrecv(t, c, vp, sp, &received); |
if(nb) |
ret.selected = selected; |
ret.received = received; |
return ret; |
} |
|
static void newselect(int32, Select**); |
|
// newselect(size uint32) (sel *byte); |
|
void* runtime_newselect(int) __asm__("runtime.newselect"); |
|
void* |
runtime_newselect(int size) |
{ |
Select *sel; |
|
newselect(size, &sel); |
return (void*)sel; |
} |
|
static void |
newselect(int32 size, Select **selp) |
{ |
int32 n; |
Select *sel; |
|
n = 0; |
if(size > 1) |
n = size-1; |
|
sel = runtime_mal(sizeof(*sel) + |
n*sizeof(sel->scase[0]) + |
size*sizeof(sel->lockorder[0]) + |
size*sizeof(sel->pollorder[0])); |
|
sel->tcase = size; |
sel->ncase = 0; |
sel->lockorder = (void*)(sel->scase + size); |
sel->pollorder = (void*)(sel->lockorder + size); |
*selp = sel; |
|
if(debug) |
runtime_printf("newselect s=%p size=%d\n", sel, size); |
} |
|
// cut in half to give stack a chance to split |
static void selectsend(Select *sel, Hchan *c, int index, void *elem); |
|
// selectsend(sel *byte, hchan *chan any, elem *any) (selected bool); |
|
void runtime_selectsend(Select *, Hchan *, void *, int) |
__asm__("runtime.selectsend"); |
|
void |
runtime_selectsend(Select *sel, Hchan *c, void *elem, int index) |
{ |
// nil cases do not compete |
if(c == nil) |
return; |
|
selectsend(sel, c, index, elem); |
} |
|
static void |
selectsend(Select *sel, Hchan *c, int index, void *elem) |
{ |
int32 i; |
Scase *cas; |
|
i = sel->ncase; |
if(i >= sel->tcase) |
runtime_throw("selectsend: too many cases"); |
sel->ncase = i+1; |
cas = &sel->scase[i]; |
|
cas->index = index; |
cas->chan = c; |
cas->kind = CaseSend; |
cas->sg.elem = elem; |
|
if(debug) |
runtime_printf("selectsend s=%p index=%d chan=%p\n", |
sel, cas->index, cas->chan); |
} |
|
// cut in half to give stack a chance to split |
static void selectrecv(Select *sel, Hchan *c, int index, void *elem, bool*); |
|
// selectrecv(sel *byte, hchan *chan any, elem *any) (selected bool); |
|
void runtime_selectrecv(Select *, Hchan *, void *, int) |
__asm__("runtime.selectrecv"); |
|
void |
runtime_selectrecv(Select *sel, Hchan *c, void *elem, int index) |
{ |
// nil cases do not compete |
if(c == nil) |
return; |
|
selectrecv(sel, c, index, elem, nil); |
} |
|
// selectrecv2(sel *byte, hchan *chan any, elem *any, received *bool) (selected bool); |
|
void runtime_selectrecv2(Select *, Hchan *, void *, bool *, int) |
__asm__("runtime.selectrecv2"); |
|
void |
runtime_selectrecv2(Select *sel, Hchan *c, void *elem, bool *received, int index) |
{ |
// nil cases do not compete |
if(c == nil) |
return; |
|
selectrecv(sel, c, index, elem, received); |
} |
|
static void |
selectrecv(Select *sel, Hchan *c, int index, void *elem, bool *received) |
{ |
int32 i; |
Scase *cas; |
|
i = sel->ncase; |
if(i >= sel->tcase) |
runtime_throw("selectrecv: too many cases"); |
sel->ncase = i+1; |
cas = &sel->scase[i]; |
cas->index = index; |
cas->chan = c; |
|
cas->kind = CaseRecv; |
cas->sg.elem = elem; |
cas->receivedp = received; |
|
if(debug) |
runtime_printf("selectrecv s=%p index=%d chan=%p\n", |
sel, cas->index, cas->chan); |
} |
|
// cut in half to give stack a chance to split |
static void selectdefault(Select*, int); |
|
// selectdefault(sel *byte) (selected bool); |
|
void runtime_selectdefault(Select *, int) __asm__("runtime.selectdefault"); |
|
void |
runtime_selectdefault(Select *sel, int index) |
{ |
selectdefault(sel, index); |
} |
|
static void |
selectdefault(Select *sel, int index) |
{ |
int32 i; |
Scase *cas; |
|
i = sel->ncase; |
if(i >= sel->tcase) |
runtime_throw("selectdefault: too many cases"); |
sel->ncase = i+1; |
cas = &sel->scase[i]; |
cas->index = index; |
cas->chan = nil; |
|
cas->kind = CaseDefault; |
|
if(debug) |
runtime_printf("selectdefault s=%p index=%d\n", |
sel, cas->index); |
} |
|
static void |
sellock(Select *sel) |
{ |
uint32 i; |
Hchan *c, *c0; |
|
c = nil; |
for(i=0; i<sel->ncase; i++) { |
c0 = sel->lockorder[i]; |
if(c0 && c0 != c) { |
c = sel->lockorder[i]; |
runtime_lock(c); |
} |
} |
} |
|
static void |
selunlock(Select *sel) |
{ |
uint32 i; |
Hchan *c, *c0; |
|
c = nil; |
for(i=sel->ncase; i-->0;) { |
c0 = sel->lockorder[i]; |
if(c0 && c0 != c) { |
c = c0; |
runtime_unlock(c); |
} |
} |
} |
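 |
/* Editor's note: sellock/selunlock above rely on two properties of |
   the sorted lockorder array -- one global order (by address) |
   prevents deadlock between concurrent selects, and skipping equal |
   neighbours avoids locking the same channel twice when it appears |
   in several cases.  A hypothetical sketch of the same discipline |
   with pthread mutexes, illustrative only; compile with -pthread. */ |
 |
#include <pthread.h> |
#include <stdio.h> |
#include <stdlib.h> |
 |
static int |
by_addr (const void *a, const void *b) |
{ |
  pthread_mutex_t *x = *(pthread_mutex_t *const *) a; |
  pthread_mutex_t *y = *(pthread_mutex_t *const *) b; |
  return x < y ? -1 : x > y ? 1 : 0; |
} |
 |
static void |
lock_all (pthread_mutex_t **order, int n) |
{ |
  pthread_mutex_t *prev = NULL; |
  int i; |
 |
  qsort (order, n, sizeof *order, by_addr); |
  for (i = 0; i < n; i++) |
    if (order[i] != prev)	/* skip duplicates */ |
      { |
	pthread_mutex_lock (order[i]); |
	prev = order[i]; |
      } |
} |
 |
static void |
unlock_all (pthread_mutex_t **order, int n) |
{ |
  pthread_mutex_t *prev = NULL; |
  int i; |
 |
  for (i = n; i-- > 0;)		/* reverse order, like selunlock */ |
    if (order[i] != prev) |
      { |
	pthread_mutex_unlock (order[i]); |
	prev = order[i]; |
      } |
} |
 |
int |
main (void) |
{ |
  pthread_mutex_t a = PTHREAD_MUTEX_INITIALIZER; |
  pthread_mutex_t b = PTHREAD_MUTEX_INITIALIZER; |
  /* The same mutex listed twice, as one channel can appear in |
     several cases of a select.  */ |
  pthread_mutex_t *order[3] = { &b, &a, &b }; |
 |
  lock_all (order, 3); |
  puts ("locked each distinct mutex exactly once, in address order"); |
  unlock_all (order, 3); |
  return 0; |
} |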
|
void |
runtime_block(void) |
{ |
G *g; |
|
g = runtime_g(); |
g->status = Gwaiting; // forever |
g->waitreason = "select (no cases)"; |
runtime_gosched(); |
} |
|
static int selectgo(Select**); |
|
// selectgo(sel *byte); |
|
int runtime_selectgo(Select *) __asm__("runtime.selectgo"); |
|
int |
runtime_selectgo(Select *sel) |
{ |
return selectgo(&sel); |
} |
|
static int |
selectgo(Select **selp) |
{ |
Select *sel; |
uint32 o, i, j; |
Scase *cas, *dfl; |
Hchan *c; |
SudoG *sg; |
G *gp; |
int index; |
G *g; |
|
sel = *selp; |
if(runtime_gcwaiting) |
runtime_gosched(); |
|
if(debug) |
runtime_printf("select: sel=%p\n", sel); |
|
g = runtime_g(); |
|
// The compiler rewrites selects that statically have |
// only 0 or 1 cases plus default into simpler constructs. |
// The only way we can end up with such small sel->ncase |
// values here is for a larger select in which most channels |
// have been nilled out. The general code handles those |
// cases correctly, and they are rare enough not to bother |
// optimizing (and needing to test). |
|
// generate permuted order |
for(i=0; i<sel->ncase; i++) |
sel->pollorder[i] = i; |
for(i=1; i<sel->ncase; i++) { |
o = sel->pollorder[i]; |
j = runtime_fastrand1()%(i+1); |
sel->pollorder[i] = sel->pollorder[j]; |
sel->pollorder[j] = o; |
} |
|
// sort the cases by Hchan address to get the locking order. |
for(i=0; i<sel->ncase; i++) { |
c = sel->scase[i].chan; |
for(j=i; j>0 && sel->lockorder[j-1] >= c; j--) |
sel->lockorder[j] = sel->lockorder[j-1]; |
sel->lockorder[j] = c; |
} |
sellock(sel); |
|
loop: |
// pass 1 - look for something already waiting |
dfl = nil; |
for(i=0; i<sel->ncase; i++) { |
o = sel->pollorder[i]; |
cas = &sel->scase[o]; |
c = cas->chan; |
|
switch(cas->kind) { |
case CaseRecv: |
if(c->dataqsiz > 0) { |
if(c->qcount > 0) |
goto asyncrecv; |
} else { |
sg = dequeue(&c->sendq); |
if(sg != nil) |
goto syncrecv; |
} |
if(c->closed) |
goto rclose; |
break; |
|
case CaseSend: |
if(c->closed) |
goto sclose; |
if(c->dataqsiz > 0) { |
if(c->qcount < c->dataqsiz) |
goto asyncsend; |
} else { |
sg = dequeue(&c->recvq); |
if(sg != nil) |
goto syncsend; |
} |
break; |
|
case CaseDefault: |
dfl = cas; |
break; |
} |
} |
|
if(dfl != nil) { |
selunlock(sel); |
cas = dfl; |
goto retc; |
} |
|
|
// pass 2 - enqueue on all chans |
for(i=0; i<sel->ncase; i++) { |
o = sel->pollorder[i]; |
cas = &sel->scase[o]; |
c = cas->chan; |
sg = &cas->sg; |
sg->g = g; |
sg->selgen = g->selgen; |
|
switch(cas->kind) { |
case CaseRecv: |
enqueue(&c->recvq, sg); |
break; |
|
case CaseSend: |
enqueue(&c->sendq, sg); |
break; |
} |
} |
|
g->param = nil; |
g->status = Gwaiting; |
g->waitreason = "select"; |
selunlock(sel); |
runtime_gosched(); |
|
sellock(sel); |
sg = g->param; |
|
// pass 3 - dequeue from unsuccessful chans |
// otherwise they stack up on quiet channels |
for(i=0; i<sel->ncase; i++) { |
cas = &sel->scase[i]; |
if(cas != (Scase*)sg) { |
c = cas->chan; |
if(cas->kind == CaseSend) |
dequeueg(&c->sendq); |
else |
dequeueg(&c->recvq); |
} |
} |
|
if(sg == nil) |
goto loop; |
|
cas = (Scase*)sg; |
c = cas->chan; |
|
if(c->dataqsiz > 0) |
runtime_throw("selectgo: shouldnt happen"); |
|
if(debug) |
runtime_printf("wait-return: sel=%p c=%p cas=%p kind=%d\n", |
sel, c, cas, cas->kind); |
|
if(cas->kind == CaseRecv) { |
if(cas->receivedp != nil) |
*cas->receivedp = true; |
} |
|
selunlock(sel); |
goto retc; |
|
asyncrecv: |
// can receive from buffer |
if(cas->receivedp != nil) |
*cas->receivedp = true; |
if(cas->sg.elem != nil) |
runtime_memmove(cas->sg.elem, chanbuf(c, c->recvx), c->elemsize); |
runtime_memclr(chanbuf(c, c->recvx), c->elemsize); |
if(++c->recvx == c->dataqsiz) |
c->recvx = 0; |
c->qcount--; |
sg = dequeue(&c->sendq); |
if(sg != nil) { |
gp = sg->g; |
selunlock(sel); |
runtime_ready(gp); |
} else { |
selunlock(sel); |
} |
goto retc; |
|
asyncsend: |
// can send to buffer |
runtime_memmove(chanbuf(c, c->sendx), cas->sg.elem, c->elemsize); |
if(++c->sendx == c->dataqsiz) |
c->sendx = 0; |
c->qcount++; |
sg = dequeue(&c->recvq); |
if(sg != nil) { |
gp = sg->g; |
selunlock(sel); |
runtime_ready(gp); |
} else { |
selunlock(sel); |
} |
goto retc; |
|
syncrecv: |
// can receive from sleeping sender (sg) |
selunlock(sel); |
if(debug) |
runtime_printf("syncrecv: sel=%p c=%p o=%d\n", sel, c, o); |
if(cas->receivedp != nil) |
*cas->receivedp = true; |
if(cas->sg.elem != nil) |
runtime_memmove(cas->sg.elem, sg->elem, c->elemsize); |
gp = sg->g; |
gp->param = sg; |
runtime_ready(gp); |
goto retc; |
|
rclose: |
// read at end of closed channel |
selunlock(sel); |
if(cas->receivedp != nil) |
*cas->receivedp = false; |
if(cas->sg.elem != nil) |
runtime_memclr(cas->sg.elem, c->elemsize); |
goto retc; |
|
syncsend: |
// can send to sleeping receiver (sg) |
selunlock(sel); |
if(debug) |
runtime_printf("syncsend: sel=%p c=%p o=%d\n", sel, c, o); |
if(sg->elem != nil) |
runtime_memmove(sg->elem, cas->sg.elem, c->elemsize); |
gp = sg->g; |
gp->param = sg; |
runtime_ready(gp); |
|
retc: |
// return index corresponding to chosen case |
index = cas->index; |
runtime_free(sel); |
return index; |
|
sclose: |
// send on closed channel |
selunlock(sel); |
runtime_panicstring("send on closed channel"); |
return 0; // not reached |
} |
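 |
// Illustrative sketch (compiled out): the two orderings selectgo builds |
// above. pollorder is a uniformly random permutation (a Fisher-Yates |
// shuffle, driven here by rand() instead of runtime_fastrand1) so that |
// no ready case is systematically favored; lockorder sorts the channels |
// by address (insertion sort) so that every select acquires channel |
// locks in one global order, which prevents deadlock between |
// overlapping selects. The names below are hypothetical demo code, not |
// part of the runtime. |
#if 0 |
#include <stdlib.h> |
 |
static void |
demo_select_orders(void **chans, unsigned *pollorder, void **lockorder, unsigned ncase) |
{ |
	unsigned i, j, o; |
	void *c; |
 |
	// random poll order: permute the indexes 0..ncase-1 |
	for(i=0; i<ncase; i++) |
		pollorder[i] = i; |
	for(i=1; i<ncase; i++) { |
		o = pollorder[i]; |
		j = rand()%(i+1); |
		pollorder[i] = pollorder[j]; |
		pollorder[j] = o; |
	} |
 |
	// deterministic lock order: insertion sort by channel address |
	for(i=0; i<ncase; i++) { |
		c = chans[i]; |
		for(j=i; j>0 && lockorder[j-1] >= c; j--) |
			lockorder[j] = lockorder[j-1]; |
		lockorder[j] = c; |
	} |
} |
#endif |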
|
// closechan(c chan); |
void |
runtime_closechan(Hchan *c) |
{ |
SudoG *sg; |
G* gp; |
|
if(c == nil) |
runtime_panicstring("close of nil channel"); |
|
if(runtime_gcwaiting) |
runtime_gosched(); |
|
runtime_lock(c); |
if(c->closed) { |
runtime_unlock(c); |
runtime_panicstring("close of closed channel"); |
} |
|
c->closed = true; |
|
// release all readers |
for(;;) { |
sg = dequeue(&c->recvq); |
if(sg == nil) |
break; |
gp = sg->g; |
gp->param = nil; |
runtime_ready(gp); |
} |
|
// release all writers |
for(;;) { |
sg = dequeue(&c->sendq); |
if(sg == nil) |
break; |
gp = sg->g; |
gp->param = nil; |
runtime_ready(gp); |
} |
|
runtime_unlock(c); |
} |
|
void |
__go_builtin_close(Hchan *c) |
{ |
runtime_closechan(c); |
} |
|
// For reflect |
// func chanclose(c chan) |
|
void reflect_chanclose(uintptr) __asm__("libgo_reflect.reflect.chanclose"); |
|
void |
reflect_chanclose(uintptr c) |
{ |
runtime_closechan((Hchan*)c); |
} |
|
// For reflect |
// func chanlen(c chan) (len int32) |
|
int32 reflect_chanlen(uintptr) __asm__("libgo_reflect.reflect.chanlen"); |
|
int32 |
reflect_chanlen(uintptr ca) |
{ |
Hchan *c; |
int32 len; |
|
c = (Hchan*)ca; |
if(c == nil) |
len = 0; |
else |
len = c->qcount; |
return len; |
} |
|
int |
__go_chan_len(Hchan *c) |
{ |
return reflect_chanlen((uintptr)c); |
} |
|
// For reflect |
// func chancap(c chan) (cap int32) |
|
int32 reflect_chancap(uintptr) __asm__("libgo_reflect.reflect.chancap"); |
|
int32 |
reflect_chancap(uintptr ca) |
{ |
Hchan *c; |
int32 cap; |
|
c = (Hchan*)ca; |
if(c == nil) |
cap = 0; |
else |
cap = c->dataqsiz; |
return cap; |
} |
|
int |
__go_chan_cap(Hchan *c) |
{ |
return reflect_chancap((uintptr)c); |
} |
|
static SudoG* |
dequeue(WaitQ *q) |
{ |
SudoG *sgp; |
|
loop: |
sgp = q->first; |
if(sgp == nil) |
return nil; |
q->first = sgp->link; |
|
// if sgp is stale, ignore it |
if(sgp->selgen != NOSELGEN && |
(sgp->selgen != sgp->g->selgen || |
!runtime_cas(&sgp->g->selgen, sgp->selgen, sgp->selgen + 2))) { |
//prints("INVALID PSEUDOG POINTER\n"); |
goto loop; |
} |
|
return sgp; |
} |
|
static void |
dequeueg(WaitQ *q) |
{ |
SudoG **l, *sgp, *prevsgp; |
G *g; |
|
g = runtime_g(); |
prevsgp = nil; |
for(l=&q->first; (sgp=*l) != nil; l=&sgp->link, prevsgp=sgp) { |
if(sgp->g == g) { |
*l = sgp->link; |
if(q->last == sgp) |
q->last = prevsgp; |
break; |
} |
} |
} |
|
static void |
enqueue(WaitQ *q, SudoG *sgp) |
{ |
sgp->link = nil; |
if(q->first == nil) { |
q->first = sgp; |
q->last = sgp; |
return; |
} |
q->last->link = sgp; |
q->last = sgp; |
} |
/go-unsafe-newarray.c
0,0 → 1,32
/* go-unsafe-newarray.c -- unsafe.NewArray function for Go. |
|
Copyright 2009 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include "runtime.h" |
#include "go-alloc.h" |
#include "go-type.h" |
#include "interface.h" |
|
/* Implement unsafe.NewArray. */ |
|
void *NewArray (struct __go_empty_interface type, int n) |
asm ("libgo_unsafe.unsafe.NewArray"); |
|
/* The dynamic type of the argument will be a pointer to a type |
descriptor. */ |
|
void * |
NewArray (struct __go_empty_interface type, int n) |
{ |
const struct __go_type_descriptor *descriptor; |
|
if (((uintptr_t) type.__type_descriptor & reflectFlags) != 0) |
runtime_panicstring ("invalid interface value"); |
|
/* FIXME: We should check __type_descriptor to verify that this is |
really a type descriptor. */ |
descriptor = (const struct __go_type_descriptor *) type.__object; |
return __go_alloc (descriptor->__size * n); |
} |
/go-rune.c
0,0 → 1,77
/* go-rune.c -- rune functions for Go. |
|
Copyright 2009, 2010 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include <stddef.h> |
|
#include "go-string.h" |
|
/* Get a character from the UTF-8 string STR, of length LEN. Store |
the Unicode character, if any, in *RUNE. Return the number of |
bytes consumed from STR. */ |
|
int |
__go_get_rune (const unsigned char *str, size_t len, int *rune) |
{ |
int c, c1, c2, c3; |
|
/* Default to the "replacement character". */ |
*rune = 0xfffd; |
|
if (len <= 0) |
return 1; |
|
c = *str; |
if (c <= 0x7f) |
{ |
*rune = c; |
return 1; |
} |
|
if (len <= 1) |
return 1; |
|
c1 = str[1]; |
if ((c & 0xe0) == 0xc0 |
&& (c1 & 0xc0) == 0x80) |
{ |
*rune = (((c & 0x1f) << 6) |
+ (c1 & 0x3f)); |
return 2; |
} |
|
if (len <= 2) |
return 1; |
|
c2 = str[2]; |
if ((c & 0xf0) == 0xe0 |
&& (c1 & 0xc0) == 0x80 |
&& (c2 & 0xc0) == 0x80) |
{ |
*rune = (((c & 0xf) << 12) |
+ ((c1 & 0x3f) << 6) |
+ (c2 & 0x3f)); |
return 3; |
} |
|
if (len <= 3) |
return 1; |
|
c3 = str[3]; |
if ((c & 0xf8) == 0xf0 |
&& (c1 & 0xc0) == 0x80 |
&& (c2 & 0xc0) == 0x80 |
&& (c3 & 0xc0) == 0x80) |
{ |
*rune = (((c & 0x7) << 18) |
+ ((c1 & 0x3f) << 12) |
+ ((c2 & 0x3f) << 6) |
+ (c3 & 0x3f)); |
return 4; |
} |
|
/* Invalid encoding. Return 1 so that we advance. */ |
return 1; |
} |
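 |
/* Illustrative usage (compiled out). The buffer encodes 'a', U+00E9 |
   and U+20AC as 1-, 2- and 3-byte UTF-8 sequences, so successive |
   calls return 1, 2 and 3 and store 0x61, 0xe9 and 0x20ac in |
   *RUNE. */ |
#if 0 |
static void |
example (void) |
{ |
  static const unsigned char s[] = "a\xc3\xa9\xe2\x82\xac"; |
  size_t len = sizeof s - 1; |
  size_t off = 0; |
  int rune; |
 |
  while (off < len) |
    off += __go_get_rune (s + off, len - off, &rune); |
} |
#endif |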
/sigqueue.goc
0,0 → 1,115
// Copyright 2009 The Go Authors. All rights reserved. |
// Use of this source code is governed by a BSD-style |
// license that can be found in the LICENSE file. |
|
// This file implements runtime support for signal handling. |
// |
// Most synchronization primitives are not available from |
// the signal handler (it cannot block and cannot use locks) |
// so the handler communicates with a processing goroutine |
// via struct sig, below. |
// |
// Ownership of sig.Note passes back and forth between |
// the signal handler and the signal goroutine in rounds. |
// The initial state is that sig.Note is cleared (set up by siginit). |
// At the beginning of each round, mask == 0. |
// The round goes through three stages: |
// |
// (In parallel) |
// 1a) One or more signals arrive and are handled |
// by sigsend using cas to set bits in sig.mask. |
// The handler that changes sig.mask from zero to non-zero |
// calls notewakeup(&sig). |
// 1b) Sigrecv calls notesleep(&sig) to wait for the wakeup. |
// |
// 2) Having received the wakeup, sigrecv knows that sigsend |
// will not send another wakeup, so it can noteclear(&sig) |
// to prepare for the next round. (Sigsend may still be adding |
// signals to sig.mask at this point, which is fine.) |
// |
// 3) Sigrecv uses cas to grab the current sig.mask and zero it, |
// triggering the next round. |
// |
// The signal handler takes ownership of the note by atomically |
// changing mask from a zero to non-zero value. It gives up |
// ownership by calling notewakeup. The signal goroutine takes |
// ownership by returning from notesleep (caused by the notewakeup) |
// and gives up ownership by clearing mask. |
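// |
// A minimal sketch of one round (illustrative only; the real code |
// follows below): |
// |
//	// signal handler side, in sigsend: |
//	for(;;) { |
//		mask = sig.mask; |
//		if(mask & bit) |
//			break;			// already queued |
//		if(runtime_cas(&sig.mask, mask, mask|bit)) { |
//			if(mask == 0)		// first signal this round |
//				runtime_notewakeup(&sig); |
//			break; |
//		} |
//	} |
// |
//	// signal goroutine side, in Sigrecv: |
//	runtime_notesleep(&sig);		// 1b: wait for the wakeup |
//	runtime_noteclear(&sig);		// 2: prepare the next round |
//	for(;;) {				// 3: grab and zero the mask |
//		m = sig.mask; |
//		if(runtime_cas(&sig.mask, m, 0)) |
//			break; |
//	} |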
|
package runtime |
#include "config.h" |
#include "runtime.h" |
#include "arch.h" |
#include "malloc.h" |
#include "defs.h" |
|
static struct { |
Note; |
uint32 mask; |
bool inuse; |
} sig; |
|
void |
siginit(void) |
{ |
runtime_noteclear(&sig); |
} |
|
// Called from sighandler to send a signal back out of the signal handling thread. |
bool |
__go_sigsend(int32 s) |
{ |
uint32 bit, mask; |
|
if(!sig.inuse) |
return false; |
bit = 1 << s; |
for(;;) { |
mask = sig.mask; |
if(mask & bit) |
break; // signal already in queue |
if(runtime_cas(&sig.mask, mask, mask|bit)) { |
// Added to queue. |
// Only send a wakeup for the first signal in each round. |
if(mask == 0) |
runtime_notewakeup(&sig); |
break; |
} |
} |
return true; |
} |
|
// Called to receive a bitmask of queued signals. |
func Sigrecv() (m uint32) { |
runtime_entersyscall(); |
runtime_notesleep(&sig); |
runtime_exitsyscall(); |
runtime_noteclear(&sig); |
for(;;) { |
m = sig.mask; |
if(runtime_cas(&sig.mask, m, 0)) |
break; |
} |
} |
|
func Signame(sig int32) (name String) { |
const char* s = NULL; |
char buf[100]; |
#if defined(HAVE_STRSIGNAL) |
s = strsignal(sig); |
#endif |
if (s == NULL) { |
snprintf(buf, sizeof buf, "signal %d", sig); |
s = buf; |
} |
int32 len = __builtin_strlen(s); |
unsigned char *data = runtime_mallocgc(len, FlagNoPointers, 0, 0); |
__builtin_memcpy(data, s, len); |
name.__data = data; |
name.__length = len; |
} |
|
func Siginit() { |
runtime_initsig(SigQueue); |
sig.inuse = true; // enable reception of signals; cannot disable |
} |
/mem.c
0,0 → 1,155
/* Defining _XOPEN_SOURCE hides the declaration of madvise() on Solaris < |
11 and the MADV_DONTNEED definition on IRIX 6.5. */ |
#undef _XOPEN_SOURCE |
|
#include <errno.h> |
#include <unistd.h> |
|
#include "runtime.h" |
#include "arch.h" |
#include "malloc.h" |
|
#ifndef MAP_ANON |
#ifdef MAP_ANONYMOUS |
#define MAP_ANON MAP_ANONYMOUS |
#else |
#define USE_DEV_ZERO |
#define MAP_ANON 0 |
#endif |
#endif |
|
#ifdef USE_DEV_ZERO |
static int dev_zero = -1; |
#endif |
|
static _Bool |
addrspace_free(void *v __attribute__ ((unused)), uintptr n __attribute__ ((unused))) |
{ |
#ifdef HAVE_MINCORE |
size_t page_size = getpagesize(); |
size_t off; |
char one_byte; |
|
errno = 0; |
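	// A page counts as free only when mincore fails with ENOMEM. |
	// Success means the page is mapped, and any other error is |
	// inconclusive, so the range is conservatively reported as in use. |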
for(off = 0; off < n; off += page_size) |
if(mincore((char *)v + off, page_size, (void *)&one_byte) != -1 |
|| errno != ENOMEM) |
return 0; |
#endif |
return 1; |
} |
|
void* |
runtime_SysAlloc(uintptr n) |
{ |
void *p; |
int fd = -1; |
|
mstats.sys += n; |
|
#ifdef USE_DEV_ZERO |
if (dev_zero == -1) { |
dev_zero = open("/dev/zero", O_RDONLY); |
if (dev_zero < 0) { |
runtime_printf("open /dev/zero: errno=%d\n", errno); |
exit(2); |
} |
} |
fd = dev_zero; |
#endif |
|
p = runtime_mmap(nil, n, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANON|MAP_PRIVATE, fd, 0); |
if (p == MAP_FAILED) { |
if(errno == EACCES) { |
runtime_printf("runtime: mmap: access denied\n"); |
runtime_printf("if you're running SELinux, enable execmem for this process.\n"); |
exit(2); |
} |
return nil; |
} |
return p; |
} |
|
void |
runtime_SysUnused(void *v __attribute__ ((unused)), uintptr n __attribute__ ((unused))) |
{ |
#ifdef MADV_DONTNEED |
runtime_madvise(v, n, MADV_DONTNEED); |
#endif |
} |
|
void |
runtime_SysFree(void *v, uintptr n) |
{ |
mstats.sys -= n; |
runtime_munmap(v, n); |
} |
|
void* |
runtime_SysReserve(void *v, uintptr n) |
{ |
int fd = -1; |
void *p; |
|
// On 64-bit, people with ulimit -v set complain if we reserve too |
// much address space. Instead, assume that the reservation is okay |
// and check the assumption in SysMap. |
if(sizeof(void*) == 8) |
return v; |
|
#ifdef USE_DEV_ZERO |
if (dev_zero == -1) { |
dev_zero = open("/dev/zero", O_RDONLY); |
if (dev_zero < 0) { |
runtime_printf("open /dev/zero: errno=%d\n", errno); |
exit(2); |
} |
} |
fd = dev_zero; |
#endif |
|
p = runtime_mmap(v, n, PROT_NONE, MAP_ANON|MAP_PRIVATE, fd, 0); |
if((uintptr)p < 4096 || -(uintptr)p < 4096) { |
return nil; |
} |
return p; |
} |
|
void |
runtime_SysMap(void *v, uintptr n) |
{ |
void *p; |
int fd = -1; |
|
mstats.sys += n; |
|
#ifdef USE_DEV_ZERO |
if (dev_zero == -1) { |
dev_zero = open("/dev/zero", O_RDONLY); |
if (dev_zero < 0) { |
runtime_printf("open /dev/zero: errno=%d\n", errno); |
exit(2); |
} |
} |
fd = dev_zero; |
#endif |
|
// On 64-bit, we don't actually have v reserved, so tread carefully. |
if(sizeof(void*) == 8) { |
p = runtime_mmap(v, n, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANON|MAP_PRIVATE, fd, 0); |
if(p != v && addrspace_free(v, n)) { |
// On some systems, mmap ignores v without |
// MAP_FIXED, so retry if the address space is free. |
p = runtime_mmap(v, n, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANON|MAP_FIXED|MAP_PRIVATE, fd, 0); |
} |
if(p != v) { |
runtime_printf("runtime: address space conflict: map(%p) = %p\n", v, p); |
runtime_throw("runtime: address space conflict"); |
} |
return; |
} |
|
p = runtime_mmap(v, n, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_ANON|MAP_FIXED|MAP_PRIVATE, fd, 0); |
if(p != v) |
runtime_throw("runtime: cannot map pages in arena address space"); |
} |
/mfixalloc.c
0,0 → 1,63
// Copyright 2009 The Go Authors. All rights reserved. |
// Use of this source code is governed by a BSD-style |
// license that can be found in the LICENSE file. |
|
// Fixed-size object allocator. Returned memory is not zeroed. |
// |
// See malloc.h for overview. |
|
#include "runtime.h" |
#include "arch.h" |
#include "malloc.h" |
|
// Initialize f to allocate objects of the given size, |
// using the allocator to obtain chunks of memory. |
void |
runtime_FixAlloc_Init(FixAlloc *f, uintptr size, void *(*alloc)(uintptr), void (*first)(void*, byte*), void *arg) |
{ |
f->size = size; |
f->alloc = alloc; |
f->first = first; |
f->arg = arg; |
f->list = nil; |
f->chunk = nil; |
f->nchunk = 0; |
f->inuse = 0; |
f->sys = 0; |
} |
|
void* |
runtime_FixAlloc_Alloc(FixAlloc *f) |
{ |
void *v; |
|
if(f->list) { |
v = f->list; |
f->list = *(void**)f->list; |
f->inuse += f->size; |
return v; |
} |
if(f->nchunk < f->size) { |
f->sys += FixAllocChunk; |
f->chunk = f->alloc(FixAllocChunk); |
if(f->chunk == nil) |
runtime_throw("out of memory (FixAlloc)"); |
f->nchunk = FixAllocChunk; |
} |
v = f->chunk; |
if(f->first) |
f->first(f->arg, v); |
f->chunk += f->size; |
f->nchunk -= f->size; |
f->inuse += f->size; |
return v; |
} |
|
void |
runtime_FixAlloc_Free(FixAlloc *f, void *p) |
{ |
f->inuse -= f->size; |
*(void**)p = f->list; |
f->list = p; |
} |
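 |
// Illustrative usage (compiled out): a FixAlloc serving 64-byte |
// objects straight from SysAlloc. This is a hypothetical sketch; the |
// runtime's real instances also pass a "first" callback so each new |
// object can be recorded when it is first carved from a chunk. |
#if 0 |
static FixAlloc demo; |
 |
static void |
demo_fixalloc(void) |
{ |
	void *p; |
 |
	runtime_FixAlloc_Init(&demo, 64, runtime_SysAlloc, nil, nil); |
	p = runtime_FixAlloc_Alloc(&demo);	// carves 64 bytes from a fresh chunk |
	runtime_FixAlloc_Free(&demo, p);	// pushes p onto the free list |
	p = runtime_FixAlloc_Alloc(&demo);	// pops the same block back off |
} |
#endif |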
|
/cpuprof.c
0,0 → 1,433
// Copyright 2011 The Go Authors. All rights reserved. |
// Use of this source code is governed by a BSD-style |
// license that can be found in the LICENSE file. |
|
// CPU profiling. |
// Based on algorithms and data structures used in |
// http://code.google.com/p/google-perftools/. |
// |
// The main difference between this code and the google-perftools |
// code is that this code is written to allow copying the profile data |
// to an arbitrary io.Writer, while the google-perftools code always |
// writes to an operating system file. |
// |
// The signal handler for the profiling clock tick adds a new stack trace |
// to a hash table tracking counts for recent traces. Most clock ticks |
// hit in the cache. In the event of a cache miss, an entry must be |
// evicted from the hash table, copied to a log that will eventually be |
// written as profile data. The google-perftools code flushed the |
// log itself from within the signal handler. This code cannot do that, because |
// the io.Writer might block or need system calls or locks that are not |
// safe to use from within the signal handler. Instead, we split the log |
// into two halves and let the signal handler fill one half while a goroutine |
// is writing out the other half. When the signal handler fills its half, it |
// offers to swap with the goroutine. If the writer is not done with its half, |
// we lose the stack trace for this clock tick (and record that loss). |
// The goroutine interacts with the signal handler by calling getprofile() to |
// get the next log piece to write, implicitly handing back the last log |
// piece it obtained. |
// |
// The state of this dance between the signal handler and the goroutine |
// is encoded in the Profile.handoff field. If handoff == 0, then the goroutine |
// is not using either log half and is waiting (or will soon be waiting) for |
// a new piece by calling notesleep(&p->wait). If the signal handler |
// changes handoff from 0 to non-zero, it must call notewakeup(&p->wait) |
// to wake the goroutine. The value indicates the number of entries in the |
// log half being handed off. The goroutine leaves the non-zero value in |
// place until it has finished processing the log half and then flips the number |
// back to zero. Setting the high bit in handoff means that the profiling is over, |
// and the goroutine is now in charge of flushing the data left in the hash table |
// to the log and returning that data. |
// |
// The handoff field is manipulated using atomic operations. |
// For the most part, the manipulation of handoff is orderly: if handoff == 0 |
// then the signal handler owns it and can change it to non-zero. |
// If handoff != 0 then the goroutine owns it and can change it to zero. |
// If that were the end of the story then we would not need to manipulate |
// handoff using atomic operations. The operations are needed, however, |
// in order to let the log closer set the high bit to indicate "EOF" safely |
// in the situation when normally the goroutine "owns" handoff. |
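// |
// A minimal sketch of the handshake (illustrative only; the real code |
// follows below): |
// |
//	// signal handler side, in flushlog: |
//	if(!runtime_cas(&p->handoff, 0, p->nlog)) |
//		return false;		// writer still busy: tick is lost |
//	runtime_notewakeup(&p->wait);	// the 0 -> n transition wakes it |
//	p->toggle = 1 - p->toggle;	// start filling the other half |
// |
//	// writer side, in getprofile: |
//	runtime_notesleep(&p->wait);	// wait for a handed-off half |
//	n = p->handoff;			// entries in log[wtoggle][:n] |
//	... write that half out ... |
//	runtime_cas(&p->handoff, n, 0);	// hand the half back |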
|
#include "runtime.h" |
#include "arch.h" |
#include "malloc.h" |
|
#include "array.h" |
typedef struct __go_open_array Slice; |
#define array __values |
#define len __count |
#define cap __capacity |
|
enum |
{ |
HashSize = 1<<10, |
LogSize = 1<<17, |
Assoc = 4, |
MaxStack = 64, |
}; |
|
typedef struct Profile Profile; |
typedef struct Bucket Bucket; |
typedef struct Entry Entry; |
|
struct Entry { |
uintptr count; |
uintptr depth; |
uintptr stack[MaxStack]; |
}; |
|
struct Bucket { |
Entry entry[Assoc]; |
}; |
|
struct Profile { |
bool on; // profiling is on |
Note wait; // goroutine waits here |
uintptr count; // tick count |
uintptr evicts; // eviction count |
uintptr lost; // lost ticks that need to be logged |
uintptr totallost; // total lost ticks |
|
// Active recent stack traces. |
Bucket hash[HashSize]; |
|
// Log of traces evicted from hash. |
// Signal handler has filled log[toggle][:nlog]. |
// Goroutine is writing log[1-toggle][:handoff]. |
uintptr log[2][LogSize/2]; |
uintptr nlog; |
int32 toggle; |
uint32 handoff; |
|
// Writer state. |
// Writer maintains its own toggle to avoid races |
// looking at signal handler's toggle. |
uint32 wtoggle; |
bool wholding; // holding & need to release a log half |
bool flushing; // flushing hash table - profile is over |
}; |
|
static Lock lk; |
static Profile *prof; |
|
static void tick(uintptr*, int32); |
static void add(Profile*, uintptr*, int32); |
static bool evict(Profile*, Entry*); |
static bool flushlog(Profile*); |
|
// LostProfileData is a no-op function used in profiles |
// to mark the number of profiling stack traces that were |
// discarded due to slow data writers. |
static void LostProfileData(void) { |
} |
|
extern void runtime_SetCPUProfileRate(int32) |
__asm__("libgo_runtime.runtime.SetCPUProfileRate"); |
|
// SetCPUProfileRate sets the CPU profiling rate. |
// The user documentation is in debug.go. |
void |
runtime_SetCPUProfileRate(int32 hz) |
{ |
uintptr *p; |
uintptr n; |
|
// Clamp hz to something reasonable. |
if(hz < 0) |
hz = 0; |
if(hz > 1000000) |
hz = 1000000; |
|
runtime_lock(&lk); |
if(hz > 0) { |
if(prof == nil) { |
prof = runtime_SysAlloc(sizeof *prof); |
if(prof == nil) { |
runtime_printf("runtime: cpu profiling cannot allocate memory\n"); |
runtime_unlock(&lk); |
return; |
} |
} |
if(prof->on || prof->handoff != 0) { |
runtime_printf("runtime: cannot set cpu profile rate until previous profile has finished.\n"); |
runtime_unlock(&lk); |
return; |
} |
|
prof->on = true; |
p = prof->log[0]; |
// pprof binary header format. |
// http://code.google.com/p/google-perftools/source/browse/trunk/src/profiledata.cc#117 |
*p++ = 0; // count for header |
*p++ = 3; // depth for header |
*p++ = 0; // version number |
*p++ = 1000000 / hz; // period (microseconds) |
*p++ = 0; |
prof->nlog = p - prof->log[0]; |
prof->toggle = 0; |
prof->wholding = false; |
prof->wtoggle = 0; |
prof->flushing = false; |
runtime_noteclear(&prof->wait); |
|
runtime_setcpuprofilerate(tick, hz); |
} else if(prof->on) { |
runtime_setcpuprofilerate(nil, 0); |
prof->on = false; |
|
// Now add is not running anymore, and getprofile owns the entire log. |
// Set the high bit in prof->handoff to tell getprofile. |
for(;;) { |
n = prof->handoff; |
if(n&0x80000000) |
runtime_printf("runtime: setcpuprofile(off) twice\n"); |
if(runtime_cas(&prof->handoff, n, n|0x80000000)) |
break; |
} |
if(n == 0) { |
// we did the transition from 0 -> nonzero so we wake getprofile |
runtime_notewakeup(&prof->wait); |
} |
} |
runtime_unlock(&lk); |
} |
|
static void |
tick(uintptr *pc, int32 n) |
{ |
add(prof, pc, n); |
} |
|
// add adds the stack trace to the profile. |
// It is called from signal handlers and other limited environments |
// and cannot allocate memory or acquire locks that might be |
// held at the time of the signal, nor can it use substantial amounts |
// of stack. It is allowed to call evict. |
static void |
add(Profile *p, uintptr *pc, int32 n) |
{ |
int32 i, j; |
uintptr h, x; |
Bucket *b; |
Entry *e; |
|
if(n > MaxStack) |
n = MaxStack; |
|
// Compute hash. |
h = 0; |
for(i=0; i<n; i++) { |
h = h<<8 | (h>>(8*(sizeof(h)-1))); |
x = pc[i]; |
h += x*31 + x*7 + x*3; |
} |
p->count++; |
|
// Add to entry count if already present in table. |
b = &p->hash[h%HashSize]; |
for(i=0; i<Assoc; i++) { |
e = &b->entry[i]; |
if(e->depth != (uintptr)n) |
continue; |
for(j=0; j<n; j++) |
if(e->stack[j] != pc[j]) |
goto ContinueAssoc; |
e->count++; |
return; |
ContinueAssoc:; |
} |
|
// Evict entry with smallest count. |
e = &b->entry[0]; |
for(i=1; i<Assoc; i++) |
if(b->entry[i].count < e->count) |
e = &b->entry[i]; |
if(e->count > 0) { |
if(!evict(p, e)) { |
// Could not evict entry. Record lost stack. |
p->lost++; |
p->totallost++; |
return; |
} |
p->evicts++; |
} |
|
// Reuse the newly evicted entry. |
e->depth = n; |
e->count = 1; |
for(i=0; i<n; i++) |
e->stack[i] = pc[i]; |
} |
|
// evict copies the given entry's data into the log, so that |
// the entry can be reused. evict is called from add, which |
// is called from the profiling signal handler, so it must not |
// allocate memory or block. It is safe to call flushLog. |
// evict returns true if the entry was copied to the log, |
// false if there was no room available. |
static bool |
evict(Profile *p, Entry *e) |
{ |
int32 i, d, nslot; |
uintptr *log, *q; |
|
d = e->depth; |
nslot = d+2; |
log = p->log[p->toggle]; |
if(p->nlog+nslot > nelem(p->log[0])) { |
if(!flushlog(p)) |
return false; |
log = p->log[p->toggle]; |
} |
|
q = log+p->nlog; |
*q++ = e->count; |
*q++ = d; |
for(i=0; i<d; i++) |
*q++ = e->stack[i]; |
p->nlog = q - log; |
e->count = 0; |
return true; |
} |
|
// flushlog tries to flush the current log and switch to the other one. |
// flushlog is called from evict, called from add, called from the signal handler, |
// so it cannot allocate memory or block. It can try to swap logs with |
// the writing goroutine, as explained in the comment at the top of this file. |
static bool |
flushlog(Profile *p) |
{ |
uintptr *log, *q; |
|
if(!runtime_cas(&p->handoff, 0, p->nlog)) |
return false; |
runtime_notewakeup(&p->wait); |
|
p->toggle = 1 - p->toggle; |
log = p->log[p->toggle]; |
q = log; |
if(p->lost > 0) { |
*q++ = p->lost; |
*q++ = 1; |
*q++ = (uintptr)LostProfileData; |
} |
p->nlog = q - log; |
return true; |
} |
|
// getprofile blocks until the next block of profiling data is available |
// and returns it as a []byte. It is called from the writing goroutine. |
Slice |
getprofile(Profile *p) |
{ |
uint32 i, j, n; |
Slice ret; |
Bucket *b; |
Entry *e; |
|
ret.array = nil; |
ret.len = 0; |
ret.cap = 0; |
|
if(p == nil) |
return ret; |
|
if(p->wholding) { |
// Release previous log to signal handling side. |
// Loop because we are racing against setprofile(off). |
for(;;) { |
n = p->handoff; |
if(n == 0) { |
runtime_printf("runtime: phase error during cpu profile handoff\n"); |
return ret; |
} |
if(n & 0x80000000) { |
p->wtoggle = 1 - p->wtoggle; |
p->wholding = false; |
p->flushing = true; |
goto flush; |
} |
if(runtime_cas(&p->handoff, n, 0)) |
break; |
} |
p->wtoggle = 1 - p->wtoggle; |
p->wholding = false; |
} |
|
if(p->flushing) |
goto flush; |
|
if(!p->on && p->handoff == 0) |
return ret; |
|
// Wait for new log. |
runtime_entersyscall(); |
runtime_notesleep(&p->wait); |
runtime_exitsyscall(); |
runtime_noteclear(&p->wait); |
|
n = p->handoff; |
if(n == 0) { |
runtime_printf("runtime: phase error during cpu profile wait\n"); |
return ret; |
} |
if(n == 0x80000000) { |
p->flushing = true; |
goto flush; |
} |
n &= ~0x80000000; |
|
// Return new log to caller. |
p->wholding = true; |
|
ret.array = (byte*)p->log[p->wtoggle]; |
ret.len = n*sizeof(uintptr); |
ret.cap = ret.len; |
return ret; |
|
flush: |
// In flush mode. |
// Add is no longer being called. We own the log. |
// Also, p->handoff is non-zero, so flushlog will return false. |
// Evict the hash table into the log and return it. |
for(i=0; i<HashSize; i++) { |
b = &p->hash[i]; |
for(j=0; j<Assoc; j++) { |
e = &b->entry[j]; |
if(e->count > 0 && !evict(p, e)) { |
// Filled the log. Stop the loop and return what we've got. |
goto breakflush; |
} |
} |
} |
breakflush: |
|
// Return pending log data. |
if(p->nlog > 0) { |
// Note that we're using toggle now, not wtoggle, |
// because we're working on the log directly. |
ret.array = (byte*)p->log[p->toggle]; |
ret.len = p->nlog*sizeof(uintptr); |
ret.cap = ret.len; |
p->nlog = 0; |
return ret; |
} |
|
// Made it through the table without finding anything to log. |
// Finally done. Clean up and return nil. |
p->flushing = false; |
if(!runtime_cas(&p->handoff, p->handoff, 0)) |
runtime_printf("runtime: profile flush racing with something\n"); |
return ret; // set to nil at top of function |
} |
|
extern Slice runtime_CPUProfile(void) |
__asm__("libgo_runtime.runtime.CPUProfile"); |
|
// CPUProfile returns the next cpu profile block as a []byte. |
// The user documentation is in debug.go. |
Slice |
runtime_CPUProfile(void) |
{ |
return getprofile(prof); |
} |
/go-check-interface.c
0,0 → 1,46
/* go-check-interface.c -- check an interface type for a conversion |
|
Copyright 2010 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include "go-panic.h" |
#include "interface.h" |
|
/* Check that an interface type matches for a conversion to a |
non-interface type. This panics if the types are bad. The actual |
extraction of the object is inlined. */ |
|
void |
__go_check_interface_type ( |
const struct __go_type_descriptor *lhs_descriptor, |
const struct __go_type_descriptor *rhs_descriptor, |
const struct __go_type_descriptor *rhs_inter_descriptor) |
{ |
if (rhs_descriptor == NULL) |
{ |
struct __go_empty_interface panic_arg; |
|
newTypeAssertionError(NULL, NULL, lhs_descriptor, NULL, NULL, |
lhs_descriptor->__reflection, NULL, &panic_arg); |
__go_panic(panic_arg); |
} |
|
if (lhs_descriptor != rhs_descriptor |
&& !__go_type_descriptors_equal (lhs_descriptor, rhs_descriptor) |
&& (lhs_descriptor->__code != GO_UNSAFE_POINTER |
|| !__go_is_pointer_type (rhs_descriptor)) |
&& (rhs_descriptor->__code != GO_UNSAFE_POINTER |
|| !__go_is_pointer_type (lhs_descriptor))) |
{ |
struct __go_empty_interface panic_arg; |
|
newTypeAssertionError(rhs_inter_descriptor, rhs_descriptor, |
lhs_descriptor, |
rhs_inter_descriptor->__reflection, |
rhs_descriptor->__reflection, |
lhs_descriptor->__reflection, |
NULL, &panic_arg); |
__go_panic(panic_arg); |
} |
} |
/mcache.c
0,0 → 1,134
// Copyright 2009 The Go Authors. All rights reserved. |
// Use of this source code is governed by a BSD-style |
// license that can be found in the LICENSE file. |
|
// Per-thread (in Go, per-M) malloc cache for small objects. |
// |
// See malloc.h for an overview. |
|
#include "runtime.h" |
#include "arch.h" |
#include "malloc.h" |
|
void* |
runtime_MCache_Alloc(MCache *c, int32 sizeclass, uintptr size, int32 zeroed) |
{ |
MCacheList *l; |
MLink *first, *v; |
int32 n; |
|
// Allocate from list. |
l = &c->list[sizeclass]; |
if(l->list == nil) { |
// Replenish using central lists. |
n = runtime_MCentral_AllocList(&runtime_mheap.central[sizeclass], |
runtime_class_to_transfercount[sizeclass], &first); |
if(n == 0) |
runtime_throw("out of memory"); |
l->list = first; |
l->nlist = n; |
c->size += n*size; |
} |
v = l->list; |
l->list = v->next; |
l->nlist--; |
if(l->nlist < l->nlistmin) |
l->nlistmin = l->nlist; |
c->size -= size; |
|
// v is zeroed except for the link pointer |
// that we used above; zero that. |
v->next = nil; |
if(zeroed) { |
// block is zeroed iff second word is zero ... |
if(size > sizeof(uintptr) && ((uintptr*)v)[1] != 0) |
runtime_memclr((byte*)v, size); |
else { |
// ... except for the link pointer |
// that we used above; zero that. |
v->next = nil; |
} |
} |
c->local_cachealloc += size; |
c->local_objects++; |
return v; |
} |
|
// Take n elements off l and return them to the central free list. |
static void |
ReleaseN(MCache *c, MCacheList *l, int32 n, int32 sizeclass) |
{ |
MLink *first, **lp; |
int32 i; |
|
// Cut off first n elements. |
first = l->list; |
lp = &l->list; |
for(i=0; i<n; i++) |
lp = &(*lp)->next; |
l->list = *lp; |
*lp = nil; |
l->nlist -= n; |
if(l->nlist < l->nlistmin) |
l->nlistmin = l->nlist; |
c->size -= n*runtime_class_to_size[sizeclass]; |
|
// Return them to central free list. |
runtime_MCentral_FreeList(&runtime_mheap.central[sizeclass], n, first); |
} |
|
void |
runtime_MCache_Free(MCache *c, void *v, int32 sizeclass, uintptr size) |
{ |
int32 i, n; |
MCacheList *l; |
MLink *p; |
|
// Put back on list. |
l = &c->list[sizeclass]; |
p = v; |
p->next = l->list; |
l->list = p; |
l->nlist++; |
c->size += size; |
c->local_cachealloc -= size; |
c->local_objects--; |
|
if(l->nlist >= MaxMCacheListLen) { |
// Release a chunk back. |
ReleaseN(c, l, runtime_class_to_transfercount[sizeclass], sizeclass); |
} |
|
if(c->size >= MaxMCacheSize) { |
// Scavenge. |
for(i=0; i<NumSizeClasses; i++) { |
l = &c->list[i]; |
n = l->nlistmin; |
|
// n is the minimum number of elements we've seen on |
// the list since the last scavenge. If n > 0, it means that |
// we could have gotten by with n fewer elements |
// without needing to consult the central free list. |
// Move toward that situation by releasing n/2 of them. |
if(n > 0) { |
if(n > 1) |
n /= 2; |
ReleaseN(c, l, n, i); |
} |
l->nlistmin = l->nlist; |
} |
} |
} |
|
void |
runtime_MCache_ReleaseAll(MCache *c) |
{ |
int32 i; |
MCacheList *l; |
|
for(i=0; i<NumSizeClasses; i++) { |
l = &c->list[i]; |
ReleaseN(c, l, l->nlist, i); |
l->nlistmin = 0; |
} |
} |
/go-convert-interface.c
0,0 → 1,138
/* go-convert-interface.c -- convert interfaces for Go. |
|
Copyright 2009 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include "go-alloc.h" |
#include "go-assert.h" |
#include "go-panic.h" |
#include "interface.h" |
|
/* This is called when converting one interface type into another |
interface type. LHS_DESCRIPTOR is the type descriptor of the |
resulting interface. RHS_DESCRIPTOR is the type descriptor of the |
object being converted. This builds and returns a new interface |
method table. If any method in the LHS_DESCRIPTOR interface is not |
implemented by the object, the conversion fails. If the conversion |
fails, then if MAY_FAIL is true this returns NULL; otherwise, it |
panics. */ |
|
void * |
__go_convert_interface_2 (const struct __go_type_descriptor *lhs_descriptor, |
const struct __go_type_descriptor *rhs_descriptor, |
_Bool may_fail) |
{ |
const struct __go_interface_type *lhs_interface; |
int lhs_method_count; |
const struct __go_interface_method* lhs_methods; |
const void **methods; |
const struct __go_uncommon_type *rhs_uncommon; |
int rhs_method_count; |
const struct __go_method *p_rhs_method; |
int i; |
|
if (rhs_descriptor == NULL) |
{ |
/* A nil value always converts to nil. */ |
return NULL; |
} |
|
__go_assert (lhs_descriptor->__code == GO_INTERFACE); |
lhs_interface = (const struct __go_interface_type *) lhs_descriptor; |
lhs_method_count = lhs_interface->__methods.__count; |
lhs_methods = ((const struct __go_interface_method *) |
lhs_interface->__methods.__values); |
|
/* This should not be called for an empty interface. */ |
__go_assert (lhs_method_count > 0); |
|
rhs_uncommon = rhs_descriptor->__uncommon; |
if (rhs_uncommon == NULL || rhs_uncommon->__methods.__count == 0) |
{ |
struct __go_empty_interface panic_arg; |
|
if (may_fail) |
return NULL; |
|
newTypeAssertionError (NULL, |
rhs_descriptor, |
lhs_descriptor, |
NULL, |
rhs_descriptor->__reflection, |
lhs_descriptor->__reflection, |
lhs_methods[0].__name, |
&panic_arg); |
__go_panic (panic_arg); |
} |
|
rhs_method_count = rhs_uncommon->__methods.__count; |
p_rhs_method = ((const struct __go_method *) |
rhs_uncommon->__methods.__values); |
|
methods = NULL; |
|
for (i = 0; i < lhs_method_count; ++i) |
{ |
const struct __go_interface_method *p_lhs_method; |
|
p_lhs_method = &lhs_methods[i]; |
|
while (rhs_method_count > 0 |
&& (!__go_ptr_strings_equal (p_lhs_method->__name, |
p_rhs_method->__name) |
|| !__go_ptr_strings_equal (p_lhs_method->__pkg_path, |
p_rhs_method->__pkg_path))) |
{ |
++p_rhs_method; |
--rhs_method_count; |
} |
|
if (rhs_method_count == 0 |
|| !__go_type_descriptors_equal (p_lhs_method->__type, |
p_rhs_method->__mtype)) |
{ |
struct __go_empty_interface panic_arg; |
|
if (methods != NULL) |
__go_free (methods); |
|
if (may_fail) |
return NULL; |
|
newTypeAssertionError (NULL, |
rhs_descriptor, |
lhs_descriptor, |
NULL, |
rhs_descriptor->__reflection, |
lhs_descriptor->__reflection, |
p_lhs_method->__name, |
&panic_arg); |
__go_panic (panic_arg); |
} |
|
if (methods == NULL) |
{ |
methods = (const void **) __go_alloc ((lhs_method_count + 1) |
* sizeof (void *)); |
|
/* The first field in the method table is always the type of |
the object. */ |
methods[0] = rhs_descriptor; |
} |
|
methods[i + 1] = p_rhs_method->__function; |
} |
|
return methods; |
} |
|
/* This is called by the compiler to convert a value from one |
interface type to another. */ |
|
void * |
__go_convert_interface (const struct __go_type_descriptor *lhs_descriptor, |
const struct __go_type_descriptor *rhs_descriptor) |
{ |
return __go_convert_interface_2 (lhs_descriptor, rhs_descriptor, 0); |
} |
/go-type-interface.c
0,0 → 1,55
/* go-type-interface.c -- hash and equality interface functions. |
|
Copyright 2009 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include "interface.h" |
#include "go-type.h" |
|
/* A hash function for an interface. */ |
|
uintptr_t |
__go_type_hash_interface (const void *vval, |
uintptr_t key_size __attribute__ ((unused))) |
{ |
const struct __go_interface *val; |
const struct __go_type_descriptor *descriptor; |
uintptr_t size; |
|
val = (const struct __go_interface *) vval; |
if (val->__methods == NULL) |
return 0; |
descriptor = (const struct __go_type_descriptor *) val->__methods[0]; |
size = descriptor->__size; |
if (__go_is_pointer_type (descriptor)) |
return descriptor->__hashfn (&val->__object, size); |
else |
return descriptor->__hashfn (val->__object, size); |
} |
|
/* An equality function for an interface. */ |
|
_Bool |
__go_type_equal_interface (const void *vv1, const void *vv2, |
uintptr_t key_size __attribute__ ((unused))) |
{ |
const struct __go_interface *v1; |
const struct __go_interface *v2; |
const struct __go_type_descriptor* v1_descriptor; |
const struct __go_type_descriptor* v2_descriptor; |
|
v1 = (const struct __go_interface *) vv1; |
v2 = (const struct __go_interface *) vv2; |
if (v1->__methods == NULL || v2->__methods == NULL) |
return v1->__methods == v2->__methods; |
v1_descriptor = (const struct __go_type_descriptor *) v1->__methods[0]; |
v2_descriptor = (const struct __go_type_descriptor *) v2->__methods[0]; |
if (!__go_type_descriptors_equal (v1_descriptor, v2_descriptor)) |
return 0; |
if (__go_is_pointer_type (v1_descriptor)) |
return v1->__object == v2->__object; |
else |
return v1_descriptor->__equalfn (v1->__object, v2->__object, |
v1_descriptor->__size); |
} |
/arch.h
0,0 → 1,8
// Copyright 2011 The Go Authors. All rights reserved. |
// Use of this source code is governed by a BSD-style |
// license that can be found in the LICENSE file. |
|
// FIXME: Ideally CacheLineSize would be dependent on the host architecture. |
enum { |
CacheLineSize = 64 |
}; |
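 |
// Illustrative use (hypothetical; not part of this header): pad |
// fields written by different threads out to a full cache line so |
// they do not false-share, e.g. |
// |
//	struct Counter { |
//		uint64 n; |
//		byte pad[CacheLineSize - sizeof(uint64)]; |
//	}; |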
/go-map-index.c
0,0 → 1,135
/* go-map-index.c -- find or insert an entry in a map. |
|
Copyright 2009 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include <stddef.h> |
#include <stdlib.h> |
|
#include "runtime.h" |
#include "go-alloc.h" |
#include "go-assert.h" |
#include "map.h" |
|
/* Rehash MAP to a larger size. */ |
|
static void |
__go_map_rehash (struct __go_map *map) |
{ |
const struct __go_map_descriptor *descriptor; |
const struct __go_type_descriptor *key_descriptor; |
uintptr_t key_offset; |
size_t key_size; |
uintptr_t (*hashfn) (const void *, uintptr_t); |
uintptr_t old_bucket_count; |
void **old_buckets; |
uintptr_t new_bucket_count; |
void **new_buckets; |
uintptr_t i; |
|
descriptor = map->__descriptor; |
|
key_descriptor = descriptor->__map_descriptor->__key_type; |
key_offset = descriptor->__key_offset; |
key_size = key_descriptor->__size; |
hashfn = key_descriptor->__hashfn; |
|
old_bucket_count = map->__bucket_count; |
old_buckets = map->__buckets; |
|
new_bucket_count = __go_map_next_prime (old_bucket_count * 2); |
new_buckets = (void **) __go_alloc (new_bucket_count * sizeof (void *)); |
__builtin_memset (new_buckets, 0, new_bucket_count * sizeof (void *)); |
|
for (i = 0; i < old_bucket_count; ++i) |
{ |
char* entry; |
char* next; |
|
for (entry = old_buckets[i]; entry != NULL; entry = next) |
{ |
size_t key_hash; |
size_t new_bucket_index; |
|
/* We could speed up rehashing at the cost of memory space |
by caching the hash code. */ |
key_hash = hashfn (entry + key_offset, key_size); |
new_bucket_index = key_hash % new_bucket_count; |
|
next = *(char **) entry; |
*(char **) entry = new_buckets[new_bucket_index]; |
new_buckets[new_bucket_index] = entry; |
} |
} |
|
__go_free (old_buckets); |
|
map->__bucket_count = new_bucket_count; |
map->__buckets = new_buckets; |
} |
|
/* Find KEY in MAP, return a pointer to the value. If KEY is not |
present, then if INSERT is false, return NULL, and if INSERT is |
true, insert a new value and zero-initialize it before returning a |
pointer to it. */ |
|
void * |
__go_map_index (struct __go_map *map, const void *key, _Bool insert) |
{ |
const struct __go_map_descriptor *descriptor; |
const struct __go_type_descriptor *key_descriptor; |
uintptr_t key_offset; |
_Bool (*equalfn) (const void*, const void*, uintptr_t); |
size_t key_hash; |
size_t key_size; |
size_t bucket_index; |
char *entry; |
|
if (map == NULL) |
{ |
if (insert) |
runtime_panicstring ("assignment to entry in nil map"); |
return NULL; |
} |
|
descriptor = map->__descriptor; |
|
key_descriptor = descriptor->__map_descriptor->__key_type; |
key_offset = descriptor->__key_offset; |
key_size = key_descriptor->__size; |
__go_assert (key_size != 0 && key_size != -1UL); |
equalfn = key_descriptor->__equalfn; |
|
key_hash = key_descriptor->__hashfn (key, key_size); |
bucket_index = key_hash % map->__bucket_count; |
|
entry = (char *) map->__buckets[bucket_index]; |
while (entry != NULL) |
{ |
if (equalfn (key, entry + key_offset, key_size)) |
return entry + descriptor->__val_offset; |
entry = *(char **) entry; |
} |
|
if (!insert) |
return NULL; |
|
if (map->__element_count >= map->__bucket_count) |
{ |
__go_map_rehash (map); |
bucket_index = key_hash % map->__bucket_count; |
} |
|
entry = (char *) __go_alloc (descriptor->__entry_size); |
__builtin_memset (entry, 0, descriptor->__entry_size); |
|
__builtin_memcpy (entry + key_offset, key, key_size); |
|
*(char **) entry = map->__buckets[bucket_index]; |
map->__buckets[bucket_index] = entry; |
|
map->__element_count += 1; |
|
return entry + descriptor->__val_offset; |
} |
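 |
/* Illustrative usage (compiled out): lookup versus find-or-insert for |
   a hypothetical map with int keys and int values; the descriptor |
   setup done by the compiler is omitted.  */ |
#if 0 |
static void |
example (struct __go_map *map, const int *key) |
{ |
  int *valp; |
 |
  /* Lookup only: returns NULL when *KEY is absent.  */ |
  valp = (int *) __go_map_index (map, key, 0); |
  if (valp == NULL) |
    { |
      /* Find or insert: a missing entry is created zero-initialized, |
         so the value can be updated immediately.  */ |
      valp = (int *) __go_map_index (map, key, 1); |
    } |
  ++*valp; |
} |
#endif |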
/go-deferred-recover.c
0,0 → 1,94
/* go-deferred-recover.c -- support for a deferred recover function. |
|
Copyright 2010 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include <stddef.h> |
|
#include "runtime.h" |
#include "go-panic.h" |
#include "go-defer.h" |
|
/* This is called when a call to recover is deferred. That is, |
something like |
defer recover() |
|
We need to handle this specially. In 6g/8g, the recover function |
looks up the stack frame. In particular, that means that a |
deferred recover will not recover a panic thrown in the same |
function that defers the recover. It will only recover a panic |
thrown in a function that defers the deferred call to recover. |
|
In other words: |
|
func f1() { |
defer recover() // does not stop panic |
panic(0) |
} |
|
func f2() { |
defer func() { |
defer recover() // stops panic(0) |
}() |
panic(0) |
} |
|
func f3() { |
defer func() { |
defer recover() // does not stop panic |
panic(0) |
}() |
panic(1) |
} |
|
func f4() { |
defer func() { |
defer func() { |
defer recover() // stops panic(0) |
}() |
panic(0) |
}() |
panic(1) |
} |
|
The interesting case here is f3. As can be seen from f2, the |
deferred recover could pick up panic(1). However, this does not |
happen because it is blocked by the panic(0). |
|
When a function calls recover, then when we invoke it we pass a |
hidden parameter indicating whether it should recover something. |
This parameter is set based on whether the function is being |
invoked directly from defer. The parameter winds up determining |
whether __go_recover or __go_deferred_recover is called at all. |
|
In the case of a deferred recover, the hidden parameter which |
controls the call is actually the one set up for the function which |
runs the defer recover() statement. That is the right thing in all |
the cases above except for f3. In f3 the function is permitted to |
call recover, but the deferred recover call is not. We address |
that here by checking for that specific case before calling |
recover. If this function was deferred when there is already a |
panic on the panic stack, then we can only recover that panic, not |
any other. |
|
Note that we can get away with using a special function here |
because you are not permitted to take the address of a predeclared |
function like recover. */ |
|
struct __go_empty_interface |
__go_deferred_recover () |
{ |
G *g; |
|
g = runtime_g (); |
if (g->defer == NULL || g->defer->__panic != g->panic) |
{ |
struct __go_empty_interface ret; |
|
ret.__type_descriptor = NULL; |
ret.__object = NULL; |
return ret; |
} |
return __go_recover (); |
} |
/go-reflect.c
0,0 → 1,192
/* go-reflect.c -- implement unsafe.Reflect and unsafe.Typeof for Go. |
|
Copyright 2009 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include <stdlib.h> |
#include <stdint.h> |
|
#include "runtime.h" |
#include "interface.h" |
#include "go-alloc.h" |
#include "go-string.h" |
#include "go-type.h" |
|
/* For field alignment. */ |
|
struct field_align |
{ |
char c; |
struct __go_type_descriptor *p; |
}; |
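 |
/* Because of the single leading char, offsetof (struct field_align, p) |
   equals the alignment a pointer field receives within a struct; type |
   descriptors use this offsetof trick to compute their __field_align |
   values portably at compile time.  */ |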
|
/* The type descriptors in the runtime package. */ |
|
extern const struct __go_type_descriptor ptr_bool_descriptor |
asm ("__go_td_pN30_libgo_runtime.runtime.BoolType"); |
extern const struct __go_type_descriptor ptr_float_descriptor |
asm ("__go_td_pN31_libgo_runtime.runtime.FloatType"); |
extern const struct __go_type_descriptor ptr_complex_descriptor |
asm ("__go_td_pN33_libgo_runtime.runtime.ComplexType"); |
extern const struct __go_type_descriptor ptr_int_descriptor |
asm ("__go_td_pN29_libgo_runtime.runtime.IntType"); |
extern const struct __go_type_descriptor ptr_uint_descriptor |
asm ("__go_td_pN30_libgo_runtime.runtime.UintType"); |
extern const struct __go_type_descriptor ptr_string_descriptor |
asm ("__go_td_pN32_libgo_runtime.runtime.StringType"); |
extern const struct __go_type_descriptor ptr_unsafe_pointer_descriptor |
asm ("__go_td_pN39_libgo_runtime.runtime.UnsafePointerType"); |
extern const struct __go_type_descriptor ptr_array_descriptor |
asm ("__go_td_pN31_libgo_runtime.runtime.ArrayType"); |
extern const struct __go_type_descriptor ptr_slice_descriptor |
asm ("__go_td_pN31_libgo_runtime.runtime.SliceType"); |
extern const struct __go_type_descriptor ptr_chan_descriptor |
asm ("__go_td_pN30_libgo_runtime.runtime.ChanType"); |
extern const struct __go_type_descriptor ptr_func_descriptor |
asm ("__go_td_pN30_libgo_runtime.runtime.FuncType"); |
extern const struct __go_type_descriptor ptr_interface_descriptor |
asm ("__go_td_pN35_libgo_runtime.runtime.InterfaceType"); |
extern const struct __go_type_descriptor ptr_map_descriptor |
asm ("__go_td_pN29_libgo_runtime.runtime.MapType"); |
extern const struct __go_type_descriptor ptr_ptr_descriptor |
asm ("__go_td_pN29_libgo_runtime.runtime.PtrType"); |
extern const struct __go_type_descriptor ptr_struct_descriptor |
asm ("__go_td_pN32_libgo_runtime.runtime.StructType"); |
|
const struct __go_type_descriptor * |
get_descriptor (int code) |
{ |
switch (code & GO_CODE_MASK) |
{ |
case GO_BOOL: |
return &ptr_bool_descriptor; |
case GO_FLOAT32: |
case GO_FLOAT64: |
return &ptr_float_descriptor; |
case GO_COMPLEX64: |
case GO_COMPLEX128: |
return &ptr_complex_descriptor; |
case GO_INT16: |
case GO_INT32: |
case GO_INT64: |
case GO_INT8: |
case GO_INT: |
return &ptr_int_descriptor; |
case GO_UINT16: |
case GO_UINT32: |
case GO_UINT64: |
case GO_UINT8: |
case GO_UINTPTR: |
case GO_UINT: |
return &ptr_uint_descriptor; |
case GO_STRING: |
return &ptr_string_descriptor; |
case GO_UNSAFE_POINTER: |
return &ptr_unsafe_pointer_descriptor; |
case GO_ARRAY: |
return &ptr_array_descriptor; |
case GO_SLICE: |
return &ptr_slice_descriptor; |
case GO_CHAN: |
return &ptr_chan_descriptor; |
case GO_FUNC: |
return &ptr_func_descriptor; |
case GO_INTERFACE: |
return &ptr_interface_descriptor; |
case GO_MAP: |
return &ptr_map_descriptor; |
case GO_PTR: |
return &ptr_ptr_descriptor; |
case GO_STRUCT: |
return &ptr_struct_descriptor; |
default: |
abort (); |
} |
} |
|
/* Implement unsafe.Reflect. */ |
|
struct reflect_ret |
{ |
struct __go_empty_interface rettype; |
void *addr; |
}; |
|
struct reflect_ret Reflect (struct __go_empty_interface) |
asm ("libgo_unsafe.unsafe.Reflect"); |
|
struct reflect_ret |
Reflect (struct __go_empty_interface e) |
{ |
struct reflect_ret ret; |
|
if (((uintptr_t) e.__type_descriptor & reflectFlags) != 0) |
runtime_panicstring ("invalid interface value"); |
|
if (e.__type_descriptor == NULL) |
{ |
ret.rettype.__type_descriptor = NULL; |
ret.rettype.__object = NULL; |
ret.addr = NULL; |
} |
else |
{ |
size_t size; |
|
ret.rettype.__type_descriptor = |
get_descriptor (e.__type_descriptor->__code); |
|
/* This memcpy is really just an assignment of a const pointer |
to a non-const pointer. FIXME: We should canonicalize this |
pointer, so that for a given type we always return the same |
pointer. */ |
__builtin_memcpy (&ret.rettype.__object, &e.__type_descriptor, |
sizeof (void *)); |
|
/* Make a copy of the value. */ |
size = e.__type_descriptor->__size; |
if (size <= sizeof (uint64_t)) |
ret.addr = __go_alloc (sizeof (uint64_t)); |
else |
ret.addr = __go_alloc (size); |
if (__go_is_pointer_type (e.__type_descriptor)) |
*(void **) ret.addr = e.__object; |
else |
__builtin_memcpy (ret.addr, e.__object, size); |
} |
|
return ret; |
} |
|
/* Implement unsafe.Typeof. */ |
|
struct __go_empty_interface Typeof (struct __go_empty_interface) |
asm ("libgo_unsafe.unsafe.Typeof"); |
|
struct __go_empty_interface |
Typeof (const struct __go_empty_interface e) |
{ |
struct __go_empty_interface ret; |
|
if (((uintptr_t) e.__type_descriptor & reflectFlags) != 0) |
runtime_panicstring ("invalid interface value"); |
|
if (e.__type_descriptor == NULL) |
{ |
ret.__type_descriptor = NULL; |
ret.__object = NULL; |
} |
else |
{ |
ret.__type_descriptor = get_descriptor (e.__type_descriptor->__code); |
|
/* This memcpy is really just an assignment of a const pointer |
to a non-const pointer. FIXME: We should canonicalize this |
pointer, so that for a given type we always return the same |
pointer. */ |
__builtin_memcpy (&ret.__object, &e.__type_descriptor, sizeof (void *)); |
} |
|
return ret; |
} |
/go-interface-eface-compare.c
0,0 → 1,35
/* go-interface-eface-compare.c -- compare non-empty and empty interface. |
|
Copyright 2011 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include "runtime.h" |
#include "interface.h" |
|
/* Compare a non-empty interface value with an empty interface value. |
Return 0 for equal, nonzero for not equal (return value is like |
strcmp). */ |
|
int |
__go_interface_empty_compare (struct __go_interface left, |
struct __go_empty_interface right) |
{ |
const struct __go_type_descriptor *left_descriptor; |
|
if (((uintptr_t) right.__type_descriptor & reflectFlags) != 0) |
runtime_panicstring ("invalid interface value"); |
if (left.__methods == NULL && right.__type_descriptor == NULL) |
return 0; |
if (left.__methods == NULL || right.__type_descriptor == NULL) |
return 1; |
left_descriptor = left.__methods[0]; |
if (!__go_type_descriptors_equal (left_descriptor, right.__type_descriptor)) |
return 1; |
if (__go_is_pointer_type (left_descriptor)) |
return left.__object == right.__object ? 0 : 1; |
if (!left_descriptor->__equalfn (left.__object, right.__object, |
left_descriptor->__size)) |
return 1; |
return 0; |
} |
/go-type-string.c
0,0 → 1,45
/* go-type-string.c -- hash and equality string functions. |
|
Copyright 2009 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include <stddef.h> |
|
#include "go-string.h" |
#include "go-type.h" |
|
/* A string hash function for a map. */ |
|
uintptr_t |
__go_type_hash_string (const void *vkey, |
uintptr_t key_size __attribute__ ((unused))) |
{ |
uintptr_t ret; |
const struct __go_string *key; |
int len; |
int i; |
const unsigned char *p; |
|
ret = 5381; |
key = (const struct __go_string *) vkey; |
len = key->__length; |
for (i = 0, p = key->__data; i < len; i++, p++) |
ret = ret * 33 + *p; |
return ret; |
} |
|
/* A string equality function for a map. */ |
|
_Bool |
__go_type_equal_string (const void *vk1, const void *vk2, |
uintptr_t key_size __attribute__ ((unused))) |
{ |
const struct __go_string *k1; |
const struct __go_string *k2; |
|
k1 = (const struct __go_string *) vk1; |
k2 = (const struct __go_string *) vk2; |
return (k1->__length == k2->__length |
&& __builtin_memcmp (k1->__data, k2->__data, k1->__length) == 0); |
} |
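 |
/* Illustrative usage (compiled out): the hash above is the classic |
   multiply-by-33 (djb2) scheme over the string bytes, and equality |
   compares lengths first, then bytes.  */ |
#if 0 |
static void |
example (void) |
{ |
  static const unsigned char data[] = "abc"; |
  struct __go_string s = { data, 3 }; |
  uintptr_t hash = __go_type_hash_string (&s, sizeof s); |
  _Bool equal = __go_type_equal_string (&s, &s, sizeof s); |
 |
  (void) hash; |
  (void) equal; |
} |
#endif |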
/go-type.h
0,0 → 1,333
/* go-type.h -- basic information for a Go type. |
|
Copyright 2009 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#ifndef LIBGO_GO_TYPE_H |
#define LIBGO_GO_TYPE_H |
|
#include <stddef.h> |
#include <stdint.h> |
|
#include "go-string.h" |
#include "array.h" |
|
/* Many of the types in this file must match the data structures |
generated by the compiler, and must also match the Go types which |
appear in go/runtime/type.go and go/reflect/type.go. */ |
|
/* Type kinds. These are used to get the type descriptor to use for |
the type itself, when using unsafe.Typeof or unsafe.Reflect. The |
values here must match the values generated by the compiler (the |
RUNTIME_TYPE_KIND_xxx values in gcc/go/types.h). These are macros |
rather than an enum to make it easy to change values in the future |
and hard to get confused about it. |
|
These correspond to the kind values used by the gc compiler. */ |
|
#define GO_BOOL 1 |
#define GO_INT 2 |
#define GO_INT8 3 |
#define GO_INT16 4 |
#define GO_INT32 5 |
#define GO_INT64 6 |
#define GO_UINT 7 |
#define GO_UINT8 8 |
#define GO_UINT16 9 |
#define GO_UINT32 10 |
#define GO_UINT64 11 |
#define GO_UINTPTR 12 |
#define GO_FLOAT32 13 |
#define GO_FLOAT64 14 |
#define GO_COMPLEX64 15 |
#define GO_COMPLEX128 16 |
#define GO_ARRAY 17 |
#define GO_CHAN 18 |
#define GO_FUNC 19 |
#define GO_INTERFACE 20 |
#define GO_MAP 21 |
#define GO_PTR 22 |
#define GO_SLICE 23 |
#define GO_STRING 24 |
#define GO_STRUCT 25 |
#define GO_UNSAFE_POINTER 26 |
|
#define GO_NO_POINTERS (1 << 7) |
|
#define GO_CODE_MASK 0x7f |
|
/* For each Go type the compiler constructs one of these structures. |
This is used for type reflection, interfaces, maps, and reference |
counting. */ |
|
struct __go_type_descriptor |
{ |
/* The type code for this type, one of the type kind values above. |
This is used by unsafe.Reflect and unsafe.Typeof to determine the |
type descriptor to return for this type itself. It is also used |
by reflect.toType when mapping to a reflect Type structure. */ |
unsigned char __code; |
|
/* The alignment in bytes of a variable with this type. */ |
unsigned char __align; |
|
/* The alignment in bytes of a struct field with this type. */ |
unsigned char __field_align; |
|
/* The size in bytes of a value of this type. Note that all types |
in Go have a fixed size. */ |
uintptr_t __size; |
|
/* The type's hash code. */ |
uint32_t __hash; |
|
/* This function takes a pointer to a value of this type, and the |
size of this type, and returns a hash code. We pass the size |
explicitly because it means that we can share a single instance |
of this function for various different types. */ |
uintptr_t (*__hashfn) (const void *, uintptr_t); |
|
/* This function takes two pointers to values of this type, and the |
size of this type, and returns whether the values are equal. */ |
_Bool (*__equalfn) (const void *, const void *, uintptr_t); |
|
/* A string describing this type. This is only used for |
debugging. */ |
const struct __go_string *__reflection; |
|
/* A pointer to fields which are only used for some types. */ |
const struct __go_uncommon_type *__uncommon; |
|
/* The descriptor for the type which is a pointer to this type. |
This may be NULL. */ |
const struct __go_type_descriptor *__pointer_to_this; |
}; |
|
/* The information we store for each method of a type. */ |
|
struct __go_method |
{ |
/* The name of the method. */ |
const struct __go_string *__name; |
|
/* This is NULL for an exported method, or the name of the package |
where it lives. */ |
const struct __go_string *__pkg_path; |
|
/* The type of the method, without the receiver. This will be a |
function type. */ |
const struct __go_type_descriptor *__mtype; |
|
/* The type of the method, with the receiver. This will be a |
function type. */ |
const struct __go_type_descriptor *__type; |
|
/* A pointer to the code which implements the method. This is |
really a function pointer. */ |
const void *__function; |
}; |
|
/* Additional information that we keep for named types and for types |
with methods. */ |
|
struct __go_uncommon_type |
{ |
/* The name of the type. */ |
const struct __go_string *__name; |
|
/* The type's package. This is NULL for builtin types. */ |
const struct __go_string *__pkg_path; |
|
/* The type's methods. This is an array of struct __go_method. */ |
struct __go_open_array __methods; |
}; |
|
/* The type descriptor for a fixed array type. */ |
|
struct __go_array_type |
{ |
/* Starts like all type descriptors. */ |
struct __go_type_descriptor __common; |
|
/* The element type. */ |
struct __go_type_descriptor *__element_type; |
|
/* The type of a slice of the same element type. */ |
struct __go_type_descriptor *__slice_type; |
|
/* The length of the array. */ |
uintptr_t __len; |
}; |
|
/* The type descriptor for a slice. */ |
|
struct __go_slice_type |
{ |
/* Starts like all other type descriptors. */ |
struct __go_type_descriptor __common; |
|
/* The element type. */ |
struct __go_type_descriptor *__element_type; |
}; |
|
/* The direction of a channel. */ |
#define CHANNEL_RECV_DIR 1 |
#define CHANNEL_SEND_DIR 2 |
#define CHANNEL_BOTH_DIR (CHANNEL_RECV_DIR | CHANNEL_SEND_DIR) |
|
/* The type descriptor for a channel. */ |
|
struct __go_channel_type |
{ |
/* Starts like all other type descriptors. */ |
struct __go_type_descriptor __common; |
|
/* The element type. */ |
const struct __go_type_descriptor *__element_type; |
|
/* The direction. */ |
uintptr_t __dir; |
}; |
|
/* The type descriptor for a function. */ |
|
struct __go_func_type |
{ |
/* Starts like all other type descriptors. */ |
struct __go_type_descriptor __common; |
|
/* Whether this is a varargs function. If this is true, there will |
be at least one parameter. For "..." the last parameter type is |
"interface{}". For "... T" the last parameter type is "[]T". */ |
_Bool __dotdotdot; |
|
/* The input parameter types. This is an array of pointers to |
struct __go_type_descriptor. */ |
struct __go_open_array __in; |
|
/* The output parameter types. This is an array of pointers to |
struct __go_type_descriptor. */ |
struct __go_open_array __out; |
}; |
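|
/* For example (editor's illustration): for a Go function of type |
   func(int, ...string), __dotdotdot is true, __in holds the |
   descriptors for int and []string (the "..." argument arrives as |
   a slice), and __out is empty. */ |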
|
/* A method on an interface type. */ |
|
struct __go_interface_method |
{ |
/* The name of the method. */ |
const struct __go_string *__name; |
|
/* This is NULL for an exported method, or the name of the package |
where it lives. */ |
const struct __go_string *__pkg_path; |
|
/* The real type of the method. */ |
struct __go_type_descriptor *__type; |
}; |
|
/* An interface type. */ |
|
struct __go_interface_type |
{ |
/* Starts like all other type descriptors. */ |
struct __go_type_descriptor __common; |
|
/* An array of struct __go_interface_method. The methods are sorted in the |
same order that they appear in the definition of the |
interface. */ |
struct __go_open_array __methods; |
}; |
|
/* A map type. */ |
|
struct __go_map_type |
{ |
/* Starts like all other type descriptors. */ |
struct __go_type_descriptor __common; |
|
/* The map key type. */ |
const struct __go_type_descriptor *__key_type; |
|
/* The map value type. */ |
const struct __go_type_descriptor *__val_type; |
}; |
|
/* A pointer type. */ |
|
struct __go_ptr_type |
{ |
/* Starts like all other type descriptors. */ |
struct __go_type_descriptor __common; |
|
/* The type to which this points. */ |
const struct __go_type_descriptor *__element_type; |
}; |
|
/* A field in a structure. */ |
|
struct __go_struct_field |
{ |
/* The name of the field--NULL for an anonymous field. */ |
const struct __go_string *__name; |
|
/* This is NULL for an exported method, or the name of the package |
where it lives. */ |
const struct __go_string *__pkg_path; |
|
/* The type of the field. */ |
const struct __go_type_descriptor *__type; |
|
/* The field tag, or NULL. */ |
const struct __go_string *__tag; |
|
/* The offset of the field in the struct. */ |
uintptr_t __offset; |
}; |
|
/* A struct type. */ |
|
struct __go_struct_type |
{ |
/* Starts like all other type descriptors. */ |
struct __go_type_descriptor __common; |
|
/* An array of struct __go_struct_field. */ |
struct __go_open_array __fields; |
}; |
|
/* If an empty interface has these bits set in its type pointer, it |
was copied from a reflect.Value and is not a valid empty |
interface. */ |
|
enum |
{ |
reflectFlags = 3, |
}; |
|
/* Whether a type descriptor is a pointer. */ |
|
static inline _Bool |
__go_is_pointer_type (const struct __go_type_descriptor *td) |
{ |
return td->__code == GO_PTR || td->__code == GO_UNSAFE_POINTER; |
} |
|
extern _Bool |
__go_type_descriptors_equal(const struct __go_type_descriptor*, |
const struct __go_type_descriptor*); |
|
extern uintptr_t __go_type_hash_identity (const void *, uintptr_t); |
extern _Bool __go_type_equal_identity (const void *, const void *, uintptr_t); |
extern uintptr_t __go_type_hash_string (const void *, uintptr_t); |
extern _Bool __go_type_equal_string (const void *, const void *, uintptr_t); |
extern uintptr_t __go_type_hash_float (const void *, uintptr_t); |
extern _Bool __go_type_equal_float (const void *, const void *, uintptr_t); |
extern uintptr_t __go_type_hash_complex (const void *, uintptr_t); |
extern _Bool __go_type_equal_complex (const void *, const void *, uintptr_t); |
extern uintptr_t __go_type_hash_interface (const void *, uintptr_t); |
extern _Bool __go_type_equal_interface (const void *, const void *, uintptr_t); |
extern uintptr_t __go_type_hash_error (const void *, uintptr_t); |
extern _Bool __go_type_equal_error (const void *, const void *, uintptr_t); |
|
#endif /* !defined(LIBGO_GO_TYPE_H) */ |
/go-type-complex.c
0,0 → 1,122
/* go-type-complex.c -- hash and equality complex functions. |
|
Copyright 2012 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include "runtime.h" |
#include "go-type.h" |
|
/* The 64-bit type. */ |
|
typedef unsigned int DItype __attribute__ ((mode (DI))); |
|
/* Hash function for complex types. */ |
|
uintptr_t |
__go_type_hash_complex (const void *vkey, uintptr_t key_size) |
{ |
if (key_size == 8) |
{ |
union |
{ |
unsigned char a[8]; |
__complex float cf; |
DItype di; |
} ucf; |
__complex float cf; |
float cfr; |
float cfi; |
|
__builtin_memcpy (ucf.a, vkey, 8); |
cf = ucf.cf; |
cfr = __builtin_crealf (cf); |
cfi = __builtin_cimagf (cf); |
if (__builtin_isinff (cfr) || __builtin_isinff (cfi) |
|| __builtin_isnanf (cfr) || __builtin_isnanf (cfi)) |
return 0; |
|
/* Avoid negative zero. */ |
if (cfr == 0 && cfi == 0) |
return 0; |
else if (cfr == 0) |
ucf.cf = cfi * 1.0iF; |
else if (cfi == 0) |
ucf.cf = cfr; |
|
return ucf.di; |
} |
else if (key_size == 16) |
{ |
union |
{ |
unsigned char a[16]; |
__complex double cd; |
DItype adi[2]; |
} ucd; |
__complex double cd; |
double cdr; |
double cdi; |
|
__builtin_memcpy (ucd.a, vkey, 16); |
cd = ucd.cd; |
cdr = __builtin_creal (cd); |
cdi = __builtin_cimag (cd); |
if (__builtin_isinf (cdr) || __builtin_isinf (cdi) |
|| __builtin_isnan (cdr) || __builtin_isnan (cdi)) |
return 0; |
|
/* Avoid negative zero. */ |
if (cdr == 0 && cdi == 0) |
return 0; |
else if (cdr == 0) |
ucd.cd = cdi * 1.0i; |
else if (cdi == 0) |
ucd.cd = cdr; |
|
return ucd.adi[0] ^ ucd.adi[1]; |
} |
else |
runtime_throw ("__go_type_hash_complex: invalid complex size"); |
} |
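|
/* Editor's note: the zero normalization above matters because |
   0.0 == -0.0 compares equal while the two differ in their sign |
   bit, so hashing the raw bytes of 2+0i and 2-0i would otherwise |
   give different hashes for keys that compare equal. */ |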
|
/* Equality function for complex types. */ |
|
_Bool |
__go_type_equal_complex (const void *vk1, const void *vk2, uintptr_t key_size) |
{ |
if (key_size == 8) |
{ |
union |
{ |
unsigned char a[8]; |
__complex float cf; |
} ucf; |
__complex float cf1; |
__complex float cf2; |
|
__builtin_memcpy (ucf.a, vk1, 8); |
cf1 = ucf.cf; |
__builtin_memcpy (ucf.a, vk2, 8); |
cf2 = ucf.cf; |
return cf1 == cf2; |
} |
else if (key_size == 16) |
{ |
union |
{ |
unsigned char a[16]; |
__complex double cd; |
} ucd; |
__complex double cd1; |
__complex double cd2; |
|
__builtin_memcpy (ucd.a, vk1, 16); |
cd1 = ucd.cd; |
__builtin_memcpy (ucd.a, vk2, 16); |
cd2 = ucd.cd; |
return cd1 == cd2; |
} |
else |
runtime_throw ("__go_type_equal_complex: invalid complex size"); |
} |
/go-new.c
0,0 → 1,22
/* go-new.c -- the generic go new() function. |
|
Copyright 2009 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include "go-alloc.h" |
#include "runtime.h" |
#include "arch.h" |
#include "malloc.h" |
|
void * |
__go_new (uintptr_t size) |
{ |
return runtime_mallocgc (size, 0, 1, 1); |
} |
|
void * |
__go_new_nopointers (uintptr_t size) |
{ |
return runtime_mallocgc (size, FlagNoPointers, 1, 1); |
} |
/malloc.goc
0,0 → 1,474
// Copyright 2009 The Go Authors. All rights reserved. |
// Use of this source code is governed by a BSD-style |
// license that can be found in the LICENSE file. |
|
// See malloc.h for overview. |
// |
// TODO(rsc): double-check stats. |
|
package runtime |
#include <stddef.h> |
#include <errno.h> |
#include <stdlib.h> |
#include "go-alloc.h" |
#include "runtime.h" |
#include "arch.h" |
#include "malloc.h" |
#include "go-string.h" |
#include "interface.h" |
#include "go-type.h" |
|
MHeap runtime_mheap; |
extern MStats mstats; // defined in extern.go |
|
extern volatile int32 runtime_MemProfileRate |
__asm__ ("libgo_runtime.runtime.MemProfileRate"); |
|
// Allocate an object of at least size bytes. |
// Small objects are allocated from the per-thread cache's free lists. |
// Large objects (> 32 kB) are allocated straight from the heap. |
void* |
runtime_mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed) |
{ |
M *m; |
G *g; |
int32 sizeclass, rate; |
MCache *c; |
uintptr npages; |
MSpan *s; |
void *v; |
|
m = runtime_m(); |
g = runtime_g(); |
if(g->status == Gsyscall) |
dogc = 0; |
if(runtime_gcwaiting && g != m->g0 && m->locks == 0 && g->status != Gsyscall) { |
runtime_gosched(); |
m = runtime_m(); |
} |
if(m->mallocing) |
runtime_throw("malloc/free - deadlock"); |
m->mallocing = 1; |
if(size == 0) |
size = 1; |
|
c = m->mcache; |
c->local_nmalloc++; |
if(size <= MaxSmallSize) { |
// Allocate from mcache free lists. |
sizeclass = runtime_SizeToClass(size); |
size = runtime_class_to_size[sizeclass]; |
v = runtime_MCache_Alloc(c, sizeclass, size, zeroed); |
if(v == nil) |
runtime_throw("out of memory"); |
c->local_alloc += size; |
c->local_total_alloc += size; |
c->local_by_size[sizeclass].nmalloc++; |
} else { |
// TODO(rsc): Report tracebacks for very large allocations. |
|
// Allocate directly from heap. |
npages = size >> PageShift; |
if((size & PageMask) != 0) |
npages++; |
s = runtime_MHeap_Alloc(&runtime_mheap, npages, 0, !(flag & FlagNoGC)); |
if(s == nil) |
runtime_throw("out of memory"); |
size = npages<<PageShift; |
c->local_alloc += size; |
c->local_total_alloc += size; |
v = (void*)(s->start << PageShift); |
|
// setup for mark sweep |
runtime_markspan(v, 0, 0, true); |
} |
if(!(flag & FlagNoGC)) |
runtime_markallocated(v, size, (flag&FlagNoPointers) != 0); |
|
m->mallocing = 0; |
|
if(!(flag & FlagNoProfiling) && (rate = runtime_MemProfileRate) > 0) { |
if(size >= (uint32) rate) |
goto profile; |
if((uint32) m->mcache->next_sample > size) |
m->mcache->next_sample -= size; |
else { |
// pick next profile time |
// If you change this, also change allocmcache. |
if(rate > 0x3fffffff) // make 2*rate not overflow |
rate = 0x3fffffff; |
m->mcache->next_sample = runtime_fastrand1() % (2*rate); |
profile: |
runtime_setblockspecial(v, true); |
runtime_MProf_Malloc(v, size); |
} |
} |
|
if(dogc && mstats.heap_alloc >= mstats.next_gc) |
runtime_gc(0); |
return v; |
} |
|
void* |
__go_alloc(uintptr size) |
{ |
return runtime_mallocgc(size, 0, 0, 1); |
} |
|
// Free the object whose base pointer is v. |
void |
__go_free(void *v) |
{ |
M *m; |
int32 sizeclass; |
MSpan *s; |
MCache *c; |
uint32 prof; |
uintptr size; |
|
if(v == nil) |
return; |
|
// If you change this also change mgc0.c:/^sweep, |
// which has a copy of the guts of free. |
|
m = runtime_m(); |
if(m->mallocing) |
runtime_throw("malloc/free - deadlock"); |
m->mallocing = 1; |
|
if(!runtime_mlookup(v, nil, nil, &s)) { |
// runtime_printf("free %p: not an allocated block\n", v); |
runtime_throw("free runtime_mlookup"); |
} |
prof = runtime_blockspecial(v); |
|
// Find size class for v. |
sizeclass = s->sizeclass; |
c = m->mcache; |
if(sizeclass == 0) { |
// Large object. |
size = s->npages<<PageShift; |
*(uintptr*)(s->start<<PageShift) = 1; // mark as "needs to be zeroed" |
// Must mark v freed before calling unmarkspan and MHeap_Free: |
// they might coalesce v into other spans and change the bitmap further. |
runtime_markfreed(v, size); |
runtime_unmarkspan(v, 1<<PageShift); |
runtime_MHeap_Free(&runtime_mheap, s, 1); |
} else { |
// Small object. |
size = runtime_class_to_size[sizeclass]; |
if(size > sizeof(uintptr)) |
((uintptr*)v)[1] = 1; // mark as "needs to be zeroed" |
// Must mark v freed before calling MCache_Free: |
// it might coalesce v and other blocks into a bigger span |
// and change the bitmap further. |
runtime_markfreed(v, size); |
c->local_by_size[sizeclass].nfree++; |
runtime_MCache_Free(c, v, sizeclass, size); |
} |
c->local_alloc -= size; |
if(prof) |
runtime_MProf_Free(v, size); |
m->mallocing = 0; |
} |
|
int32 |
runtime_mlookup(void *v, byte **base, uintptr *size, MSpan **sp) |
{ |
uintptr n, i; |
byte *p; |
MSpan *s; |
|
runtime_m()->mcache->local_nlookup++; |
s = runtime_MHeap_LookupMaybe(&runtime_mheap, v); |
if(sp) |
*sp = s; |
if(s == nil) { |
runtime_checkfreed(v, 1); |
if(base) |
*base = nil; |
if(size) |
*size = 0; |
return 0; |
} |
|
p = (byte*)((uintptr)s->start<<PageShift); |
if(s->sizeclass == 0) { |
// Large object. |
if(base) |
*base = p; |
if(size) |
*size = s->npages<<PageShift; |
return 1; |
} |
|
if((byte*)v >= (byte*)s->limit) { |
// pointers past the last block do not count as pointers. |
return 0; |
} |
|
n = runtime_class_to_size[s->sizeclass]; |
if(base) { |
i = ((byte*)v - p)/n; |
*base = p + i*n; |
} |
if(size) |
*size = n; |
|
return 1; |
} |
|
MCache* |
runtime_allocmcache(void) |
{ |
int32 rate; |
MCache *c; |
|
runtime_lock(&runtime_mheap); |
c = runtime_FixAlloc_Alloc(&runtime_mheap.cachealloc); |
mstats.mcache_inuse = runtime_mheap.cachealloc.inuse; |
mstats.mcache_sys = runtime_mheap.cachealloc.sys; |
runtime_unlock(&runtime_mheap); |
|
// Set first allocation sample size. |
rate = runtime_MemProfileRate; |
if(rate > 0x3fffffff) // make 2*rate not overflow |
rate = 0x3fffffff; |
if(rate != 0) |
c->next_sample = runtime_fastrand1() % (2*rate); |
|
return c; |
} |
|
void |
runtime_purgecachedstats(M* m) |
{ |
MCache *c; |
|
// Protected by either heap or GC lock. |
c = m->mcache; |
mstats.heap_alloc += c->local_cachealloc; |
c->local_cachealloc = 0; |
mstats.heap_objects += c->local_objects; |
c->local_objects = 0; |
mstats.nmalloc += c->local_nmalloc; |
c->local_nmalloc = 0; |
mstats.nfree += c->local_nfree; |
c->local_nfree = 0; |
mstats.nlookup += c->local_nlookup; |
c->local_nlookup = 0; |
mstats.alloc += c->local_alloc; |
c->local_alloc = 0; |
mstats.total_alloc += c->local_total_alloc; |
c->local_total_alloc = 0; |
} |
|
extern uintptr runtime_sizeof_C_MStats |
__asm__ ("libgo_runtime.runtime.Sizeof_C_MStats"); |
|
#define MaxArena32 (2U<<30) |
|
void |
runtime_mallocinit(void) |
{ |
byte *p; |
uintptr arena_size, bitmap_size; |
extern byte end[]; |
byte *want; |
|
runtime_sizeof_C_MStats = sizeof(MStats); |
|
runtime_InitSizes(); |
|
// Set up the allocation arena, a contiguous area of memory where |
// allocated data will be found. The arena begins with a bitmap large |
// enough to hold 4 bits per allocated word. |
if(sizeof(void*) == 8) { |
// On a 64-bit machine, allocate from a single contiguous reservation. |
// 16 GB should be big enough for now. |
// |
// The code will work with the reservation at any address, but ask |
// SysReserve to use 0x000000f800000000 if possible. |
// Allocating a 16 GB region takes away 36 bits, and the amd64 |
// doesn't let us choose the top 17 bits, so that leaves the 11 bits |
// in the middle of 0x00f8 for us to choose. Choosing 0x00f8 means |
// that the valid memory addresses will begin 0x00f8, 0x00f9, 0x00fa, 0x00fb. |
// None of the bytes f8 f9 fa fb can appear in valid UTF-8, and |
// they are otherwise as far from ff (likely a common byte) as possible. |
// Choosing 0x00 for the leading 6 bits was more arbitrary, but it |
// is not a common ASCII code point either. Using 0x11f8 instead |
// caused out of memory errors on OS X during thread allocations. |
// These choices are both for debuggability and to reduce the |
// odds of the conservative garbage collector not collecting memory |
// because some non-pointer block of memory had a bit pattern |
// that matched a memory address. |
// |
// Actually we reserve 17 GB (because the bitmap ends up being 1 GB) |
// but it hardly matters: fc is not valid UTF-8 either, and we have to |
// allocate 15 GB before we get that far. |
arena_size = (uintptr)(16LL<<30); |
bitmap_size = arena_size / (sizeof(void*)*8/4); |
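// Worked example (editor's note): with 8-byte words this is 4 bits |
// of bitmap per 64-bit word, i.e. arena_size/16 bytes, so the 16 GB |
// arena needs a 1 GB bitmap -- the "17 GB" mentioned above. |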
p = runtime_SysReserve((void*)(0x00f8ULL<<32), bitmap_size + arena_size); |
if(p == nil) |
runtime_throw("runtime: cannot reserve arena virtual address space"); |
} else { |
// On a 32-bit machine, we can't typically get away |
// with a giant virtual address space reservation. |
// Instead we map the memory information bitmap |
// immediately after the data segment, large enough |
// to handle another 2GB of mappings (256 MB), |
// along with a reservation for another 512 MB of memory. |
// When that gets used up, we'll start asking the kernel |
// for any memory anywhere and hope it's in the 2GB |
// following the bitmap (presumably the executable begins |
// near the bottom of memory, so we'll have to use up |
// most of memory before the kernel resorts to giving out |
// memory before the beginning of the text segment). |
// |
// Alternatively we could reserve 512 MB bitmap, enough |
// for 4GB of mappings, and then accept any memory the |
// kernel threw at us, but normally that's a waste of 512 MB |
// of address space, which is probably too much in a 32-bit world. |
bitmap_size = MaxArena32 / (sizeof(void*)*8/4); |
arena_size = 512<<20; |
|
// SysReserve treats the address we ask for, end, as a hint, |
// not as an absolute requirement. If we ask for the end |
// of the data segment but the operating system requires |
// a little more space before we can start allocating, it will |
// give out a slightly higher pointer. Except QEMU, which |
// is buggy, as usual: it won't adjust the pointer upward. |
// So adjust it upward a little bit ourselves: 1/4 MB to get |
// away from the running binary image and then round up |
// to a MB boundary. |
want = (byte*)(((uintptr)end + (1<<18) + (1<<20) - 1)&~((1<<20)-1)); |
if(0xffffffff - (uintptr)want <= bitmap_size + arena_size) |
want = 0; |
p = runtime_SysReserve(want, bitmap_size + arena_size); |
if(p == nil) |
runtime_throw("runtime: cannot reserve arena virtual address space"); |
} |
if((uintptr)p & (((uintptr)1<<PageShift)-1)) |
runtime_throw("runtime: SysReserve returned unaligned address"); |
|
runtime_mheap.bitmap = p; |
runtime_mheap.arena_start = p + bitmap_size; |
runtime_mheap.arena_used = runtime_mheap.arena_start; |
runtime_mheap.arena_end = runtime_mheap.arena_start + arena_size; |
|
// Initialize the rest of the allocator. |
runtime_MHeap_Init(&runtime_mheap, runtime_SysAlloc); |
runtime_m()->mcache = runtime_allocmcache(); |
|
// See if it works. |
runtime_free(runtime_malloc(1)); |
} |
|
void* |
runtime_MHeap_SysAlloc(MHeap *h, uintptr n) |
{ |
byte *p; |
|
if(n <= (uintptr)(h->arena_end - h->arena_used)) { |
// Keep taking from our reservation. |
p = h->arena_used; |
runtime_SysMap(p, n); |
h->arena_used += n; |
runtime_MHeap_MapBits(h); |
return p; |
} |
|
// On 64-bit, our reservation is all we have. |
if(sizeof(void*) == 8) |
return nil; |
|
// On 32-bit, once the reservation is gone we can |
// try to get memory at a location chosen by the OS |
// and hope that it is in the range we allocated bitmap for. |
p = runtime_SysAlloc(n); |
if(p == nil) |
return nil; |
|
if(p < h->arena_start || (uintptr)(p+n - h->arena_start) >= MaxArena32) { |
runtime_printf("runtime: memory allocated by OS not in usable range\n"); |
runtime_SysFree(p, n); |
return nil; |
} |
|
if(p+n > h->arena_used) { |
h->arena_used = p+n; |
if(h->arena_used > h->arena_end) |
h->arena_end = h->arena_used; |
runtime_MHeap_MapBits(h); |
} |
|
return p; |
} |
|
// Runtime stubs. |
|
void* |
runtime_mal(uintptr n) |
{ |
return runtime_mallocgc(n, 0, 1, 1); |
} |
|
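// Editor's note: for a pointer-free type such as [8]int32 the |
// compiler sets GO_NO_POINTERS in the descriptor's __code, so new |
// allocates with FlagNoPointers and the collector never scans the |
// block for pointers. |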
func new(typ *Type) (ret *uint8) { |
uint32 flag = typ->__code&GO_NO_POINTERS ? FlagNoPointers : 0; |
ret = runtime_mallocgc(typ->__size, flag, 1, 1); |
} |
|
func Alloc(n uintptr) (p *byte) { |
p = runtime_malloc(n); |
} |
|
func Free(p *byte) { |
runtime_free(p); |
} |
|
func Lookup(p *byte) (base *byte, size uintptr) { |
runtime_mlookup(p, &base, &size, nil); |
} |
|
func GC() { |
runtime_gc(1); |
} |
|
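// Editor's sketch of a call that passes the checks below: |
//	runtime.SetFinalizer(f, func(f *os.File) { ... }) |
// obj must point to the start of its allocated block and the |
// finalizer must be a func taking exactly obj's dynamic type. |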
func SetFinalizer(obj Eface, finalizer Eface) { |
byte *base; |
uintptr size; |
const FuncType *ft; |
|
if(obj.__type_descriptor == nil) { |
// runtime·printf("runtime.SetFinalizer: first argument is nil interface\n"); |
goto throw; |
} |
if(obj.__type_descriptor->__code != GO_PTR) { |
// runtime_printf("runtime.SetFinalizer: first argument is %S, not pointer\n", *obj.type->string); |
goto throw; |
} |
if(!runtime_mlookup(obj.__object, &base, &size, nil) || obj.__object != base) { |
// runtime_printf("runtime.SetFinalizer: pointer not at beginning of allocated block\n"); |
goto throw; |
} |
ft = nil; |
if(finalizer.__type_descriptor != nil) { |
if(finalizer.__type_descriptor->__code != GO_FUNC) |
goto badfunc; |
ft = (const FuncType*)finalizer.__type_descriptor; |
if(ft->__dotdotdot || ft->__in.__count != 1 || !__go_type_descriptors_equal(*(Type**)ft->__in.__values, obj.__type_descriptor)) |
goto badfunc; |
} |
|
if(!runtime_addfinalizer(obj.__object, finalizer.__type_descriptor != nil ? *(void**)finalizer.__object : nil, ft)) { |
runtime_printf("runtime.SetFinalizer: finalizer already set\n"); |
goto throw; |
} |
return; |
|
badfunc: |
// runtime_printf("runtime.SetFinalizer: second argument is %S, not func(%S)\n", *finalizer.type->string, *obj.type->string); |
throw: |
runtime_throw("runtime.SetFinalizer"); |
} |
/goc2c.c
0,0 → 1,782
// Copyright 2009 The Go Authors. All rights reserved. |
// Use of this source code is governed by a BSD-style |
// license that can be found in the LICENSE file. |
|
// +build ignore |
|
/* |
* Translate a .goc file into a .c file. A .goc file is a combination |
* of a limited form of Go with C. |
*/ |
|
/* |
package PACKAGENAME |
{# line} |
func NAME([NAME TYPE { , NAME TYPE }]) [(NAME TYPE { , NAME TYPE })] \{ |
C code with proper brace nesting |
\} |
*/ |
|
/* |
* We generate C code which implements the function such that it can |
* be called from Go and executes the C code. |
*/ |
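|
/* |
 * For example (editor's illustration), given the input |
 * |
 *	package runtime |
 *	func Add(x int32, y int32) (sum int32) { |
 *		sum = x + y; |
 *	} |
 * |
 * this program emits a C function named runtime_Add (gcc mode) or |
 * runtime·Add (6g mode) whose body is the C code between the braces. |
 */ |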
|
#include <assert.h> |
#include <ctype.h> |
#include <stdarg.h> |
#include <stdio.h> |
#include <stdlib.h> |
#include <string.h> |
#include <errno.h> |
|
/* Whether we're emitting for gcc */ |
static int gcc; |
|
/* Package prefix to use; only meaningful for gcc */ |
static const char *prefix; |
|
/* File and line number */ |
static const char *file; |
static unsigned int lineno = 1; |
|
/* List of names and types. */ |
struct params { |
struct params *next; |
char *name; |
char *type; |
}; |
|
/* index into type_table */ |
enum { |
Bool, |
Float, |
Int, |
Uint, |
Uintptr, |
String, |
Slice, |
Eface, |
}; |
|
static struct { |
char *name; |
int size; |
} type_table[] = { |
/* variable sized first, for easy replacement */ |
/* order matches enum above */ |
/* default is 32-bit architecture sizes */ |
"bool", 1, |
"float", 4, |
"int", 4, |
"uint", 4, |
"uintptr", 4, |
"String", 8, |
"Slice", 12, |
"Eface", 8, |
|
/* fixed size */ |
"float32", 4, |
"float64", 8, |
"byte", 1, |
"int8", 1, |
"uint8", 1, |
"int16", 2, |
"uint16", 2, |
"int32", 4, |
"uint32", 4, |
"int64", 8, |
"uint64", 8, |
|
NULL, |
}; |
|
/* Fixed structure alignment (non-gcc only) */ |
int structround = 4; |
|
char *argv0; |
|
static void |
sysfatal(char *fmt, ...) |
{ |
char buf[256]; |
va_list arg; |
|
va_start(arg, fmt); |
vsnprintf(buf, sizeof buf, fmt, arg); |
va_end(arg); |
|
fprintf(stderr, "%s: %s\n", argv0 ? argv0 : "<prog>", buf); |
exit(1); |
} |
|
/* Unexpected EOF. */ |
static void |
bad_eof(void) |
{ |
sysfatal("%s:%ud: unexpected EOF\n", file, lineno); |
} |
|
/* Out of memory. */ |
static void |
bad_mem(void) |
{ |
sysfatal("%s:%ud: out of memory\n", file, lineno); |
} |
|
/* Allocate memory without fail. */ |
static void * |
xmalloc(unsigned int size) |
{ |
void *ret = malloc(size); |
if (ret == NULL) |
bad_mem(); |
return ret; |
} |
|
/* Reallocate memory without fail. */ |
static void* |
xrealloc(void *buf, unsigned int size) |
{ |
void *ret = realloc(buf, size); |
if (ret == NULL) |
bad_mem(); |
return ret; |
} |
|
/* Free a list of parameters. */ |
static void |
free_params(struct params *p) |
{ |
while (p != NULL) { |
struct params *next; |
|
next = p->next; |
free(p->name); |
free(p->type); |
free(p); |
p = next; |
} |
} |
|
/* Read a character, tracking lineno. */ |
static int |
getchar_update_lineno(void) |
{ |
int c; |
|
c = getchar(); |
if (c == '\n') |
++lineno; |
return c; |
} |
|
/* Read a character, giving an error on EOF, tracking lineno. */ |
static int |
getchar_no_eof(void) |
{ |
int c; |
|
c = getchar_update_lineno(); |
if (c == EOF) |
bad_eof(); |
return c; |
} |
|
/* Read a character, skipping comments. */ |
static int |
getchar_skipping_comments(void) |
{ |
int c; |
|
while (1) { |
c = getchar_update_lineno(); |
if (c != '/') |
return c; |
|
c = getchar(); |
if (c == '/') { |
do { |
c = getchar_update_lineno(); |
} while (c != EOF && c != '\n'); |
return c; |
} else if (c == '*') { |
while (1) { |
c = getchar_update_lineno(); |
if (c == EOF) |
return EOF; |
if (c == '*') { |
do { |
c = getchar_update_lineno(); |
} while (c == '*'); |
if (c == '/') |
break; |
} |
} |
} else { |
ungetc(c, stdin); |
return '/'; |
} |
} |
} |
|
/* |
* Read and return a token. Tokens are string or character literals |
* or else delimited by whitespace or by [(),{}]. |
* The latter are all returned as single characters. |
*/ |
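/* |
 * For example (editor's note), the input "func Foo(p *byte) {" is |
 * read as the tokens func, Foo, (, p, *byte, ), { -- "*byte" stays |
 * one token because '*' is not a delimiter. |
 */ |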
static char * |
read_token(void) |
{ |
int c, q; |
char *buf; |
unsigned int alc, off; |
const char* delims = "(),{}"; |
|
while (1) { |
c = getchar_skipping_comments(); |
if (c == EOF) |
return NULL; |
if (!isspace(c)) |
break; |
} |
alc = 16; |
buf = xmalloc(alc + 1); |
off = 0; |
if(c == '"' || c == '\'') { |
q = c; |
buf[off] = c; |
++off; |
while (1) { |
if (off+2 >= alc) { // room for c and maybe next char |
alc *= 2; |
buf = xrealloc(buf, alc + 1); |
} |
c = getchar_no_eof(); |
buf[off] = c; |
++off; |
if(c == q) |
break; |
if(c == '\\') { |
buf[off] = getchar_no_eof(); |
++off; |
} |
} |
} else if (strchr(delims, c) != NULL) { |
buf[off] = c; |
++off; |
} else { |
while (1) { |
if (off >= alc) { |
alc *= 2; |
buf = xrealloc(buf, alc + 1); |
} |
buf[off] = c; |
++off; |
c = getchar_skipping_comments(); |
if (c == EOF) |
break; |
if (isspace(c) || strchr(delims, c) != NULL) { |
if (c == '\n') |
lineno--; |
ungetc(c, stdin); |
break; |
} |
} |
} |
buf[off] = '\0'; |
return buf; |
} |
|
/* Read a token, giving an error on EOF. */ |
static char * |
read_token_no_eof(void) |
{ |
char *token = read_token(); |
if (token == NULL) |
bad_eof(); |
return token; |
} |
|
/* Read the package clause, and return the package name. */ |
static char * |
read_package(void) |
{ |
char *token; |
|
token = read_token_no_eof(); |
if (token == NULL) |
sysfatal("%s:%ud: no token\n", file, lineno); |
if (strcmp(token, "package") != 0) { |
sysfatal("%s:%ud: expected \"package\", got \"%s\"\n", |
file, lineno, token); |
} |
return read_token_no_eof(); |
} |
|
/* Read and copy preprocessor lines. */ |
static void |
read_preprocessor_lines(void) |
{ |
while (1) { |
int c; |
|
do { |
c = getchar_skipping_comments(); |
} while (isspace(c)); |
if (c != '#') { |
ungetc(c, stdin); |
break; |
} |
putchar(c); |
do { |
c = getchar_update_lineno(); |
putchar(c); |
} while (c != '\n'); |
} |
} |
|
/* |
* Read a type in Go syntax and return a type in C syntax. We only |
* permit basic types and pointers. |
*/ |
static char * |
read_type(void) |
{ |
char *p, *op, *q; |
int pointer_count; |
unsigned int len; |
|
p = read_token_no_eof(); |
if (*p != '*') |
return p; |
op = p; |
pointer_count = 0; |
while (*p == '*') { |
++pointer_count; |
++p; |
} |
len = strlen(p); |
q = xmalloc(len + pointer_count + 1); |
memcpy(q, p, len); |
while (pointer_count > 0) { |
q[len] = '*'; |
++len; |
--pointer_count; |
} |
q[len] = '\0'; |
free(op); |
return q; |
} |
|
/* Return the size of the given type. */ |
static int |
type_size(char *p) |
{ |
int i; |
|
if(p[strlen(p)-1] == '*') |
return type_table[Uintptr].size; |
|
for(i=0; type_table[i].name; i++) |
if(strcmp(type_table[i].name, p) == 0) |
return type_table[i].size; |
if(!gcc) { |
sysfatal("%s:%ud: unknown type %s\n", file, lineno, p); |
} |
return 1; |
} |
|
/* |
* Read a list of parameters. Each parameter is a name and a type. |
* The list ends with a ')'. We have already read the '('. |
*/ |
static struct params * |
read_params(int *poffset) |
{ |
char *token; |
struct params *ret, **pp, *p; |
int offset, size, rnd; |
|
ret = NULL; |
pp = &ret; |
token = read_token_no_eof(); |
offset = 0; |
if (strcmp(token, ")") != 0) { |
while (1) { |
p = xmalloc(sizeof(struct params)); |
p->name = token; |
p->type = read_type(); |
p->next = NULL; |
*pp = p; |
pp = &p->next; |
|
size = type_size(p->type); |
rnd = size; |
if(rnd > structround) |
rnd = structround; |
if(offset%rnd) |
offset += rnd - offset%rnd; |
offset += size; |
|
token = read_token_no_eof(); |
if (strcmp(token, ",") != 0) |
break; |
token = read_token_no_eof(); |
} |
} |
if (strcmp(token, ")") != 0) { |
sysfatal("%s:%ud: expected '('\n", |
file, lineno); |
} |
if (poffset != NULL) |
*poffset = offset; |
return ret; |
} |
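|
/* |
 * Worked example (editor's note): for (x byte, y int32) with |
 * structround 4, x lands at offset 0, y is rounded up to offset 4, |
 * and *poffset comes back as 8. |
 */ |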
|
/* |
* Read a function header. This reads up to and including the initial |
* '{' character. Returns 1 if it read a header, 0 at EOF. |
*/ |
static int |
read_func_header(char **name, struct params **params, int *paramwid, struct params **rets) |
{ |
int lastline; |
char *token; |
|
lastline = -1; |
while (1) { |
token = read_token(); |
if (token == NULL) |
return 0; |
if (strcmp(token, "func") == 0) { |
if(lastline != -1) |
printf("\n"); |
break; |
} |
if (lastline != lineno) { |
if (lastline == lineno-1) |
printf("\n"); |
else |
printf("\n#line %d \"%s\"\n", lineno, file); |
lastline = lineno; |
} |
printf("%s ", token); |
} |
|
*name = read_token_no_eof(); |
|
token = read_token(); |
if (token == NULL || strcmp(token, "(") != 0) { |
sysfatal("%s:%ud: expected \"(\"\n", |
file, lineno); |
} |
*params = read_params(paramwid); |
|
token = read_token(); |
if (token == NULL || strcmp(token, "(") != 0) |
*rets = NULL; |
else { |
*rets = read_params(NULL); |
token = read_token(); |
} |
if (token == NULL || strcmp(token, "{") != 0) { |
sysfatal("%s:%ud: expected \"{\"\n", |
file, lineno); |
} |
return 1; |
} |
|
/* Write out parameters. */ |
static void |
write_params(struct params *params, int *first) |
{ |
struct params *p; |
|
for (p = params; p != NULL; p = p->next) { |
if (*first) |
*first = 0; |
else |
printf(", "); |
printf("%s %s", p->type, p->name); |
} |
} |
|
/* Write a 6g function header. */ |
static void |
write_6g_func_header(char *package, char *name, struct params *params, |
int paramwid, struct params *rets) |
{ |
int first, n; |
|
printf("void\n%s·%s(", package, name); |
first = 1; |
write_params(params, &first); |
|
/* insert padding to align output struct */ |
if(rets != NULL && paramwid%structround != 0) { |
n = structround - paramwid%structround; |
if(n & 1) |
printf(", uint8"); |
if(n & 2) |
printf(", uint16"); |
if(n & 4) |
printf(", uint32"); |
} |
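|
/* Editor's example: with structround 8 and paramwid 12, n is 4, so |
   a single ", uint32" pad is emitted before the results. */ |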
|
write_params(rets, &first); |
printf(")\n{\n"); |
} |
|
/* Write a 6g function trailer. */ |
static void |
write_6g_func_trailer(struct params *rets) |
{ |
struct params *p; |
|
for (p = rets; p != NULL; p = p->next) |
printf("\tFLUSH(&%s);\n", p->name); |
printf("}\n"); |
} |
|
/* Define the gcc function return type if necessary. */ |
static void |
define_gcc_return_type(char *package, char *name, struct params *rets) |
{ |
struct params *p; |
|
if (rets == NULL || rets->next == NULL) |
return; |
printf("struct %s_%s_ret {\n", package, name); |
for (p = rets; p != NULL; p = p->next) |
printf(" %s %s;\n", p->type, p->name); |
printf("};\n"); |
} |
|
/* Write out the gcc function return type. */ |
static void |
write_gcc_return_type(char *package, char *name, struct params *rets) |
{ |
if (rets == NULL) |
printf("void"); |
else if (rets->next == NULL) |
printf("%s", rets->type); |
else |
printf("struct %s_%s_ret", package, name); |
} |
|
/* Write out a gcc function header. */ |
static void |
write_gcc_func_header(char *package, char *name, struct params *params, |
struct params *rets) |
{ |
int first; |
struct params *p; |
|
define_gcc_return_type(package, name, rets); |
write_gcc_return_type(package, name, rets); |
printf(" %s_%s(", package, name); |
first = 1; |
write_params(params, &first); |
printf(") asm (\""); |
if (prefix != NULL) |
printf("%s.", prefix); |
printf("%s.%s\");\n", package, name); |
write_gcc_return_type(package, name, rets); |
printf(" %s_%s(", package, name); |
first = 1; |
write_params(params, &first); |
printf(")\n{\n"); |
for (p = rets; p != NULL; p = p->next) |
printf(" %s %s;\n", p->type, p->name); |
} |
|
/* Write out a gcc function trailer. */ |
static void |
write_gcc_func_trailer(char *package, char *name, struct params *rets) |
{ |
if (rets == NULL) |
; |
else if (rets->next == NULL) |
printf("return %s;\n", rets->name); |
else { |
struct params *p; |
|
printf(" {\n struct %s_%s_ret __ret;\n", package, name); |
for (p = rets; p != NULL; p = p->next) |
printf(" __ret.%s = %s;\n", p->name, p->name); |
printf(" return __ret;\n }\n"); |
} |
printf("}\n"); |
} |
|
/* Write out a function header. */ |
static void |
write_func_header(char *package, char *name, |
struct params *params, int paramwid, |
struct params *rets) |
{ |
if (gcc) |
write_gcc_func_header(package, name, params, rets); |
else |
write_6g_func_header(package, name, params, paramwid, rets); |
printf("#line %d \"%s\"\n", lineno, file); |
} |
|
/* Write out a function trailer. */ |
static void |
write_func_trailer(char *package, char *name, |
struct params *rets) |
{ |
if (gcc) |
write_gcc_func_trailer(package, name, rets); |
else |
write_6g_func_trailer(rets); |
} |
|
/* |
* Read and write the body of the function, ending in an unnested } |
* (which is read but not written). |
*/ |
static void |
copy_body(void) |
{ |
int nesting = 0; |
while (1) { |
int c; |
|
c = getchar_no_eof(); |
if (c == '}' && nesting == 0) |
return; |
putchar(c); |
switch (c) { |
default: |
break; |
case '{': |
++nesting; |
break; |
case '}': |
--nesting; |
break; |
case '/': |
c = getchar_update_lineno(); |
putchar(c); |
if (c == '/') { |
do { |
c = getchar_no_eof(); |
putchar(c); |
} while (c != '\n'); |
} else if (c == '*') { |
while (1) { |
c = getchar_no_eof(); |
putchar(c); |
if (c == '*') { |
do { |
c = getchar_no_eof(); |
putchar(c); |
} while (c == '*'); |
if (c == '/') |
break; |
} |
} |
} |
break; |
case '"': |
case '\'': |
{ |
int delim = c; |
do { |
c = getchar_no_eof(); |
putchar(c); |
if (c == '\\') { |
c = getchar_no_eof(); |
putchar(c); |
c = '\0'; |
} |
} while (c != delim); |
} |
break; |
} |
} |
} |
|
/* Process the entire file. */ |
static void |
process_file(void) |
{ |
char *package, *name; |
struct params *params, *rets; |
int paramwid; |
|
package = read_package(); |
read_preprocessor_lines(); |
while (read_func_header(&name, ¶ms, ¶mwid, &rets)) { |
write_func_header(package, name, params, paramwid, rets); |
copy_body(); |
write_func_trailer(package, name, rets); |
free(name); |
free_params(params); |
free_params(rets); |
} |
free(package); |
} |
|
static void |
usage(void) |
{ |
sysfatal("Usage: goc2c [--6g | --gc] [--go-prefix PREFIX] [file]\n"); |
} |
|
void |
main(int argc, char **argv) |
{ |
char *goarch; |
|
argv0 = argv[0]; |
while(argc > 1 && argv[1][0] == '-') { |
if(strcmp(argv[1], "-") == 0) |
break; |
if(strcmp(argv[1], "--6g") == 0) |
gcc = 0; |
else if(strcmp(argv[1], "--gcc") == 0) |
gcc = 1; |
else if (strcmp(argv[1], "--go-prefix") == 0 && argc > 2) { |
prefix = argv[2]; |
argc--; |
argv++; |
} else |
usage(); |
argc--; |
argv++; |
} |
|
if(argc <= 1 || strcmp(argv[1], "-") == 0) { |
file = "<stdin>"; |
process_file(); |
exit(0); |
} |
|
if(argc > 2) |
usage(); |
|
file = argv[1]; |
if(freopen(file, "r", stdin) == 0) { |
sysfatal("open %s: %r\n", file); |
} |
|
if(!gcc) { |
// 6g etc; update size table |
goarch = getenv("GOARCH"); |
if(goarch != NULL && strcmp(goarch, "amd64") == 0) { |
type_table[Uintptr].size = 8; |
type_table[String].size = 16; |
type_table[Slice].size = 8+4+4; |
type_table[Eface].size = 8+8; |
structround = 8; |
} |
} |
|
printf("// AUTO-GENERATED by autogen.sh; DO NOT EDIT\n\n"); |
process_file(); |
exit(0); |
} |
/lock_sema.c
0,0 → 1,226
// Copyright 2011 The Go Authors. All rights reserved. |
// Use of this source code is governed by a BSD-style |
// license that can be found in the LICENSE file. |
|
// +build darwin netbsd openbsd plan9 windows |
|
#include "runtime.h" |
|
// This implementation depends on OS-specific implementations of |
// |
// uintptr runtime_semacreate(void) |
// Create a semaphore, which will be assigned to m->waitsema. |
// The zero value is treated as absence of any semaphore, |
// so be sure to return a non-zero value. |
// |
// int32 runtime_semasleep(int64 ns) |
// If ns < 0, acquire m->waitsema and return 0. |
// If ns >= 0, try to acquire m->waitsema for at most ns nanoseconds. |
// Return 0 if the semaphore was acquired, -1 if interrupted or timed out. |
// |
// int32 runtime_semawakeup(M *mp) |
// Wake up mp, which is or will soon be sleeping on mp->waitsema. |
// |
|
enum |
{ |
LOCKED = 1, |
|
ACTIVE_SPIN = 4, |
ACTIVE_SPIN_CNT = 30, |
PASSIVE_SPIN = 1, |
}; |
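|
// Editor's sketch of the l->waitm encoding used below: |
//	nil			unlocked, no waiters |
//	LOCKED			locked, no waiters |
//	(uintptr)m | LOCKED	locked; m heads the list of waiting |
//				M's chained through m->nextwaitm |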
|
void |
runtime_lock(Lock *l) |
{ |
M *m; |
uintptr v; |
uint32 i, spin; |
|
m = runtime_m(); |
if(m->locks++ < 0) |
runtime_throw("runtime_lock: lock count"); |
|
// Speculative grab for lock. |
if(runtime_casp(&l->waitm, nil, (void*)LOCKED)) |
return; |
|
if(m->waitsema == 0) |
m->waitsema = runtime_semacreate(); |
|
// On uniprocessors, there is no point in spinning. |
// On multiprocessors, spin for ACTIVE_SPIN attempts. |
spin = 0; |
if(runtime_ncpu > 1) |
spin = ACTIVE_SPIN; |
|
for(i=0;; i++) { |
v = (uintptr)runtime_atomicloadp(&l->waitm); |
if((v&LOCKED) == 0) { |
unlocked: |
if(runtime_casp(&l->waitm, (void*)v, (void*)(v|LOCKED))) |
return; |
i = 0; |
} |
if(i<spin) |
runtime_procyield(ACTIVE_SPIN_CNT); |
else if(i<spin+PASSIVE_SPIN) |
runtime_osyield(); |
else { |
// Someone else has it. |
// l->waitm points to a linked list of M's waiting |
// for this lock, chained through m->nextwaitm. |
// Queue this M. |
for(;;) { |
m->nextwaitm = (void*)(v&~LOCKED); |
if(runtime_casp(&l->waitm, (void*)v, (void*)((uintptr)m|LOCKED))) |
break; |
v = (uintptr)runtime_atomicloadp(&l->waitm); |
if((v&LOCKED) == 0) |
goto unlocked; |
} |
if(v&LOCKED) { |
// Queued. Wait. |
runtime_semasleep(-1); |
i = 0; |
} |
} |
} |
} |
|
void |
runtime_unlock(Lock *l) |
{ |
uintptr v; |
M *mp; |
|
if(--runtime_m()->locks < 0) |
runtime_throw("runtime_unlock: lock count"); |
|
for(;;) { |
v = (uintptr)runtime_atomicloadp(&l->waitm); |
if(v == LOCKED) { |
if(runtime_casp(&l->waitm, (void*)LOCKED, nil)) |
break; |
} else { |
// Other M's are waiting for the lock. |
// Dequeue an M. |
mp = (void*)(v&~LOCKED); |
if(runtime_casp(&l->waitm, (void*)v, mp->nextwaitm)) { |
// Dequeued an M. Wake it. |
runtime_semawakeup(mp); |
break; |
} |
} |
} |
} |
|
// One-time notifications. |
void |
runtime_noteclear(Note *n) |
{ |
n->waitm = nil; |
} |
|
void |
runtime_notewakeup(Note *n) |
{ |
M *mp; |
|
do |
mp = runtime_atomicloadp(&n->waitm); |
while(!runtime_casp(&n->waitm, mp, (void*)LOCKED)); |
|
// Successfully set waitm to LOCKED. |
// What was it before? |
if(mp == nil) { |
// Nothing was waiting. Done. |
} else if(mp == (M*)LOCKED) { |
// Two notewakeups! Not allowed. |
runtime_throw("notewakeup - double wakeup"); |
} else { |
// Must be the waiting m. Wake it up. |
runtime_semawakeup(mp); |
} |
} |
|
void |
runtime_notesleep(Note *n) |
{ |
M *m; |
|
m = runtime_m(); |
if(m->waitsema == 0) |
m->waitsema = runtime_semacreate(); |
if(!runtime_casp(&n->waitm, nil, m)) { // must be LOCKED (got wakeup) |
if(n->waitm != (void*)LOCKED) |
runtime_throw("notesleep - waitm out of sync"); |
return; |
} |
// Queued. Sleep. |
runtime_semasleep(-1); |
} |
|
void |
runtime_notetsleep(Note *n, int64 ns) |
{ |
M *m; |
M *mp; |
int64 deadline, now; |
|
if(ns < 0) { |
runtime_notesleep(n); |
return; |
} |
|
m = runtime_m(); |
if(m->waitsema == 0) |
m->waitsema = runtime_semacreate(); |
|
// Register for wakeup on n->waitm. |
if(!runtime_casp(&n->waitm, nil, m)) { // must be LOCKED (got wakeup already) |
if(n->waitm != (void*)LOCKED) |
runtime_throw("notetsleep - waitm out of sync"); |
return; |
} |
|
deadline = runtime_nanotime() + ns; |
for(;;) { |
// Registered. Sleep. |
if(runtime_semasleep(ns) >= 0) { |
// Acquired semaphore, semawakeup unregistered us. |
// Done. |
return; |
} |
|
// Interrupted or timed out. Still registered. Semaphore not acquired. |
now = runtime_nanotime(); |
if(now >= deadline) |
break; |
|
// Deadline hasn't arrived. Keep sleeping. |
ns = deadline - now; |
} |
|
// Deadline arrived. Still registered. Semaphore not acquired. |
// Want to give up and return, but have to unregister first, |
// so that any notewakeup racing with the return does not |
// try to grant us the semaphore when we don't expect it. |
for(;;) { |
mp = runtime_atomicloadp(&n->waitm); |
if(mp == m) { |
// No wakeup yet; unregister if possible. |
if(runtime_casp(&n->waitm, mp, nil)) |
return; |
} else if(mp == (M*)LOCKED) { |
// Wakeup happened so semaphore is available. |
// Grab it to avoid getting out of sync. |
if(runtime_semasleep(-1) < 0) |
runtime_throw("runtime: unable to acquire - semaphore out of sync"); |
return; |
} else { |
runtime_throw("runtime: unexpected waitm - semaphore out of sync"); |
} |
} |
} |
/go-print.c
0,0 → 1,170
/* go-print.c -- support for the go print statement. |
|
Copyright 2009 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include <math.h> |
#include <stdint.h> |
#include <stdio.h> |
|
#include "array.h" |
#include "go-panic.h" |
#include "go-string.h" |
#include "interface.h" |
|
/* This implements the various little functions which are called by |
the predeclared functions print/println/panic/panicln. */ |
|
void |
__go_print_space () |
{ |
putc (' ', stderr); |
} |
|
void |
__go_print_nl () |
{ |
putc ('\n', stderr); |
} |
|
void |
__go_print_string (struct __go_string val) |
{ |
fprintf (stderr, "%.*s", (int) val.__length, (const char *) val.__data); |
} |
|
void |
__go_print_uint64 (uint64_t val) |
{ |
fprintf (stderr, "%llu", (unsigned long long) val); |
} |
|
void |
__go_print_int64 (int64_t val) |
{ |
fprintf (stderr, "%lld", (long long) val); |
} |
|
void |
__go_print_double (double v) |
{ |
char buf[20]; |
int e, s, i, n; |
double h; |
|
if (isnan (v)) |
{ |
fputs ("NaN", stderr); |
return; |
} |
if (__builtin_isinf (v)) |
{ |
putc (v < 0 ? '-' : '+', stderr); |
fputs ("Inf", stderr); |
return; |
} |
|
/* The number of digits printed. */ |
n = 7; |
/* The exponent. */ |
e = 0; |
/* The sign. */ |
s = 0; |
if (v != 0) |
{ |
if (v < 0) |
{ |
v = -v; |
s = 1; |
} |
|
/* Normalize. */ |
while (v >= 10) |
{ |
++e; |
v /= 10; |
} |
while (v < 1) |
{ |
--e; |
v *= 10; |
} |
|
/* Round. */ |
h = 5; |
for (i = 0; i < n; ++i) |
h /= 10; |
|
v += h; |
if (v >= 10) |
{ |
++e; |
v /= 10; |
} |
} |
|
/* The format is +d.dddddde+ddd. */ |
buf[0] = s ? '-' : '+'; |
for (i = 0; i < n; ++i) |
{ |
int d; |
|
d = v; |
buf[i + 2] = d + '0'; |
v -= d; |
v *= 10; |
} |
buf[1] = buf[2]; |
buf[2] = '.'; |
|
buf[n + 2] = 'e'; |
buf[n + 3] = e < 0 ? '-' : '+'; |
if (e < 0) |
e = - e; |
buf[n + 4] = e / 100 + '0'; |
buf[n + 5] = (e / 10) % 10 + '0'; |
buf[n + 6] = e % 10 + '0'; |
buf[n + 7] = '\0'; |
fputs (buf, stderr); |
} |
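|
/* Editor's example: __go_print_double (1234.5) writes |
   "+1.234500e+003" -- a sign, seven significant digits, and a |
   three-digit exponent, matching the buffer layout above. */ |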
|
void |
__go_print_complex (__complex double val) |
{ |
putc ('(', stderr); |
__go_print_double (__builtin_creal (val)); |
__go_print_double (__builtin_cimag (val)); |
fputs ("i)", stderr); |
} |
|
void |
__go_print_bool (_Bool val) |
{ |
fputs (val ? "true" : "false", stderr); |
} |
|
void |
__go_print_pointer (void *val) |
{ |
fprintf (stderr, "0x%lx", (unsigned long) (uintptr_t) val); |
} |
|
void |
__go_print_empty_interface (struct __go_empty_interface e) |
{ |
fprintf (stderr, "(%p,%p)", e.__type_descriptor, e.__object); |
} |
|
void |
__go_print_interface (struct __go_interface i) |
{ |
fprintf (stderr, "(%p,%p)", i.__methods, i.__object); |
} |
|
void |
__go_print_slice (struct __go_open_array val) |
{ |
fprintf (stderr, "[%d/%d]", val.__count, val.__capacity); |
__go_print_pointer (val.__values); |
} |
/go-breakpoint.c
0,0 → 1,15
/* go-breakpoint.c -- the runtime.Breakpoint function. |
|
Copyright 2009 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include <sched.h> |
|
void Breakpoint (void) asm ("libgo_runtime.runtime.Breakpoint"); |
|
void |
Breakpoint (void) |
{ |
__builtin_trap (); |
} |
/go-assert-interface.c
0,0 → 1,49
/* go-assert-interface.c -- interface type assertion for Go. |
|
Copyright 2010 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include "go-alloc.h" |
#include "go-assert.h" |
#include "go-panic.h" |
#include "interface.h" |
|
/* This is called by the compiler to implement a type assertion from |
one interface type to another. This returns the value that should |
go in the first field of the result tuple. The result may be an |
empty or a non-empty interface. */ |
|
const void * |
__go_assert_interface (const struct __go_type_descriptor *lhs_descriptor, |
const struct __go_type_descriptor *rhs_descriptor) |
{ |
const struct __go_interface_type *lhs_interface; |
|
if (rhs_descriptor == NULL) |
{ |
struct __go_empty_interface panic_arg; |
|
/* A type assertion is not permitted with a nil interface. */ |
|
newTypeAssertionError (NULL, |
NULL, |
lhs_descriptor, |
NULL, |
NULL, |
lhs_descriptor->__reflection, |
NULL, |
&panic_arg); |
__go_panic (panic_arg); |
} |
|
/* A type assertion to an empty interface just returns the object |
descriptor. */ |
|
__go_assert (lhs_descriptor->__code == GO_INTERFACE); |
lhs_interface = (const struct __go_interface_type *) lhs_descriptor; |
if (lhs_interface->__methods.__count == 0) |
return rhs_descriptor; |
|
return __go_convert_interface_2 (lhs_descriptor, rhs_descriptor, 0); |
} |
/go-alloc.h
0,0 → 1,11
/* go-alloc.h -- allocate memory for Go. |
|
Copyright 2009 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include <stddef.h> |
#include <stdint.h> |
|
extern void *__go_alloc (unsigned int __attribute__ ((mode (pointer)))); |
extern void __go_free (void *); |
/lock_futex.c
0,0 → 1,148
// Copyright 2011 The Go Authors. All rights reserved. |
// Use of this source code is governed by a BSD-style |
// license that can be found in the LICENSE file. |
|
// +build freebsd linux |
|
#include "runtime.h" |
|
// This implementation depends on OS-specific implementations of |
// |
// runtime_futexsleep(uint32 *addr, uint32 val, int64 ns) |
// Atomically, |
// if(*addr == val) sleep |
// Might be woken up spuriously; that's allowed. |
// Don't sleep longer than ns; ns < 0 means forever. |
// |
// runtime_futexwakeup(uint32 *addr, uint32 cnt) |
// If any procs are sleeping on addr, wake up at most cnt. |
|
enum |
{ |
MUTEX_UNLOCKED = 0, |
MUTEX_LOCKED = 1, |
MUTEX_SLEEPING = 2, |
|
ACTIVE_SPIN = 4, |
ACTIVE_SPIN_CNT = 30, |
PASSIVE_SPIN = 1, |
}; |
|
// Possible lock states are MUTEX_UNLOCKED, MUTEX_LOCKED and MUTEX_SLEEPING. |
// MUTEX_SLEEPING means that there is presumably at least one sleeping thread. |
// Note that there can be spinning threads during all states - they do not |
// affect mutex's state. |
void |
runtime_lock(Lock *l) |
{ |
uint32 i, v, wait, spin; |
|
if(runtime_m()->locks++ < 0) |
runtime_throw("runtime_lock: lock count"); |
|
// Speculative grab for lock. |
v = runtime_xchg(&l->key, MUTEX_LOCKED); |
if(v == MUTEX_UNLOCKED) |
return; |
|
// wait is either MUTEX_LOCKED or MUTEX_SLEEPING |
// depending on whether there is a thread sleeping |
// on this mutex. If we ever change l->key from |
// MUTEX_SLEEPING to some other value, we must be |
// careful to change it back to MUTEX_SLEEPING before |
// returning, to ensure that the sleeping thread gets |
// its wakeup call. |
wait = v; |
|
// On uniprocessors, there is no point in spinning. |
// On multiprocessors, spin for ACTIVE_SPIN attempts. |
spin = 0; |
if(runtime_ncpu > 1) |
spin = ACTIVE_SPIN; |
|
for(;;) { |
// Try for lock, spinning. |
for(i = 0; i < spin; i++) { |
while(l->key == MUTEX_UNLOCKED) |
if(runtime_cas(&l->key, MUTEX_UNLOCKED, wait)) |
return; |
runtime_procyield(ACTIVE_SPIN_CNT); |
} |
|
// Try for lock, rescheduling. |
for(i=0; i < PASSIVE_SPIN; i++) { |
while(l->key == MUTEX_UNLOCKED) |
if(runtime_cas(&l->key, MUTEX_UNLOCKED, wait)) |
return; |
runtime_osyield(); |
} |
|
// Sleep. |
v = runtime_xchg(&l->key, MUTEX_SLEEPING); |
if(v == MUTEX_UNLOCKED) |
return; |
wait = MUTEX_SLEEPING; |
runtime_futexsleep(&l->key, MUTEX_SLEEPING, -1); |
} |
} |
|
void |
runtime_unlock(Lock *l) |
{ |
uint32 v; |
|
if(--runtime_m()->locks < 0) |
runtime_throw("runtime_unlock: lock count"); |
|
v = runtime_xchg(&l->key, MUTEX_UNLOCKED); |
if(v == MUTEX_UNLOCKED) |
runtime_throw("unlock of unlocked lock"); |
if(v == MUTEX_SLEEPING) |
runtime_futexwakeup(&l->key, 1); |
} |
|
// One-time notifications. |
void |
runtime_noteclear(Note *n) |
{ |
n->key = 0; |
} |
|
void |
runtime_notewakeup(Note *n) |
{ |
runtime_xchg(&n->key, 1); |
runtime_futexwakeup(&n->key, 1); |
} |
|
void |
runtime_notesleep(Note *n) |
{ |
while(runtime_atomicload(&n->key) == 0) |
runtime_futexsleep(&n->key, 0, -1); |
} |
|
void |
runtime_notetsleep(Note *n, int64 ns) |
{ |
int64 deadline, now; |
|
if(ns < 0) { |
runtime_notesleep(n); |
return; |
} |
|
if(runtime_atomicload(&n->key) != 0) |
return; |
|
deadline = runtime_nanotime() + ns; |
for(;;) { |
runtime_futexsleep(&n->key, 0, ns); |
if(runtime_atomicload(&n->key) != 0) |
return; |
now = runtime_nanotime(); |
if(now >= deadline) |
return; |
ns = deadline - now; |
} |
} |
/go-assert.c
0,0 → 1,18
/* go-assert.c -- libgo specific assertions |
|
Copyright 2010 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include <stdio.h> |
#include <stdlib.h> |
|
#include "go-assert.h" |
|
void |
__go_assert_fail (const char *file, unsigned int lineno) |
{ |
/* FIXME: Eventually we should dump a stack trace here. */ |
fprintf (stderr, "%s:%u: libgo assertion failure\n", file, lineno); |
abort (); |
} |
/yield.c
0,0 → 1,52
// Copyright 2011 The Go Authors. All rights reserved. |
// Use of this source code is governed by a BSD-style |
// license that can be found in the LICENSE file. |
|
#include "config.h" |
|
#include <stddef.h> |
#include <sys/types.h> |
#include <sys/time.h> |
#include <sched.h> |
#include <unistd.h> |
|
#ifdef HAVE_SYS_SELECT_H |
#include <sys/select.h> |
#endif |
|
#include "runtime.h" |
|
/* Spin wait. */ |
|
void |
runtime_procyield (uint32 cnt) |
{ |
volatile uint32 i; |
|
for (i = 0; i < cnt; ++i) |
{ |
#if defined (__i386__) || defined (__x86_64__) |
__builtin_ia32_pause (); |
#endif |
} |
} |
|
/* Ask the OS to reschedule this thread. */ |
|
void |
runtime_osyield (void) |
{ |
sched_yield (); |
} |
|
/* Sleep for some number of microseconds. */ |
|
void |
runtime_usleep (uint32 us) |
{ |
struct timeval tv; |
|
tv.tv_sec = us / 1000000; |
tv.tv_usec = us % 1000000; |
select (0, NULL, NULL, NULL, &tv); |
} |
/go-interface-val-compare.c
0,0 → 1,32
/* go-interface-val-compare.c -- compare an interface to a value. |
|
Copyright 2009 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include "go-type.h" |
#include "interface.h" |
|
/* Compare two interface values. Return 0 for equal, not zero for not |
equal (return value is like strcmp). */ |
|
int |
__go_interface_value_compare ( |
struct __go_interface left, |
const struct __go_type_descriptor *right_descriptor, |
const void *val) |
{ |
const struct __go_type_descriptor *left_descriptor; |
|
if (left.__methods == NULL) |
return 1; |
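/* Editor's note: slot 0 of an interface's method table holds the |
   dynamic type descriptor; the real methods follow it. */ |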
left_descriptor = left.__methods[0]; |
if (!__go_type_descriptors_equal (left_descriptor, right_descriptor)) |
return 1; |
if (__go_is_pointer_type (left_descriptor)) |
return left.__object == val ? 0 : 1; |
if (!left_descriptor->__equalfn (left.__object, val, |
left_descriptor->__size)) |
return 1; |
return 0; |
} |
/mem_posix_memalign.c
0,0 → 1,48
#include <errno.h> |
#include <stdio.h> |
#include <stdlib.h> |
|
#include "runtime.h" |
#include "arch.h" |
#include "malloc.h" |
|
void* |
runtime_SysAlloc(uintptr n) |
{ |
void *p; |
|
mstats.sys += n; |
errno = posix_memalign(&p, PageSize, n); |
if (errno > 0) { |
perror("posix_memalign"); |
exit(2); |
} |
return p; |
} |
|
void |
runtime_SysUnused(void *v, uintptr n) |
{ |
USED(v); |
USED(n); |
// TODO(rsc): call madvise MADV_DONTNEED |
} |
|
void |
runtime_SysFree(void *v, uintptr n) |
{ |
mstats.sys -= n; |
free(v); |
} |
|
void* |
runtime_SysReserve(void *v, uintptr n) |
{ |
USED(v); |
return runtime_SysAlloc(n); |
} |
|
void |
runtime_SysMap(void *v, uintptr n) |
{ |
USED(v); |
USED(n); |
} |
/mprof.goc
0,0 → 1,294
// Copyright 2009 The Go Authors. All rights reserved. |
// Use of this source code is governed by a BSD-style |
// license that can be found in the LICENSE file. |
|
// Malloc profiling. |
// Patterned after tcmalloc's algorithms; shorter code. |
|
package runtime |
#include "runtime.h" |
#include "arch.h" |
#include "malloc.h" |
#include "defs.h" |
#include "go-type.h" |
|
// NOTE(rsc): Everything here could use cas if contention became an issue. |
static Lock proflock; |
|
// Per-call-stack allocation information. |
// Lookup by hashing call stack into a linked-list hash table. |
typedef struct Bucket Bucket; |
struct Bucket |
{ |
Bucket *next; // next in hash list |
Bucket *allnext; // next in list of all buckets |
uintptr allocs; |
uintptr frees; |
uintptr alloc_bytes; |
uintptr free_bytes; |
uintptr hash; |
uintptr nstk; |
uintptr stk[1]; |
}; |
enum { |
BuckHashSize = 179999, |
}; |
static Bucket **buckhash; |
static Bucket *buckets; |
static uintptr bucketmem; |
|
// Return the bucket for stk[0:nstk], allocating new bucket if needed. |
static Bucket* |
stkbucket(uintptr *stk, int32 nstk) |
{ |
int32 i; |
uintptr h; |
Bucket *b; |
|
if(buckhash == nil) { |
buckhash = runtime_SysAlloc(BuckHashSize*sizeof buckhash[0]); |
mstats.buckhash_sys += BuckHashSize*sizeof buckhash[0]; |
} |
|
// Hash stack. |
h = 0; |
for(i=0; i<nstk; i++) { |
h += stk[i]; |
h += h<<10; |
h ^= h>>6; |
} |
h += h<<3; |
h ^= h>>11; |
|
i = h%BuckHashSize; |
for(b = buckhash[i]; b; b=b->next) |
if(b->hash == h && b->nstk == (uintptr)nstk && |
runtime_mcmp((byte*)b->stk, (byte*)stk, nstk*sizeof stk[0]) == 0) |
return b; |
|
b = runtime_mallocgc(sizeof *b + nstk*sizeof stk[0], FlagNoProfiling, 0, 1); |
bucketmem += sizeof *b + nstk*sizeof stk[0]; |
runtime_memmove(b->stk, stk, nstk*sizeof stk[0]); |
b->hash = h; |
b->nstk = nstk; |
b->next = buckhash[i]; |
buckhash[i] = b; |
b->allnext = buckets; |
buckets = b; |
return b; |
} |
|
// Map from pointer to Bucket* that allocated it. |
// Three levels: |
// Linked-list hash table for top N-20 bits. |
// Array index for next 13 bits. |
// Linked list for next 7 bits. |
// This is more efficient than using a general map, |
// because of the typical clustering of the pointer keys. |
|
typedef struct AddrHash AddrHash; |
typedef struct AddrEntry AddrEntry; |
|
struct AddrHash |
{ |
AddrHash *next; // next in top-level hash table linked list |
uintptr addr; // addr>>20 |
AddrEntry *dense[1<<13]; |
}; |
|
struct AddrEntry |
{ |
AddrEntry *next; // next in bottom-level linked list |
uint32 addr; |
Bucket *b; |
}; |
|
enum { |
AddrHashBits = 12 // 1MB per entry, so good for 4GB of used address space |
}; |
static AddrHash *addrhash[1<<AddrHashBits]; |
static AddrEntry *addrfree; |
static uintptr addrmem; |
|
// Multiplicative hash function: |
// hashMultiplier is the bottom 32 bits of int((sqrt(5)-1)/2 * (1<<32)). |
// This is a good multiplier as suggested in CLR, Knuth. The hash |
// value is taken to be the top AddrHashBits bits of the bottom 32 bits |
// of the multiplied value. |
enum { |
HashMultiplier = 2654435769U |
}; |
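|
// Editor's sketch of how an address is split across the levels: |
//	addr >> 20		multiplicatively hashed into addrhash |
//	(addr >> 7) & 0x1fff	index into ah->dense (13 bits) |
//	addr & ((1<<20)-1)	stored complemented in e->addr to |
//				match entries within a dense chain |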
|
// Set the bucket associated with addr to b. |
static void |
setaddrbucket(uintptr addr, Bucket *b) |
{ |
int32 i; |
uint32 h; |
AddrHash *ah; |
AddrEntry *e; |
|
h = (uint32)((addr>>20)*HashMultiplier) >> (32-AddrHashBits); |
for(ah=addrhash[h]; ah; ah=ah->next) |
if(ah->addr == (addr>>20)) |
goto found; |
|
ah = runtime_mallocgc(sizeof *ah, FlagNoProfiling, 0, 1); |
addrmem += sizeof *ah; |
ah->next = addrhash[h]; |
ah->addr = addr>>20; |
addrhash[h] = ah; |
|
found: |
if((e = addrfree) == nil) { |
e = runtime_mallocgc(64*sizeof *e, FlagNoProfiling, 0, 0); |
addrmem += 64*sizeof *e; |
for(i=0; i+1<64; i++) |
e[i].next = &e[i+1]; |
e[63].next = nil; |
} |
addrfree = e->next; |
e->addr = (uint32)~(addr & ((1<<20)-1)); |
e->b = b; |
h = (addr>>7)&(nelem(ah->dense)-1); // entry in dense is top 13 bits of low 20. |
e->next = ah->dense[h]; |
ah->dense[h] = e; |
} |
|
// Get the bucket associated with addr and clear the association. |
static Bucket* |
getaddrbucket(uintptr addr) |
{ |
uint32 h; |
AddrHash *ah; |
AddrEntry *e, **l; |
Bucket *b; |
|
h = (uint32)((addr>>20)*HashMultiplier) >> (32-AddrHashBits); |
for(ah=addrhash[h]; ah; ah=ah->next) |
if(ah->addr == (addr>>20)) |
goto found; |
return nil; |
|
found: |
h = (addr>>7)&(nelem(ah->dense)-1); // entry in dense is top 13 bits of low 20. |
for(l=&ah->dense[h]; (e=*l) != nil; l=&e->next) { |
if(e->addr == (uint32)~(addr & ((1<<20)-1))) { |
*l = e->next; |
b = e->b; |
e->next = addrfree; |
addrfree = e; |
return b; |
} |
} |
return nil; |
} |
|
// Called by malloc to record a profiled block. |
void |
runtime_MProf_Malloc(void *p, uintptr size) |
{ |
M *m; |
int32 nstk; |
uintptr stk[32]; |
Bucket *b; |
|
m = runtime_m(); |
if(m->nomemprof > 0) |
return; |
|
m->nomemprof++; |
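	// Stack collection is disabled in this port for now (see the |
	// #if 0 below), so every allocation is accounted to a single |
	// empty-stack bucket. |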
#if 0 |
nstk = runtime_callers(1, stk, 32); |
#else |
nstk = 0; |
#endif |
runtime_lock(&proflock); |
b = stkbucket(stk, nstk); |
b->allocs++; |
b->alloc_bytes += size; |
setaddrbucket((uintptr)p, b); |
runtime_unlock(&proflock); |
m = runtime_m(); |
m->nomemprof--; |
} |
|
// Called when freeing a profiled block. |
void |
runtime_MProf_Free(void *p, uintptr size) |
{ |
M *m; |
Bucket *b; |
|
m = runtime_m(); |
if(m->nomemprof > 0) |
return; |
|
m->nomemprof++; |
runtime_lock(&proflock); |
b = getaddrbucket((uintptr)p); |
if(b != nil) { |
b->frees++; |
b->free_bytes += size; |
} |
runtime_unlock(&proflock); |
m = runtime_m(); |
m->nomemprof--; |
} |
|
|
// Go interface to profile data. (Declared in extern.go) |
// Assumes Go sizeof(int) == sizeof(int32) |
|
// Must match MemProfileRecord in extern.go. |
typedef struct Record Record; |
struct Record { |
int64 alloc_bytes, free_bytes; |
int64 alloc_objects, free_objects; |
uintptr stk[32]; |
}; |
|
// Write b's data to r. |
static void |
record(Record *r, Bucket *b) |
{ |
uint32 i; |
|
r->alloc_bytes = b->alloc_bytes; |
r->free_bytes = b->free_bytes; |
r->alloc_objects = b->allocs; |
r->free_objects = b->frees; |
for(i=0; i<b->nstk && i<nelem(r->stk); i++) |
r->stk[i] = b->stk[i]; |
for(; i<nelem(r->stk); i++) |
r->stk[i] = 0; |
} |
|
func MemProfile(p Slice, include_inuse_zero bool) (n int32, ok bool) { |
Bucket *b; |
Record *r; |
|
runtime_lock(&proflock); |
n = 0; |
for(b=buckets; b; b=b->allnext) |
if(include_inuse_zero || b->alloc_bytes != b->free_bytes) |
n++; |
ok = false; |
if(n <= p.__count) { |
ok = true; |
r = (Record*)p.__values; |
for(b=buckets; b; b=b->allnext) |
if(include_inuse_zero || b->alloc_bytes != b->free_bytes) |
record(r++, b); |
} |
runtime_unlock(&proflock); |
} |
|
void |
runtime_MProf_Mark(void (*scan)(byte *, int64)) |
{ |
// buckhash is not allocated via mallocgc. |
scan((byte*)&buckets, sizeof buckets); |
scan((byte*)&addrhash, sizeof addrhash); |
scan((byte*)&addrfree, sizeof addrfree); |
} |
/go-assert.h
0,0 → 1,18
/* go-assert.h -- libgo specific assertions |
|
Copyright 2010 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#ifndef LIBGO_GO_ASSERT_H |
#define LIBGO_GO_ASSERT_H |
|
/* We use a Go specific assert function so that functions which call |
assert aren't required to always split the stack. */ |
|
extern void __go_assert_fail (const char *file, unsigned int lineno) |
__attribute__ ((noreturn)); |
|
#define __go_assert(e) ((e) ? (void) 0 : __go_assert_fail (__FILE__, __LINE__)) |
|
#endif /* !defined(LIBGO_GO_ASSERT_H) */ |
/go-map-range.c
0,0 → 1,102
/* go-map-range.c -- implement a range clause over a map. |
|
Copyright 2009, 2010 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include "go-assert.h" |
#include "map.h" |
|
/* Initialize a range over a map. */ |
|
void |
__go_mapiterinit (const struct __go_map *h, struct __go_hash_iter *it) |
{ |
it->entry = NULL; |
if (h != NULL) |
{ |
it->map = h; |
it->next_entry = NULL; |
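      /* Start one notch before bucket 0: the decrement wraps the |
	 unsigned index to (uintptr_t)-1, so the first increment in |
	 __go_mapiternext lands on bucket 0.  */ |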
it->bucket = 0; |
--it->bucket; |
__go_mapiternext(it); |
} |
} |
|
/* Move to the next iteration, updating *HITER. */ |
|
void |
__go_mapiternext (struct __go_hash_iter *it) |
{ |
const void *entry; |
|
entry = it->next_entry; |
if (entry == NULL) |
{ |
const struct __go_map *map; |
uintptr_t bucket; |
|
map = it->map; |
bucket = it->bucket; |
while (1) |
{ |
++bucket; |
if (bucket >= map->__bucket_count) |
{ |
/* Map iteration is complete. */ |
it->entry = NULL; |
return; |
} |
entry = map->__buckets[bucket]; |
if (entry != NULL) |
break; |
} |
it->bucket = bucket; |
} |
it->entry = entry; |
it->next_entry = *(const void * const *) entry; |
} |
|
/* Get the key of the current iteration. */ |
|
void |
__go_mapiter1 (struct __go_hash_iter *it, unsigned char *key) |
{ |
const struct __go_map *map; |
const struct __go_map_descriptor *descriptor; |
const struct __go_type_descriptor *key_descriptor; |
const char *p; |
|
map = it->map; |
descriptor = map->__descriptor; |
key_descriptor = descriptor->__map_descriptor->__key_type; |
p = it->entry; |
__go_assert (p != NULL); |
__builtin_memcpy (key, p + descriptor->__key_offset, key_descriptor->__size); |
} |
|
/* Get the key and value of the current iteration. */ |
|
void |
__go_mapiter2 (struct __go_hash_iter *it, unsigned char *key, |
unsigned char *val) |
{ |
const struct __go_map *map; |
const struct __go_map_descriptor *descriptor; |
const struct __go_map_type *map_descriptor; |
const struct __go_type_descriptor *key_descriptor; |
const struct __go_type_descriptor *val_descriptor; |
const char *p; |
|
map = it->map; |
descriptor = map->__descriptor; |
map_descriptor = descriptor->__map_descriptor; |
key_descriptor = map_descriptor->__key_type; |
val_descriptor = map_descriptor->__val_type; |
p = it->entry; |
__go_assert (p != NULL); |
__builtin_memcpy (key, p + descriptor->__key_offset, |
key_descriptor->__size); |
__builtin_memcpy (val, p + descriptor->__val_offset, |
val_descriptor->__size); |
} |
/go-type-error.c
0,0 → 1,28
/* go-type-error.c -- invalid hash and equality functions. |
|
Copyright 2009 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include "runtime.h" |
#include "go-type.h" |
|
/* A hash function used for a type which does not support hash |
functions. */ |
|
uintptr_t |
__go_type_hash_error (const void *val __attribute__ ((unused)), |
uintptr_t key_size __attribute__ ((unused))) |
{ |
runtime_panicstring ("hash of unhashable type"); |
} |
|
/* An equality function for an interface. */ |
|
_Bool |
__go_type_equal_error (const void *v1 __attribute__ ((unused)), |
const void *v2 __attribute__ ((unused)), |
uintptr_t key_size __attribute__ ((unused))) |
{ |
runtime_panicstring ("comparing uncomparable types"); |
} |
/go-strcmp.c
0,0 → 1,27
/* go-strcmp.c -- the go string comparison function. |
|
Copyright 2009 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include "go-string.h" |
|
int |
__go_strcmp(struct __go_string s1, struct __go_string s2) |
{ |
int i; |
|
i = __builtin_memcmp(s1.__data, s2.__data, |
(s1.__length < s2.__length |
? s1.__length |
: s2.__length)); |
if (i != 0) |
return i; |
|
if (s1.__length < s2.__length) |
return -1; |
else if (s1.__length > s2.__length) |
return 1; |
else |
return 0; |
} |
/malloc.h
0,0 → 1,424
// Copyright 2009 The Go Authors. All rights reserved. |
// Use of this source code is governed by a BSD-style |
// license that can be found in the LICENSE file. |
|
// Memory allocator, based on tcmalloc. |
// http://goog-perftools.sourceforge.net/doc/tcmalloc.html |
|
// The main allocator works in runs of pages. |
// Small allocation sizes (up to and including 32 kB) are |
// rounded to one of about 60 size classes (NumSizeClasses below), |
// each of which has its own free list of objects of exactly that |
// size. |
// Any free page of memory can be split into a set of objects |
// of one size class, which are then managed using free list |
// allocators. |
// |
// The allocator's data structures are: |
// |
// FixAlloc: a free-list allocator for fixed-size objects, |
// used to manage storage used by the allocator. |
// MHeap: the malloc heap, managed at page (4096-byte) granularity. |
// MSpan: a run of pages managed by the MHeap. |
// MCentral: a shared free list for a given size class. |
// MCache: a per-thread (in Go, per-M) cache for small objects. |
// MStats: allocation statistics. |
// |
// Allocating a small object proceeds up a hierarchy of caches: |
// |
// 1. Round the size up to one of the small size classes |
// and look in the corresponding MCache free list. |
// If the list is not empty, allocate an object from it. |
// This can all be done without acquiring a lock. |
// |
// 2. If the MCache free list is empty, replenish it by |
// taking a bunch of objects from the MCentral free list. |
// Moving a bunch amortizes the cost of acquiring the MCentral lock. |
// |
// 3. If the MCentral free list is empty, replenish it by |
// allocating a run of pages from the MHeap and then |
// chopping that memory into objects of the given size. |
// Allocating many objects amortizes the cost of locking |
// the heap. |
// |
// 4. If the MHeap is empty or has no page runs large enough, |
// allocate a new group of pages (at least 1MB) from the |
// operating system. Allocating a large run of pages |
// amortizes the cost of talking to the operating system. |
// |
// Freeing a small object proceeds up the same hierarchy: |
// |
// 1. Look up the size class for the object and add it to |
// the MCache free list. |
// |
// 2. If the MCache free list is too long or the MCache has |
// too much memory, return some to the MCentral free lists. |
// |
// 3. If all the objects in a given span have returned to |
// the MCentral list, return that span to the page heap. |
// |
// 4. If the heap has too much memory, return some to the |
// operating system. |
// |
// TODO(rsc): Step 4 is not implemented. |
// |
// Allocating and freeing a large object uses the page heap |
// directly, bypassing the MCache and MCentral free lists. |
// |
// The small objects on the MCache and MCentral free lists |
// may or may not be zeroed. They are zeroed if and only if |
// the second word of the object is zero. The spans in the |
// page heap are always zeroed. When a span full of objects |
// is returned to the page heap, the objects that need to be zeroed |
// are zeroed first. There are two main benefits to delaying the |
// zeroing this way: |
// |
// 1. stack frames allocated from the small object lists |
// can avoid zeroing altogether. |
// 2. the cost of zeroing when reusing a small object is |
// charged to the mutator, not the garbage collector. |
// |
// This C code was written with an eye toward translating to Go |
// in the future. Methods have the form Type_Method(Type *t, ...). |
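
// Illustrative sketch (hypothetical, #if 0'd out of the build): the |
// small-object allocation path above, phrased in terms of functions |
// declared later in this header. Flags, statistics, and error handling |
// are omitted; the real path lives in malloc.goc. |
#if 0 |
static void* |
alloc_small_example(MCache *c, uintptr size) |
{ |
	// Step 1: round the size up to a size class. |
	int32 sizeclass = runtime_SizeToClass((int32)size); |

	// Steps 2-4 happen inside MCache_Alloc as needed: refill from the |
	// MCentral, grow the MCentral from the MHeap, and grow the heap |
	// from the operating system. |
	return runtime_MCache_Alloc(c, sizeclass, |
				    runtime_class_to_size[sizeclass], 1); |
} |
#endif |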
|
typedef struct MCentral MCentral; |
typedef struct MHeap MHeap; |
typedef struct MSpan MSpan; |
typedef struct MStats MStats; |
typedef struct MLink MLink; |
|
enum |
{ |
PageShift = 12, |
PageSize = 1<<PageShift, |
PageMask = PageSize - 1, |
}; |
typedef uintptr PageID; // address >> PageShift |
|
enum |
{ |
// Computed constant. The definition of MaxSmallSize and the |
// algorithm in msize.c produce some number of different allocation |
// size classes. NumSizeClasses is that number. It's needed here |
// because there are static arrays of this length; when msize runs its |
// size choosing algorithm it double-checks that NumSizeClasses agrees. |
NumSizeClasses = 61, |
|
// Tunable constants. |
MaxSmallSize = 32<<10, |
|
FixAllocChunk = 128<<10, // Chunk size for FixAlloc |
MaxMCacheListLen = 256, // Maximum objects on MCacheList |
MaxMCacheSize = 2<<20, // Maximum bytes in one MCache |
MaxMHeapList = 1<<(20 - PageShift), // Maximum page length for fixed-size list in MHeap. |
HeapAllocChunk = 1<<20, // Chunk size for heap growth |
|
// Number of bits in page to span calculations (4k pages). |
// On 64-bit, we limit the arena to 16G, so 22 bits suffices. |
// On 32-bit, we don't bother limiting anything: 20 bits for 4G. |
#if __SIZEOF_POINTER__ == 8 |
MHeapMap_Bits = 22, |
#else |
MHeapMap_Bits = 20, |
#endif |
|
// Max number of threads to run garbage collection. |
// 2, 3, and 4 are all plausible maximums depending |
// on the hardware details of the machine. The garbage |
// collector scales well to 4 cpus. |
MaxGcproc = 4, |
}; |
|
// A generic linked list of blocks. (Typically the block is bigger than sizeof(MLink).) |
struct MLink |
{ |
MLink *next; |
}; |
|
// SysAlloc obtains a large chunk of zeroed memory from the |
// operating system, typically on the order of a hundred kilobytes |
// or a megabyte. If the pointer argument is non-nil, the caller |
// wants a mapping there or nowhere. |
// |
// SysUnused notifies the operating system that the contents |
// of the memory region are no longer needed and can be reused |
// for other purposes. The program reserves the right to start |
// accessing those pages in the future. |
// |
// SysFree returns the memory to the operating system unconditionally; |
// it is only used if an out-of-memory error has been detected midway |
// through an allocation. It is okay if SysFree is a no-op. |
// |
// SysReserve reserves address space without allocating memory. |
// If the pointer passed to it is non-nil, the caller wants the |
// reservation there, but SysReserve can still choose another |
// location if that one is unavailable. |
// |
// SysMap maps previously reserved address space for use. |
|
void* runtime_SysAlloc(uintptr nbytes); |
void runtime_SysFree(void *v, uintptr nbytes); |
void runtime_SysUnused(void *v, uintptr nbytes); |
void runtime_SysMap(void *v, uintptr nbytes); |
void* runtime_SysReserve(void *v, uintptr nbytes); |
|
// FixAlloc is a simple free-list allocator for fixed size objects. |
// Malloc uses a FixAlloc wrapped around SysAlloc to manage its |
// MCache and MSpan objects. |
// |
// Memory returned by FixAlloc_Alloc is not zeroed. |
// The caller is responsible for locking around FixAlloc calls. |
// Callers can keep state in the object but the first word is |
// smashed by freeing and reallocating. |
struct FixAlloc |
{ |
uintptr size; |
void *(*alloc)(uintptr); |
void (*first)(void *arg, byte *p); // called first time p is returned |
void *arg; |
MLink *list; |
byte *chunk; |
uint32 nchunk; |
uintptr inuse; // in-use bytes now |
uintptr sys; // bytes obtained from system |
}; |
|
void runtime_FixAlloc_Init(FixAlloc *f, uintptr size, void *(*alloc)(uintptr), void (*first)(void*, byte*), void *arg); |
void* runtime_FixAlloc_Alloc(FixAlloc *f); |
void runtime_FixAlloc_Free(FixAlloc *f, void *p); |
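
// Illustrative sketch (hypothetical, #if 0'd out of the build): typical |
// FixAlloc usage, loosely modeled on how the heap sets up its span |
// allocator. The caller supplies the backing allocator and does its |
// own locking. |
#if 0 |
static FixAlloc span_fixalloc; |

static void |
fixalloc_example(void) |
{ |
	MSpan *s; |

	runtime_FixAlloc_Init(&span_fixalloc, sizeof(MSpan), |
			      runtime_SysAlloc, nil, nil); |
	s = runtime_FixAlloc_Alloc(&span_fixalloc);  // memory not zeroed |
	runtime_FixAlloc_Free(&span_fixalloc, s);    // smashes first word |
} |
#endif |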
|
|
// Statistics. |
// Shared with Go: if you edit this structure, also edit extern.go. |
struct MStats |
{ |
// General statistics. |
uint64 alloc; // bytes allocated and still in use |
uint64 total_alloc; // bytes allocated (even if freed) |
uint64 sys; // bytes obtained from system (should be sum of xxx_sys below, no locking, approximate) |
uint64 nlookup; // number of pointer lookups |
uint64 nmalloc; // number of mallocs |
uint64 nfree; // number of frees |
|
// Statistics about malloc heap. |
// protected by mheap.Lock |
uint64 heap_alloc; // bytes allocated and still in use |
uint64 heap_sys; // bytes obtained from system |
uint64 heap_idle; // bytes in idle spans |
uint64 heap_inuse; // bytes in non-idle spans |
uint64 heap_objects; // total number of allocated objects |
|
// Statistics about allocation of low-level fixed-size structures. |
// Protected by FixAlloc locks. |
uint64 stacks_inuse; // bootstrap stacks |
uint64 stacks_sys; |
uint64 mspan_inuse; // MSpan structures |
uint64 mspan_sys; |
uint64 mcache_inuse; // MCache structures |
uint64 mcache_sys; |
uint64 buckhash_sys; // profiling bucket hash table |
|
// Statistics about garbage collector. |
// Protected by stopping the world during GC. |
uint64 next_gc; // next GC (in heap_alloc time) |
uint64 pause_total_ns; |
uint64 pause_ns[256]; |
uint32 numgc; |
bool enablegc; |
bool debuggc; |
|
// Statistics about allocation size classes. |
struct { |
uint32 size; |
uint64 nmalloc; |
uint64 nfree; |
} by_size[NumSizeClasses]; |
}; |
|
extern MStats mstats |
__asm__ ("libgo_runtime.runtime.VmemStats"); |
|
|
// Size classes. Computed and initialized by InitSizes. |
// |
// SizeToClass(0 <= n <= MaxSmallSize) returns the size class, |
// 1 <= sizeclass < NumSizeClasses, for n. |
// Size class 0 is reserved to mean "not small". |
// |
// class_to_size[i] = largest size in class i |
// class_to_allocnpages[i] = number of pages to allocate when |
// making new objects in class i |
// class_to_transfercount[i] = number of objects to move when |
// taking a bunch of objects out of the central lists |
// and putting them in the thread free list. |
|
int32 runtime_SizeToClass(int32); |
extern int32 runtime_class_to_size[NumSizeClasses]; |
extern int32 runtime_class_to_allocnpages[NumSizeClasses]; |
extern int32 runtime_class_to_transfercount[NumSizeClasses]; |
extern void runtime_InitSizes(void); |
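
// Illustrative sketch (hypothetical, #if 0'd out of the build): |
// rounding a small request up with the tables above. |
#if 0 |
static int32 |
rounded_size_example(int32 n)	// requires 0 <= n <= MaxSmallSize |
{ |
	return runtime_class_to_size[runtime_SizeToClass(n)]; |
} |
#endif |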
|
|
// Per-thread (in Go, per-M) cache for small objects. |
// No locking needed because it is per-thread (per-M). |
typedef struct MCacheList MCacheList; |
struct MCacheList |
{ |
MLink *list; |
uint32 nlist; |
uint32 nlistmin; |
}; |
|
struct MCache |
{ |
MCacheList list[NumSizeClasses]; |
uint64 size; |
int64 local_cachealloc; // bytes allocated (or freed) from cache since last lock of heap |
int64 local_objects; // objects allocated (or freed) from cache since last lock of heap |
int64 local_alloc; // bytes allocated (or freed) since last lock of heap |
int64 local_total_alloc; // bytes allocated (even if freed) since last lock of heap |
int64 local_nmalloc; // number of mallocs since last lock of heap |
int64 local_nfree; // number of frees since last lock of heap |
int64 local_nlookup; // number of pointer lookups since last lock of heap |
int32 next_sample; // trigger heap sample after allocating this many bytes |
// Statistics about allocation size classes since last lock of heap |
struct { |
int64 nmalloc; |
int64 nfree; |
} local_by_size[NumSizeClasses]; |
|
}; |
|
void* runtime_MCache_Alloc(MCache *c, int32 sizeclass, uintptr size, int32 zeroed); |
void runtime_MCache_Free(MCache *c, void *p, int32 sizeclass, uintptr size); |
void runtime_MCache_ReleaseAll(MCache *c); |
|
// An MSpan is a run of pages. |
enum |
{ |
MSpanInUse = 0, |
MSpanFree, |
MSpanListHead, |
MSpanDead, |
}; |
struct MSpan |
{ |
MSpan *next; // in a span linked list |
MSpan *prev; // in a span linked list |
MSpan *allnext; // in the list of all spans |
PageID start; // starting page number |
uintptr npages; // number of pages in span |
MLink *freelist; // list of free objects |
uint32 ref; // number of allocated objects in this span |
uint32 sizeclass; // size class |
uint32 state; // MSpanInUse etc |
byte *limit; // end of data in span |
}; |
|
void runtime_MSpan_Init(MSpan *span, PageID start, uintptr npages); |
|
// Every MSpan is in one doubly-linked list, |
// either one of the MHeap's free lists or one of the |
// MCentral's span lists. We use empty MSpan structures as list heads. |
void runtime_MSpanList_Init(MSpan *list); |
bool runtime_MSpanList_IsEmpty(MSpan *list); |
void runtime_MSpanList_Insert(MSpan *list, MSpan *span); |
void runtime_MSpanList_Remove(MSpan *span); // from whatever list it is in |
|
|
// Central list of free objects of a given size. |
struct MCentral |
{ |
Lock; |
int32 sizeclass; |
MSpan nonempty; |
MSpan empty; |
int32 nfree; |
}; |
|
void runtime_MCentral_Init(MCentral *c, int32 sizeclass); |
int32 runtime_MCentral_AllocList(MCentral *c, int32 n, MLink **first); |
void runtime_MCentral_FreeList(MCentral *c, int32 n, MLink *first); |
|
// Main malloc heap. |
// The heap itself is the "free[]" array and the "large" span list, |
// but all the other global data is here too. |
struct MHeap |
{ |
Lock; |
MSpan free[MaxMHeapList]; // free lists of given length |
MSpan large; // free lists length >= MaxMHeapList |
MSpan *allspans; |
|
// span lookup |
MSpan *map[1<<MHeapMap_Bits]; |
|
// range of addresses we might see in the heap |
byte *bitmap; |
uintptr bitmap_mapped; |
byte *arena_start; |
byte *arena_used; |
byte *arena_end; |
|
// central free lists for small size classes. |
// the union makes sure that the MCentrals are |
// spaced CacheLineSize bytes apart, so that each MCentral.Lock |
// gets its own cache line. |
union { |
MCentral; |
byte pad[CacheLineSize]; |
} central[NumSizeClasses]; |
|
FixAlloc spanalloc; // allocator for Span* |
FixAlloc cachealloc; // allocator for MCache* |
}; |
extern MHeap runtime_mheap; |
|
void runtime_MHeap_Init(MHeap *h, void *(*allocator)(uintptr)); |
MSpan* runtime_MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct); |
void runtime_MHeap_Free(MHeap *h, MSpan *s, int32 acct); |
MSpan* runtime_MHeap_Lookup(MHeap *h, void *v); |
MSpan* runtime_MHeap_LookupMaybe(MHeap *h, void *v); |
void runtime_MGetSizeClassInfo(int32 sizeclass, uintptr *size, int32 *npages, int32 *nobj); |
void* runtime_MHeap_SysAlloc(MHeap *h, uintptr n); |
void runtime_MHeap_MapBits(MHeap *h); |
|
void* runtime_mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed); |
int32 runtime_mlookup(void *v, byte **base, uintptr *size, MSpan **s); |
void runtime_gc(int32 force); |
void runtime_markallocated(void *v, uintptr n, bool noptr); |
void runtime_checkallocated(void *v, uintptr n); |
void runtime_markfreed(void *v, uintptr n); |
void runtime_checkfreed(void *v, uintptr n); |
int32 runtime_checking; |
void runtime_markspan(void *v, uintptr size, uintptr n, bool leftover); |
void runtime_unmarkspan(void *v, uintptr size); |
bool runtime_blockspecial(void*); |
void runtime_setblockspecial(void*, bool); |
void runtime_purgecachedstats(M*); |
|
enum |
{ |
// flags to malloc |
FlagNoPointers = 1<<0, // no pointers here |
FlagNoProfiling = 1<<1, // must not profile |
FlagNoGC = 1<<2, // must not free or scan for pointers |
}; |
|
void runtime_MProf_Malloc(void*, uintptr); |
void runtime_MProf_Free(void*, uintptr); |
void runtime_MProf_Mark(void (*scan)(byte *, int64)); |
int32 runtime_helpgc(bool*); |
void runtime_gchelper(void); |
|
// Malloc profiling settings. |
// Must match definition in extern.go. |
enum { |
MProf_None = 0, |
MProf_Sample = 1, |
MProf_All = 2, |
}; |
extern int32 runtime_malloc_profile; |
|
struct __go_func_type; |
bool runtime_getfinalizer(void *p, bool del, void (**fn)(void*), const struct __go_func_type **ft); |
void runtime_walkfintab(void (*fn)(void*), void (*scan)(byte *, int64)); |
/go-make-slice.c
0,0 → 1,83
/* go-make-slice.c -- make a slice. |
|
Copyright 2011 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include <stdint.h> |
|
#include "go-alloc.h" |
#include "go-assert.h" |
#include "go-panic.h" |
#include "go-type.h" |
#include "array.h" |
#include "runtime.h" |
#include "arch.h" |
#include "malloc.h" |
|
struct __go_open_array |
__go_make_slice2 (const struct __go_type_descriptor *td, uintptr_t len, |
uintptr_t cap) |
{ |
const struct __go_slice_type* std; |
int ilen; |
int icap; |
uintptr_t size; |
struct __go_open_array ret; |
unsigned int flag; |
|
__go_assert (td->__code == GO_SLICE); |
std = (const struct __go_slice_type *) td; |
|
ilen = (int) len; |
if (ilen < 0 || (uintptr_t) ilen != len) |
runtime_panicstring ("makeslice: len out of range"); |
|
icap = (int) cap; |
if (cap < len |
|| (uintptr_t) icap != cap |
|| (std->__element_type->__size > 0 |
&& cap > (uintptr_t) -1U / std->__element_type->__size)) |
runtime_panicstring ("makeslice: cap out of range"); |
|
ret.__count = ilen; |
ret.__capacity = icap; |
|
size = cap * std->__element_type->__size; |
flag = ((std->__element_type->__code & GO_NO_POINTERS) != 0 |
? FlagNoPointers |
: 0); |
ret.__values = runtime_mallocgc (size, flag, 1, 1); |
|
return ret; |
} |
|
struct __go_open_array |
__go_make_slice1 (const struct __go_type_descriptor *td, uintptr_t len) |
{ |
return __go_make_slice2 (td, len, len); |
} |
|
struct __go_open_array |
__go_make_slice2_big (const struct __go_type_descriptor *td, uint64_t len, |
uint64_t cap) |
{ |
uintptr_t slen; |
uintptr_t scap; |
|
slen = (uintptr_t) len; |
if ((uint64_t) slen != len) |
runtime_panicstring ("makeslice: len out of range"); |
|
scap = (uintptr_t) cap; |
if ((uint64_t) scap != cap) |
runtime_panicstring ("makeslice: cap out of range"); |
|
return __go_make_slice2 (td, slen, scap); |
} |
|
struct __go_open_array |
__go_make_slice1_big (const struct __go_type_descriptor *td, uint64_t len) |
{ |
return __go_make_slice2_big (td, len, len); |
} |
/go-nosys.c
0,0 → 1,220
/* go-nosys.c -- functions missing from system. |
|
Copyright 2012 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
/* This file exists to provide definitions for functions that are |
missing from libc, according to the configure script. This permits |
the Go syscall package to not worry about whether the functions |
exist or not. */ |
|
#include "config.h" |
|
#include <errno.h> |
#include <fcntl.h> |
#include <stdint.h> |
#include <sys/types.h> |
#include <sys/stat.h> |
#include <sys/time.h> |
#include <unistd.h> |
|
#ifndef HAVE_OFF64_T |
typedef signed int off64_t __attribute__ ((mode (DI))); |
#endif |
|
#ifndef HAVE_LOFF_T |
typedef off64_t loff_t; |
#endif |
|
#ifndef HAVE_EPOLL_CREATE1 |
int |
epoll_create1 (int flags __attribute__ ((unused))) |
{ |
errno = ENOSYS; |
return -1; |
} |
#endif |
|
#ifndef HAVE_FACCESSAT |
int |
faccessat (int fd __attribute__ ((unused)), |
const char *pathname __attribute__ ((unused)), |
int mode __attribute__ ((unused)), |
int flags __attribute__ ((unused))) |
{ |
errno = ENOSYS; |
return -1; |
} |
#endif |
|
#ifndef HAVE_FALLOCATE |
int |
fallocate (int fd __attribute__ ((unused)), |
int mode __attribute__ ((unused)), |
off_t offset __attribute__ ((unused)), |
off_t len __attribute__ ((unused))) |
{ |
errno = ENOSYS; |
return -1; |
} |
#endif |
|
#ifndef HAVE_FCHMODAT |
int |
fchmodat (int dirfd __attribute__ ((unused)), |
const char *pathname __attribute__ ((unused)), |
mode_t mode __attribute__ ((unused)), |
int flags __attribute__ ((unused))) |
{ |
errno = ENOSYS; |
return -1; |
} |
#endif |
|
#ifndef HAVE_FCHOWNAT |
int |
fchownat (int dirfd __attribute__ ((unused)), |
const char *pathname __attribute__ ((unused)), |
uid_t owner __attribute__ ((unused)), |
gid_t group __attribute__ ((unused)), |
int flags __attribute__ ((unused))) |
{ |
errno = ENOSYS; |
return -1; |
} |
#endif |
|
#ifndef HAVE_FUTIMESAT |
int |
futimesat (int dirfd __attribute__ ((unused)), |
const char *pathname __attribute__ ((unused)), |
const struct timeval times[2] __attribute__ ((unused))) |
{ |
errno = ENOSYS; |
return -1; |
} |
#endif |
|
#ifndef HAVE_INOTIFY_ADD_WATCH |
int |
inotify_add_watch (int fd __attribute__ ((unused)), |
const char* pathname __attribute__ ((unused)), |
uint32_t mask __attribute__ ((unused))) |
{ |
errno = ENOSYS; |
return -1; |
} |
#endif |
|
#ifndef HAVE_INOTIFY_INIT |
int |
inotify_init (void) |
{ |
errno = ENOSYS; |
return -1; |
} |
#endif |
|
#ifndef HAVE_INOTIFY_RM_WATCH |
int |
inotify_rm_watch (int fd __attribute__ ((unused)), |
uint32_t wd __attribute__ ((unused))) |
{ |
errno = ENOSYS; |
return -1; |
} |
#endif |
|
#ifndef HAVE_MKDIRAT |
int |
mkdirat (int dirfd __attribute__ ((unused)), |
const char *pathname __attribute__ ((unused)), |
mode_t mode __attribute__ ((unused))) |
{ |
errno = ENOSYS; |
return -1; |
} |
#endif |
|
#ifndef HAVE_MKNODAT |
int |
mknodat (int dirfd __attribute__ ((unused)), |
const char *pathname __attribute__ ((unused)), |
mode_t mode __attribute__ ((unused)), |
dev_t dev __attribute__ ((unused))) |
{ |
errno = ENOSYS; |
return -1; |
} |
#endif |
|
#ifndef HAVE_OPENAT |
int |
openat (int dirfd __attribute__ ((unused)), |
const char *pathname __attribute__ ((unused)), |
int oflag __attribute__ ((unused)), |
...) |
{ |
errno = ENOSYS; |
return -1; |
} |
#endif |
|
#ifndef HAVE_RENAMEAT |
int |
renameat (int olddirfd __attribute__ ((unused)), |
const char *oldpath __attribute__ ((unused)), |
int newdirfd __attribute__ ((unused)), |
const char *newpath __attribute__ ((unused))) |
{ |
errno = ENOSYS; |
return -1; |
} |
#endif |
|
#ifndef HAVE_SPLICE |
int |
splice (int fd __attribute__ ((unused)), |
loff_t *off_in __attribute__ ((unused)), |
int fd_out __attribute__ ((unused)), |
loff_t *off_out __attribute__ ((unused)), |
size_t len __attribute__ ((unused)), |
unsigned int flags __attribute__ ((unused))) |
{ |
errno = ENOSYS; |
return -1; |
} |
#endif |
|
#ifndef HAVE_TEE |
int |
tee (int fd_in __attribute__ ((unused)), |
int fd_out __attribute__ ((unused)), |
size_t len __attribute__ ((unused)), |
unsigned int flags __attribute__ ((unused))) |
{ |
errno = ENOSYS; |
return -1; |
} |
#endif |
|
#ifndef HAVE_UNLINKAT |
int |
unlinkat (int dirfd __attribute__ ((unused)), |
const char *pathname __attribute__ ((unused)), |
int flags __attribute__ ((unused))) |
{ |
errno = ENOSYS; |
return -1; |
} |
#endif |
|
#ifndef HAVE_UNSHARE |
int |
unshare (int flags __attribute__ ((unused))) |
{ |
errno = ENOSYS; |
return -1; |
} |
#endif |
/go-can-convert-interface.c
0,0 → 1,76
/* go-can-convert-interface.c -- can we convert to an interface? |
|
Copyright 2009 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include "go-assert.h" |
#include "go-type.h" |
#include "interface.h" |
|
/* Return whether we can convert from the type in FROM_DESCRIPTOR to |
the interface in TO_DESCRIPTOR. This is used for type |
switches. */ |
|
_Bool |
__go_can_convert_to_interface ( |
const struct __go_type_descriptor *to_descriptor, |
const struct __go_type_descriptor *from_descriptor) |
{ |
const struct __go_interface_type *to_interface; |
int to_method_count; |
const struct __go_interface_method *to_method; |
const struct __go_uncommon_type *from_uncommon; |
int from_method_count; |
const struct __go_method *from_method; |
int i; |
|
/* In a type switch FROM_DESCRIPTOR can be NULL. */ |
if (from_descriptor == NULL) |
return 0; |
|
__go_assert (to_descriptor->__code == GO_INTERFACE); |
to_interface = (const struct __go_interface_type *) to_descriptor; |
to_method_count = to_interface->__methods.__count; |
to_method = ((const struct __go_interface_method *) |
to_interface->__methods.__values); |
|
from_uncommon = from_descriptor->__uncommon; |
if (from_uncommon == NULL) |
{ |
from_method_count = 0; |
from_method = NULL; |
} |
else |
{ |
from_method_count = from_uncommon->__methods.__count; |
from_method = ((const struct __go_method *) |
from_uncommon->__methods.__values); |
} |
|
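  /* Both method tables are sorted by name, so a single merge-style |
     pass over FROM's methods can match every method of TO.  */ |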
for (i = 0; i < to_method_count; ++i) |
{ |
while (from_method_count > 0 |
&& (!__go_ptr_strings_equal (from_method->__name, |
to_method->__name) |
|| !__go_ptr_strings_equal (from_method->__pkg_path, |
to_method->__pkg_path))) |
{ |
++from_method; |
--from_method_count; |
} |
|
if (from_method_count == 0) |
return 0; |
|
if (!__go_type_descriptors_equal (from_method->__mtype, |
to_method->__type)) |
return 0; |
|
++to_method; |
++from_method; |
--from_method_count; |
} |
|
return 1; |
} |
/go-int-to-string.c
0,0 → 1,61
/* go-int-to-string.c -- convert an integer to a string in Go. |
|
Copyright 2009 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include "go-string.h" |
#include "runtime.h" |
#include "arch.h" |
#include "malloc.h" |
|
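/* Worked example (illustrative): v = 0x4e16 encodes as the three-byte |
   UTF-8 sequence e4 b8 96; any v above 0x10ffff is first replaced by |
   U+FFFD, the replacement character.  */ |
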
struct __go_string |
__go_int_to_string (int v) |
{ |
char buf[4]; |
int len; |
unsigned char *retdata; |
struct __go_string ret; |
|
if (v <= 0x7f) |
{ |
buf[0] = v; |
len = 1; |
} |
else if (v <= 0x7ff) |
{ |
buf[0] = 0xc0 + (v >> 6); |
buf[1] = 0x80 + (v & 0x3f); |
len = 2; |
} |
else |
{ |
/* If the value is out of range for UTF-8, turn it into the |
"replacement character". */ |
if (v > 0x10ffff) |
v = 0xfffd; |
|
if (v <= 0xffff) |
{ |
buf[0] = 0xe0 + (v >> 12); |
buf[1] = 0x80 + ((v >> 6) & 0x3f); |
buf[2] = 0x80 + (v & 0x3f); |
len = 3; |
} |
else |
{ |
buf[0] = 0xf0 + (v >> 18); |
buf[1] = 0x80 + ((v >> 12) & 0x3f); |
buf[2] = 0x80 + ((v >> 6) & 0x3f); |
buf[3] = 0x80 + (v & 0x3f); |
len = 4; |
} |
} |
|
retdata = runtime_mallocgc (len, FlagNoPointers, 1, 0); |
__builtin_memcpy (retdata, buf, len); |
ret.__data = retdata; |
ret.__length = len; |
|
return ret; |
} |
/runtime.c
0,0 → 1,217
// Copyright 2009 The Go Authors. All rights reserved. |
// Use of this source code is governed by a BSD-style |
// license that can be found in the LICENSE file. |
|
#include <unistd.h> |
|
#include "runtime.h" |
#include "array.h" |
#include "go-panic.h" |
#include "go-string.h" |
|
uint32 runtime_panicking; |
|
static Lock paniclk; |
|
void |
runtime_startpanic(void) |
{ |
M *m; |
|
m = runtime_m(); |
if(m->dying) { |
runtime_printf("panic during panic\n"); |
runtime_exit(3); |
} |
m->dying = 1; |
runtime_xadd(&runtime_panicking, 1); |
runtime_lock(&paniclk); |
} |
|
void |
runtime_dopanic(int32 unused __attribute__ ((unused))) |
{ |
/* |
static bool didothers; |
|
if(g->sig != 0) |
runtime_printf("[signal %x code=%p addr=%p pc=%p]\n", |
g->sig, g->sigcode0, g->sigcode1, g->sigpc); |
|
if(runtime_gotraceback()){ |
if(!didothers) { |
didothers = true; |
runtime_tracebackothers(g); |
} |
} |
*/ |
|
runtime_unlock(&paniclk); |
if(runtime_xadd(&runtime_panicking, -1) != 0) { |
// Some other m is panicking too. |
// Let it print what it needs to print. |
// Wait forever without chewing up cpu. |
// It will exit when it's done. |
static Lock deadlock; |
runtime_lock(&deadlock); |
runtime_lock(&deadlock); |
} |
|
runtime_exit(2); |
} |
|
void |
runtime_throw(const char *s) |
{ |
runtime_startpanic(); |
runtime_printf("throw: %s\n", s); |
runtime_dopanic(0); |
*(int32*)0 = 0; // not reached |
runtime_exit(1); // even more not reached |
} |
|
void |
runtime_panicstring(const char *s) |
{ |
Eface err; |
|
if(runtime_m()->gcing) { |
runtime_printf("panic: %s\n", s); |
runtime_throw("panic during gc"); |
} |
runtime_newErrorString(runtime_gostringnocopy((const byte*)s), &err); |
runtime_panic(err); |
} |
|
static int32 argc; |
static byte** argv; |
|
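/* END == -1 means "through the end of the string", which lets the |
   front end lower s[start:] without first computing the length.  */ |
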
extern Slice os_Args asm ("libgo_os.os.Args"); |
extern Slice syscall_Envs asm ("libgo_syscall.syscall.Envs"); |
|
void |
runtime_args(int32 c, byte **v) |
{ |
argc = c; |
argv = v; |
} |
|
void |
runtime_goargs(void) |
{ |
String *s; |
int32 i; |
|
// for windows implementation see "os" package |
if(Windows) |
return; |
|
s = runtime_malloc(argc*sizeof s[0]); |
for(i=0; i<argc; i++) |
s[i] = runtime_gostringnocopy((const byte*)argv[i]); |
os_Args.__values = (void*)s; |
os_Args.__count = argc; |
os_Args.__capacity = argc; |
} |
|
void |
runtime_goenvs_unix(void) |
{ |
String *s; |
int32 i, n; |
|
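	// The environment block sits just past the NULL that terminates |
	// argv, i.e. at argv[argc+1]. |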
for(n=0; argv[argc+1+n] != 0; n++) |
; |
|
s = runtime_malloc(n*sizeof s[0]); |
for(i=0; i<n; i++) |
s[i] = runtime_gostringnocopy(argv[argc+1+i]); |
syscall_Envs.__values = (void*)s; |
syscall_Envs.__count = n; |
syscall_Envs.__capacity = n; |
} |
|
const byte* |
runtime_getenv(const char *s) |
{ |
int32 i, j, len; |
const byte *v, *bs; |
String* envv; |
int32 envc; |
|
bs = (const byte*)s; |
len = runtime_findnull(bs); |
envv = (String*)syscall_Envs.__values; |
envc = syscall_Envs.__count; |
for(i=0; i<envc; i++){ |
if(envv[i].__length <= len) |
continue; |
v = (const byte*)envv[i].__data; |
for(j=0; j<len; j++) |
if(bs[j] != v[j]) |
goto nomatch; |
if(v[len] != '=') |
goto nomatch; |
return v+len+1; |
nomatch:; |
} |
return nil; |
} |
|
int32 |
runtime_atoi(const byte *p) |
{ |
int32 n; |
|
n = 0; |
while('0' <= *p && *p <= '9') |
n = n*10 + *p++ - '0'; |
return n; |
} |
|
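// Cheap per-M pseudo-random generator (an LFSR-style step): shift the |
// state left one bit and, when the new top bit is set, fold in the |
// feedback constant 0x88888eef. Per-M state means no locking. |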
uint32 |
runtime_fastrand1(void) |
{ |
M *m; |
uint32 x; |
|
m = runtime_m(); |
x = m->fastrand; |
x += x; |
if(x & 0x80000000L) |
x ^= 0x88888eefUL; |
m->fastrand = x; |
return x; |
} |
|
int64 |
runtime_cputicks(void) |
{ |
#if defined(__i386__) || defined(__x86_64__) |
uint32 low, high; |
asm("rdtsc" : "=a" (low), "=d" (high)); |
return (int64)(((uint64)high << 32) | (uint64)low); |
#else |
// FIXME: implement for other processors. |
return 0; |
#endif |
} |
|
struct funcline_go_return |
{ |
String retfile; |
int32 retline; |
}; |
|
struct funcline_go_return |
runtime_funcline_go(void *f, uintptr targetpc) |
__asm__("libgo_runtime.runtime.funcline_go"); |
|
struct funcline_go_return |
runtime_funcline_go(void *f __attribute__((unused)), |
uintptr targetpc __attribute__((unused))) |
{ |
struct funcline_go_return ret; |
runtime_memclr(&ret, sizeof ret); |
return ret; |
} |
/time.goc
0,0 → 1,263
// Copyright 2009 The Go Authors. All rights reserved. |
// Use of this source code is governed by a BSD-style |
// license that can be found in the LICENSE file. |
|
// Time-related runtime and pieces of package time. |
|
package time |
|
#include "runtime.h" |
#include "defs.h" |
#include "arch.h" |
#include "malloc.h" |
|
static Timers timers; |
static void addtimer(Timer*); |
static bool deltimer(Timer*); |
|
// Package time APIs. |
// Godoc uses the comments in package time, not these. |
|
// time.now is implemented in assembly. |
|
// Sleep puts the current goroutine to sleep for at least ns nanoseconds. |
func Sleep(ns int64) { |
G *g; |
|
g = runtime_g(); |
g->status = Gwaiting; |
g->waitreason = "sleep"; |
runtime_tsleep(ns); |
} |
|
// startTimer adds t to the timer heap. |
func startTimer(t *Timer) { |
addtimer(t); |
} |
|
// stopTimer removes t from the timer heap if it is there. |
// It returns true if t was removed, false if t wasn't even there. |
func stopTimer(t *Timer) (stopped bool) { |
stopped = deltimer(t); |
} |
|
// C runtime. |
|
static void timerproc(void*); |
static void siftup(int32); |
static void siftdown(int32); |
|
// Ready the goroutine e.data. |
static void |
ready(int64 now, Eface e) |
{ |
USED(now); |
|
runtime_ready(e.__object); |
} |
|
// Put the current goroutine to sleep for ns nanoseconds. |
// The caller must have set g->status and g->waitreason. |
void |
runtime_tsleep(int64 ns) |
{ |
Timer t; |
|
if(ns <= 0) |
return; |
|
t.when = runtime_nanotime() + ns; |
t.period = 0; |
t.f = ready; |
t.arg.__object = runtime_g(); |
addtimer(&t); |
runtime_gosched(); |
} |
|
// Add a timer to the heap and start or kick the timer proc |
// if the new timer is earlier than any of the others. |
static void |
addtimer(Timer *t) |
{ |
int32 n; |
Timer **nt; |
|
runtime_lock(&timers); |
if(timers.len >= timers.cap) { |
// Grow slice. |
n = 16; |
if(n <= timers.cap) |
n = timers.cap*3 / 2; |
nt = runtime_malloc(n*sizeof nt[0]); |
runtime_memmove(nt, timers.t, timers.len*sizeof nt[0]); |
runtime_free(timers.t); |
timers.t = nt; |
timers.cap = n; |
} |
t->i = timers.len++; |
timers.t[t->i] = t; |
siftup(t->i); |
if(t->i == 0) { |
// siftup moved to top: new earliest deadline. |
if(timers.sleeping) { |
timers.sleeping = false; |
runtime_notewakeup(&timers.waitnote); |
} |
if(timers.rescheduling) { |
timers.rescheduling = false; |
runtime_ready(timers.timerproc); |
} |
} |
if(timers.timerproc == nil) |
timers.timerproc = __go_go(timerproc, nil); |
runtime_unlock(&timers); |
} |
|
// Delete timer t from the heap. |
// Do not need to update the timerproc: |
// if it wakes up early, no big deal. |
static bool |
deltimer(Timer *t) |
{ |
int32 i; |
|
runtime_lock(&timers); |
|
// t may not be registered anymore and may have |
// a bogus i (typically 0, if generated by Go). |
// Verify it before proceeding. |
i = t->i; |
if(i < 0 || i >= timers.len || timers.t[i] != t) { |
runtime_unlock(&timers); |
return false; |
} |
|
timers.len--; |
if(i == timers.len) { |
timers.t[i] = nil; |
} else { |
timers.t[i] = timers.t[timers.len]; |
timers.t[timers.len] = nil; |
timers.t[i]->i = i; |
siftup(i); |
siftdown(i); |
} |
runtime_unlock(&timers); |
return true; |
} |
|
// Timerproc runs the time-driven events. |
// It sleeps until the next event in the timers heap. |
// If addtimer inserts a new earlier event, addtimer |
// wakes timerproc early. |
static void |
timerproc(void* dummy __attribute__ ((unused))) |
{ |
G *g; |
int64 delta, now; |
Timer *t; |
void (*f)(int64, Eface); |
Eface arg; |
|
g = runtime_g(); |
for(;;) { |
runtime_lock(&timers); |
now = runtime_nanotime(); |
for(;;) { |
if(timers.len == 0) { |
delta = -1; |
break; |
} |
t = timers.t[0]; |
delta = t->when - now; |
if(delta > 0) |
break; |
if(t->period > 0) { |
// leave in heap but adjust next time to fire |
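				// delta <= 0 here; adding 1 + -delta/t->period |
				// periods moves when just past now, skipping any |
				// intervals missed while the proc was asleep. |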
t->when += t->period * (1 + -delta/t->period); |
siftdown(0); |
} else { |
// remove from heap |
timers.t[0] = timers.t[--timers.len]; |
timers.t[0]->i = 0; |
siftdown(0); |
t->i = -1; // mark as removed |
} |
f = t->f; |
arg = t->arg; |
runtime_unlock(&timers); |
f(now, arg); |
runtime_lock(&timers); |
} |
if(delta < 0) { |
// No timers left - put goroutine to sleep. |
timers.rescheduling = true; |
g->status = Gwaiting; |
g->waitreason = "timer goroutine (idle)"; |
runtime_unlock(&timers); |
runtime_gosched(); |
continue; |
} |
// At least one timer pending. Sleep until then. |
timers.sleeping = true; |
runtime_noteclear(&timers.waitnote); |
runtime_unlock(&timers); |
runtime_entersyscall(); |
runtime_notetsleep(&timers.waitnote, delta); |
runtime_exitsyscall(); |
} |
} |
|
// heap maintenance algorithms. |
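// The timers are kept in a binary min-heap ordered by t->when: the |
// parent of i is (i-1)/2 and its children are 2*i+1 and 2*i+2. Every |
// swap also updates Timer.i, so deltimer can find a timer in O(1). |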
|
static void |
siftup(int32 i) |
{ |
int32 p; |
Timer **t, *tmp; |
|
t = timers.t; |
while(i > 0) { |
p = (i-1)/2; // parent |
if(t[i]->when >= t[p]->when) |
break; |
tmp = t[i]; |
t[i] = t[p]; |
t[p] = tmp; |
t[i]->i = i; |
t[p]->i = p; |
i = p; |
} |
} |
|
static void |
siftdown(int32 i) |
{ |
int32 c, len; |
Timer **t, *tmp; |
|
t = timers.t; |
len = timers.len; |
for(;;) { |
c = i*2 + 1; // left child |
if(c >= len) { |
break; |
} |
if(c+1 < len && t[c+1]->when < t[c]->when) |
c++; |
if(t[c]->when >= t[i]->when) |
break; |
tmp = t[i]; |
t[i] = t[c]; |
t[c] = tmp; |
t[i]->i = i; |
t[c]->i = c; |
i = c; |
} |
} |
|
void |
runtime_time_scan(void (*scan)(byte*, int64)) |
{ |
scan((byte*)&timers, sizeof timers); |
} |
/go-type-eface.c
0,0 → 1,59
/* go-type-eface.c -- hash and equality empty interface functions. |
|
Copyright 2010 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include "runtime.h" |
#include "interface.h" |
#include "go-type.h" |
|
/* A hash function for an empty interface. */ |
|
uintptr_t |
__go_type_hash_empty_interface (const void *vval, |
uintptr_t key_size __attribute__ ((unused))) |
{ |
const struct __go_empty_interface *val; |
const struct __go_type_descriptor *descriptor; |
uintptr_t size; |
|
val = (const struct __go_empty_interface *) vval; |
descriptor = val->__type_descriptor; |
if (descriptor == NULL) |
return 0; |
size = descriptor->__size; |
if (__go_is_pointer_type (descriptor)) |
return descriptor->__hashfn (&val->__object, size); |
else |
return descriptor->__hashfn (val->__object, size); |
} |
|
/* An equality function for an empty interface. */ |
|
_Bool |
__go_type_equal_empty_interface (const void *vv1, const void *vv2, |
uintptr_t key_size __attribute__ ((unused))) |
{ |
const struct __go_empty_interface *v1; |
const struct __go_empty_interface *v2; |
const struct __go_type_descriptor* v1_descriptor; |
const struct __go_type_descriptor* v2_descriptor; |
|
v1 = (const struct __go_empty_interface *) vv1; |
v2 = (const struct __go_empty_interface *) vv2; |
v1_descriptor = v1->__type_descriptor; |
v2_descriptor = v2->__type_descriptor; |
if (((uintptr_t) v1_descriptor & reflectFlags) != 0 |
|| ((uintptr_t) v2_descriptor & reflectFlags) != 0) |
runtime_panicstring ("invalid interface value"); |
if (v1_descriptor == NULL || v2_descriptor == NULL) |
return v1_descriptor == v2_descriptor; |
if (!__go_type_descriptors_equal (v1_descriptor, v2_descriptor)) |
return 0; |
if (__go_is_pointer_type (v1_descriptor)) |
return v1->__object == v2->__object; |
else |
return v1_descriptor->__equalfn (v1->__object, v2->__object, |
v1_descriptor->__size); |
} |
/go-main.c
0,0 → 1,56
/* go-main.c -- the main function for a Go program. |
|
Copyright 2009 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include "config.h" |
|
#include <stdlib.h> |
#include <time.h> |
#include <unistd.h> |
|
#ifdef HAVE_FPU_CONTROL_H |
#include <fpu_control.h> |
#endif |
|
#include "go-alloc.h" |
#include "array.h" |
#include "go-string.h" |
|
#include "runtime.h" |
#include "arch.h" |
#include "malloc.h" |
|
#undef int |
#undef char |
#undef unsigned |
|
/* The main function for a Go program. This records the command line |
parameters, calls the real main function, and returns a zero status |
if the real main function returns. */ |
|
extern char **environ; |
|
extern void runtime_main (void); |
static void mainstart (void *); |
|
/* The main function. */ |
|
int |
main (int argc, char **argv) |
{ |
runtime_initsig (0); |
runtime_args (argc, (byte **) argv); |
runtime_osinit (); |
runtime_schedinit (); |
__go_go (mainstart, NULL); |
runtime_mstart (runtime_m ()); |
abort (); |
} |
|
static void |
mainstart (void *arg __attribute__ ((unused))) |
{ |
runtime_main (); |
} |
/go-interface-compare.c
0,0 → 1,31
/* go-interface-compare.c -- compare two interface values. |
|
Copyright 2009 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include "interface.h" |
|
/* Compare two interface values. Return 0 for equal, nonzero for not |
   equal. The convention matches strcmp's zero-means-equal, but no |
   ordering is implied. */ |
|
int |
__go_interface_compare (struct __go_interface left, |
struct __go_interface right) |
{ |
const struct __go_type_descriptor *left_descriptor; |
|
if (left.__methods == NULL && right.__methods == NULL) |
return 0; |
if (left.__methods == NULL || right.__methods == NULL) |
return 1; |
left_descriptor = left.__methods[0]; |
if (!__go_type_descriptors_equal (left_descriptor, right.__methods[0])) |
return 1; |
if (__go_is_pointer_type (left_descriptor)) |
return left.__object == right.__object ? 0 : 1; |
if (!left_descriptor->__equalfn (left.__object, right.__object, |
left_descriptor->__size)) |
return 1; |
return 0; |
} |
/runtime.h
0,0 → 1,402
/* runtime.h -- runtime support for Go. |
|
Copyright 2009 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include "config.h" |
|
#include "go-assert.h" |
#include <setjmp.h> |
#include <signal.h> |
#include <stdio.h> |
#include <stdlib.h> |
#include <string.h> |
#include <sys/types.h> |
#include <sys/stat.h> |
#include <fcntl.h> |
#include <pthread.h> |
#include <semaphore.h> |
#include <ucontext.h> |
|
#ifdef HAVE_SYS_MMAN_H |
#include <sys/mman.h> |
#endif |
|
#include "array.h" |
#include "go-alloc.h" |
#include "go-panic.h" |
#include "go-string.h" |
|
/* This file supports C files copied from the 6g runtime library. |
This is a version of the 6g runtime.h rewritten for gccgo's version |
of the code. */ |
|
typedef signed int int8 __attribute__ ((mode (QI))); |
typedef unsigned int uint8 __attribute__ ((mode (QI))); |
typedef signed int int16 __attribute__ ((mode (HI))); |
typedef unsigned int uint16 __attribute__ ((mode (HI))); |
typedef signed int int32 __attribute__ ((mode (SI))); |
typedef unsigned int uint32 __attribute__ ((mode (SI))); |
typedef signed int int64 __attribute__ ((mode (DI))); |
typedef unsigned int uint64 __attribute__ ((mode (DI))); |
typedef float float32 __attribute__ ((mode (SF))); |
typedef double float64 __attribute__ ((mode (DF))); |
typedef unsigned int uintptr __attribute__ ((mode (pointer))); |
|
/* Defined types. */ |
|
typedef uint8 bool; |
typedef uint8 byte; |
typedef struct G G; |
typedef union Lock Lock; |
typedef struct M M; |
typedef union Note Note; |
typedef struct SigTab SigTab; |
typedef struct MCache MCache; |
typedef struct FixAlloc FixAlloc; |
typedef struct Hchan Hchan; |
typedef struct Timers Timers; |
typedef struct Timer Timer; |
|
typedef struct __go_open_array Slice; |
typedef struct __go_string String; |
typedef struct __go_interface Iface; |
typedef struct __go_empty_interface Eface; |
typedef struct __go_type_descriptor Type; |
typedef struct __go_defer_stack Defer; |
typedef struct __go_panic_stack Panic; |
|
typedef struct __go_func_type FuncType; |
typedef struct __go_map_type MapType; |
|
/* |
* per-cpu declaration. |
*/ |
extern M* runtime_m(void); |
extern G* runtime_g(void); |
|
extern M runtime_m0; |
extern G runtime_g0; |
|
/* |
* defined constants |
*/ |
enum |
{ |
// G status |
// |
// If you add to this list, add to the list |
// of "okay during garbage collection" status |
// in mgc0.c too. |
Gidle, |
Grunnable, |
Grunning, |
Gsyscall, |
Gwaiting, |
Gmoribund, |
Gdead, |
}; |
enum |
{ |
true = 1, |
false = 0, |
}; |
|
/* |
* structures |
*/ |
union Lock |
{ |
uint32 key; // futex-based impl |
M* waitm; // linked list of waiting M's (sema-based impl) |
}; |
union Note |
{ |
uint32 key; // futex-based impl |
M* waitm; // waiting M (sema-based impl) |
}; |
struct G |
{ |
Defer* defer; |
Panic* panic; |
void* exception; // current exception being thrown |
bool is_foreign; // whether current exception from other language |
void *gcstack; // if status==Gsyscall, gcstack = stackbase to use during gc |
uintptr gcstack_size; |
void* gcnext_segment; |
void* gcnext_sp; |
void* gcinitial_sp; |
jmp_buf gcregs; |
byte* entry; // initial function |
G* alllink; // on allg |
void* param; // passed parameter on wakeup |
bool fromgogo; // reached from gogo |
int16 status; |
int32 goid; |
uint32 selgen; // valid sudog pointer |
const char* waitreason; // if status==Gwaiting |
G* schedlink; |
bool readyonstop; |
bool ispanic; |
M* m; // for debuggers, but offset not hard-coded |
M* lockedm; |
M* idlem; |
// int32 sig; |
// uintptr sigcode0; |
// uintptr sigcode1; |
// uintptr sigpc; |
uintptr gopc; // pc of go statement that created this goroutine |
|
ucontext_t context; |
void* stack_context[10]; |
}; |
|
struct M |
{ |
G* g0; // goroutine with scheduling stack |
G* gsignal; // signal-handling G |
G* curg; // current running goroutine |
int32 id; |
int32 mallocing; |
int32 gcing; |
int32 locks; |
int32 nomemprof; |
int32 waitnextg; |
int32 dying; |
int32 profilehz; |
int32 helpgc; |
uint32 fastrand; |
Note havenextg; |
G* nextg; |
M* alllink; // on allm |
M* schedlink; |
MCache *mcache; |
G* lockedg; |
G* idleg; |
M* nextwaitm; // next M waiting for lock |
uintptr waitsema; // semaphore for parking on locks |
uint32 waitsemacount; |
uint32 waitsemalock; |
}; |
|
struct SigTab |
{ |
int32 sig; |
int32 flags; |
}; |
enum |
{ |
SigCatch = 1<<0, |
SigIgnore = 1<<1, |
SigRestart = 1<<2, |
SigQueue = 1<<3, |
SigPanic = 1<<4, |
}; |
|
/* Macros. */ |
|
#ifdef GOOS_windows |
enum { |
Windows = 1 |
}; |
#else |
enum { |
Windows = 0 |
}; |
#endif |
|
struct Timers |
{ |
Lock; |
G *timerproc; |
bool sleeping; |
bool rescheduling; |
Note waitnote; |
Timer **t; |
int32 len; |
int32 cap; |
}; |
|
// Package time knows the layout of this structure. |
// If this struct changes, adjust ../time/sleep.go:/runtimeTimer. |
struct Timer |
{ |
int32 i; // heap index |
|
// Timer wakes up at when, and then at when+period, ... (period > 0 only) |
// each time calling f(now, arg) in the timer goroutine, so f must be |
// a well-behaved function and not block. |
int64 when; |
int64 period; |
void (*f)(int64, Eface); |
Eface arg; |
}; |
|
/* |
* defined macros |
* you need super-gopher-guru privilege |
 * to add to this list. |
*/ |
#define nelem(x) (sizeof(x)/sizeof((x)[0])) |
#define nil ((void*)0) |
#define USED(v) ((void) v) |
|
/* |
* external data |
*/ |
G* runtime_allg; |
G* runtime_lastg; |
M* runtime_allm; |
extern int32 runtime_gomaxprocs; |
extern bool runtime_singleproc; |
extern uint32 runtime_panicking; |
extern int32 runtime_gcwaiting; // gc is waiting to run |
int32 runtime_ncpu; |
|
/* |
* common functions and data |
*/ |
int32 runtime_findnull(const byte*); |
|
/* |
* very low level c-called |
*/ |
void runtime_args(int32, byte**); |
void runtime_osinit(); |
void runtime_goargs(void); |
void runtime_goenvs(void); |
void runtime_goenvs_unix(void); |
void runtime_throw(const char*) __attribute__ ((noreturn)); |
void runtime_panicstring(const char*) __attribute__ ((noreturn)); |
void* runtime_mal(uintptr); |
void runtime_schedinit(void); |
void runtime_initsig(int32); |
String runtime_gostringnocopy(const byte*); |
void* runtime_mstart(void*); |
G* runtime_malg(int32, byte**, size_t*); |
void runtime_minit(void); |
void runtime_mallocinit(void); |
void runtime_gosched(void); |
void runtime_tsleep(int64); |
M* runtime_newm(void); |
void runtime_goexit(void); |
void runtime_entersyscall(void) __asm__("libgo_syscall.syscall.entersyscall"); |
void runtime_exitsyscall(void) __asm__("libgo_syscall.syscall.exitsyscall"); |
void siginit(void); |
bool __go_sigsend(int32 sig); |
int64 runtime_nanotime(void); |
int64 runtime_cputicks(void); |
|
void runtime_stoptheworld(void); |
void runtime_starttheworld(bool); |
G* __go_go(void (*pfn)(void*), void*); |
|
/* |
* mutual exclusion locks. in the uncontended case, |
* as fast as spin locks (just a few user-level instructions), |
* but on the contention path they sleep in the kernel. |
* a zeroed Lock is unlocked (no need to initialize each lock). |
*/ |
void runtime_lock(Lock*); |
void runtime_unlock(Lock*); |
|
/* |
* sleep and wakeup on one-time events. |
* before any calls to notesleep or notewakeup, |
* must call noteclear to initialize the Note. |
* then, exactly one thread can call notesleep |
* and exactly one thread can call notewakeup (once). |
* once notewakeup has been called, the notesleep |
* will return. future notesleep will return immediately. |
* subsequent noteclear must be called only after |
* previous notesleep has returned, e.g. it's disallowed |
* to call noteclear straight after notewakeup. |
* |
* notetsleep is like notesleep but wakes up after |
* a given number of nanoseconds even if the event |
* has not yet happened. if a goroutine uses notetsleep to |
* wake up early, it must wait to call noteclear until it |
* can be sure that no other goroutine is calling |
* notewakeup. |
*/ |
void runtime_noteclear(Note*); |
void runtime_notesleep(Note*); |
void runtime_notewakeup(Note*); |
void runtime_notetsleep(Note*, int64); |
|
/* |
* low-level synchronization for implementing the above |
*/ |
uintptr runtime_semacreate(void); |
int32 runtime_semasleep(int64); |
void runtime_semawakeup(M*); |
// or |
void runtime_futexsleep(uint32*, uint32, int64); |
void runtime_futexwakeup(uint32*, uint32); |
|
/* |
* runtime go-called |
*/ |
void runtime_panic(Eface); |
|
/* Functions. */ |
#define runtime_panic __go_panic |
#define runtime_printf printf |
#define runtime_malloc(s) __go_alloc(s) |
#define runtime_free(p) __go_free(p) |
#define runtime_strcmp(s1, s2) __builtin_strcmp((s1), (s2)) |
#define runtime_mcmp(a, b, s) __builtin_memcmp((a), (b), (s)) |
#define runtime_memmove(a, b, s) __builtin_memmove((a), (b), (s)) |
#define runtime_exit(s) exit(s) |
MCache* runtime_allocmcache(void); |
void free(void *v); |
struct __go_func_type; |
bool runtime_addfinalizer(void*, void(*fn)(void*), const struct __go_func_type *); |
#define runtime_cas(pval, old, new) __sync_bool_compare_and_swap (pval, old, new) |
#define runtime_casp(pval, old, new) __sync_bool_compare_and_swap (pval, old, new) |
#define runtime_xadd(p, v) __sync_add_and_fetch (p, v) |
#define runtime_xchg(p, v) __atomic_exchange_n (p, v, __ATOMIC_SEQ_CST) |
#define runtime_atomicload(p) __atomic_load_n (p, __ATOMIC_SEQ_CST) |
#define runtime_atomicstore(p, v) __atomic_store_n (p, v, __ATOMIC_SEQ_CST) |
#define runtime_atomicloadp(p) __atomic_load_n (p, __ATOMIC_SEQ_CST) |
#define runtime_atomicstorep(p, v) __atomic_store_n (p, v, __ATOMIC_SEQ_CST) |
|
void runtime_dopanic(int32) __attribute__ ((noreturn)); |
void runtime_startpanic(void); |
void runtime_ready(G*); |
const byte* runtime_getenv(const char*); |
int32 runtime_atoi(const byte*); |
uint32 runtime_fastrand1(void); |
|
void runtime_sigprof(uint8 *pc, uint8 *sp, uint8 *lr, G *gp); |
void runtime_resetcpuprofiler(int32); |
void runtime_setcpuprofilerate(void(*)(uintptr*, int32), int32); |
void runtime_usleep(uint32); |
|
void runtime_semacquire(uint32 volatile *); |
void runtime_semrelease(uint32 volatile *); |
int32 runtime_gomaxprocsfunc(int32 n); |
void runtime_procyield(uint32); |
void runtime_osyield(void); |
void runtime_LockOSThread(void) __asm__("libgo_runtime.runtime.LockOSThread"); |
void runtime_UnlockOSThread(void) __asm__("libgo_runtime.runtime.UnlockOSThread"); |
|
/* |
* low level C-called |
*/ |
#define runtime_mmap mmap |
#define runtime_munmap munmap |
#define runtime_madvise madvise |
#define runtime_memclr(buf, size) __builtin_memset((buf), 0, (size)) |
|
struct __go_func_type; |
void reflect_call(const struct __go_func_type *, const void *, _Bool, _Bool, |
void **, void **) |
asm ("libgo_reflect.reflect.call"); |
|
#ifdef __rtems__ |
void __wrap_rtems_task_variable_add(void **); |
#endif |
|
void runtime_time_scan(void (*)(byte*, int64)); |
/go-unreflect.c
0,0 → 1,34
/* go-unreflect.c -- implement unsafe.Unreflect for Go. |
|
Copyright 2009 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include "runtime.h" |
#include "go-alloc.h" |
#include "go-type.h" |
#include "interface.h" |
|
/* Implement unsafe.Unreflect. */ |
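|
/* Unreflect builds an empty interface value from a type descriptor |
   (carried as the payload of TYPE) and a pointer to a value.  When |
   the dynamic type is a pointer type, the interface's object word |
   holds the pointer value loaded from OBJECT; otherwise it holds |
   OBJECT itself. */ |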
|
struct __go_empty_interface Unreflect (struct __go_empty_interface type, |
void *object) |
asm ("libgo_unsafe.unsafe.Unreflect"); |
|
struct __go_empty_interface |
Unreflect (struct __go_empty_interface type, void *object) |
{ |
struct __go_empty_interface ret; |
|
if (((uintptr_t) type.__type_descriptor & reflectFlags) != 0) |
runtime_panicstring ("invalid interface value"); |
|
/* FIXME: We should check __type_descriptor to verify that this is |
really a type descriptor. */ |
ret.__type_descriptor = type.__object; |
if (__go_is_pointer_type (ret.__type_descriptor)) |
ret.__object = *(void **) object; |
else |
ret.__object = object; |
return ret; |
} |
/go-strslice.c
0,0 → 1,27
/* go-strslice.c -- the go string slice function. |
|
Copyright 2009 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include "go-string.h" |
#include "go-panic.h" |
#include "runtime.h" |
#include "arch.h" |
#include "malloc.h" |
|
struct __go_string |
__go_string_slice (struct __go_string s, int start, int end) |
{ |
int len; |
struct __go_string ret; |
|
len = s.__length; |
if (end == -1) |
end = len; |
if (start > len || end < start || end > len) |
runtime_panicstring ("string index out of bounds"); |
ret.__data = s.__data + start; |
ret.__length = end - start; |
return ret; |
} |
/go-map-len.c
0,0 → 1,23
/* go-map-len.c -- return the length of a map. |
|
Copyright 2009 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include <stddef.h> |
|
#include "go-assert.h" |
#include "map.h" |
|
/* Return the length of a map. This could be done inline, of course, |
but I'm doing it as a function for now to make it easy to change |
the map structure. */ |
|
int |
__go_map_len (struct __go_map *map) |
{ |
if (map == NULL) |
return 0; |
__go_assert (map->__element_count == (uintptr_t) (int) map->__element_count); |
return map->__element_count; |
} |
/go-panic.c
0,0 → 1,107
/* go-panic.c -- support for the go panic function. |
|
Copyright 2009 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include <stdio.h> |
#include <stdlib.h> |
|
#include "runtime.h" |
#include "arch.h" |
#include "malloc.h" |
#include "go-alloc.h" |
#include "go-defer.h" |
#include "go-panic.h" |
#include "go-string.h" |
#include "interface.h" |
|
/* Print the panic stack. This is used when there is no recover. */ |
|
static void |
__printpanics (struct __go_panic_stack *p) |
{ |
if (p->__next != NULL) |
{ |
__printpanics (p->__next); |
fprintf (stderr, "\t"); |
} |
fprintf (stderr, "panic: "); |
printany (p->__arg); |
if (p->__was_recovered) |
fprintf (stderr, " [recovered]"); |
fputc ('\n', stderr); |
} |
|
/* This implements __go_panic which is used for the panic |
function. */ |
|
void |
__go_panic (struct __go_empty_interface arg) |
{ |
G *g; |
struct __go_panic_stack *n; |
|
g = runtime_g (); |
|
n = (struct __go_panic_stack *) __go_alloc (sizeof (struct __go_panic_stack)); |
n->__arg = arg; |
n->__next = g->panic; |
g->panic = n; |
|
/* Run all the defer functions. */ |
|
while (1) |
{ |
struct __go_defer_stack *d; |
void (*pfn) (void *); |
|
d = g->defer; |
if (d == NULL) |
break; |
|
pfn = d->__pfn; |
d->__pfn = NULL; |
|
if (pfn != NULL) |
{ |
(*pfn) (d->__arg); |
|
if (n->__was_recovered) |
{ |
/* Some defer function called recover. That means that |
we should stop running this panic. */ |
|
g->panic = n->__next; |
__go_free (n); |
|
/* Now unwind the stack by throwing an exception. The |
compiler has arranged to create exception handlers in |
each function which uses a defer statement. These |
exception handlers will check whether the entry on |
the top of the defer stack is from the current |
function. If it is, we have unwound the stack far |
enough. */ |
__go_unwind_stack (); |
|
/* __go_unwind_stack should not return. */ |
abort (); |
} |
|
/* Because we executed that defer function by a panic, and |
it did not call recover, we know that we are not |
	       returning from the calling function--we are panicking |
through it. */ |
*d->__frame = 0; |
} |
|
g->defer = d->__next; |
__go_free (d); |
} |
|
/* The panic was not recovered. */ |
|
runtime_startpanic (); |
__printpanics (g->panic); |
runtime_dopanic (0); |
} |
/go-panic.h
0,0 → 1,62
/* go-panic.h -- declare the go panic functions. |
|
Copyright 2009 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#ifndef LIBGO_GO_PANIC_H |
#define LIBGO_GO_PANIC_H |
|
#include "interface.h" |
|
struct __go_string; |
struct __go_type_descriptor; |
struct __go_defer_stack; |
|
/* The stack of panic calls. */ |
|
struct __go_panic_stack |
{ |
/* The next entry in the stack. */ |
struct __go_panic_stack *__next; |
|
/* The value associated with this panic. */ |
struct __go_empty_interface __arg; |
|
/* Whether this panic has been recovered. */ |
_Bool __was_recovered; |
|
/* Whether this panic was pushed on the stack because of an |
exception thrown in some other language. */ |
_Bool __is_foreign; |
}; |
|
extern void __go_panic (struct __go_empty_interface) |
__attribute__ ((noreturn)); |
|
extern void __go_print_string (struct __go_string); |
|
extern struct __go_empty_interface __go_recover (void); |
|
extern void __go_unwind_stack (void); |
|
/* Functions defined in libgo/go/runtime/error.go. */ |
|
extern void newTypeAssertionError(const struct __go_type_descriptor *pt1, |
const struct __go_type_descriptor *pt2, |
const struct __go_type_descriptor *pt3, |
const struct __go_string *ps1, |
const struct __go_string *ps2, |
const struct __go_string *ps3, |
const struct __go_string *pmeth, |
struct __go_empty_interface *ret) |
__asm__ ("libgo_runtime.runtime.NewTypeAssertionError"); |
|
extern void runtime_newErrorString(struct __go_string, |
struct __go_empty_interface *) |
__asm__ ("libgo_runtime.runtime.NewErrorString"); |
|
extern void printany(struct __go_empty_interface) |
__asm__ ("libgo_runtime.runtime.Printany"); |
|
#endif /* !defined(LIBGO_GO_PANIC_H) */ |
/proc.c
0,0 → 1,1545
// Copyright 2009 The Go Authors. All rights reserved. |
// Use of this source code is governed by a BSD-style |
// license that can be found in the LICENSE file. |
|
#include <limits.h> |
#include <stdlib.h> |
#include <pthread.h> |
#include <unistd.h> |
|
#include "config.h" |
#include "runtime.h" |
#include "arch.h" |
#include "defs.h" |
#include "malloc.h" |
#include "go-defer.h" |
|
#ifdef USING_SPLIT_STACK |
|
/* FIXME: These are not declared anywhere. */ |
|
extern void __splitstack_getcontext(void *context[10]); |
|
extern void __splitstack_setcontext(void *context[10]); |
|
extern void *__splitstack_makecontext(size_t, void *context[10], size_t *); |
|
extern void * __splitstack_resetcontext(void *context[10], size_t *); |
|
extern void *__splitstack_find(void *, void *, size_t *, void **, void **, |
void **); |
|
extern void __splitstack_block_signals (int *, int *); |
|
extern void __splitstack_block_signals_context (void *context[10], int *, |
int *); |
|
#endif |
|
#if defined(USING_SPLIT_STACK) && defined(LINKER_SUPPORTS_SPLIT_STACK) |
# ifdef PTHREAD_STACK_MIN |
# define StackMin PTHREAD_STACK_MIN |
# else |
# define StackMin 8192 |
# endif |
#else |
# define StackMin (2 * 1024 * 1024) |
#endif |
|
static void schedule(G*); |
|
typedef struct Sched Sched; |
|
M runtime_m0; |
G runtime_g0; // idle goroutine for m0 |
|
#ifdef __rtems__ |
#define __thread |
#endif |
|
static __thread G *g; |
static __thread M *m; |
|
#ifndef SETCONTEXT_CLOBBERS_TLS |
|
static inline void |
initcontext(void) |
{ |
} |
|
static inline void |
fixcontext(ucontext_t *c __attribute__ ((unused))) |
{ |
} |
|
# else |
|
# if defined(__x86_64__) && defined(__sun__) |
|
// x86_64 Solaris 10 and 11 have a bug: setcontext switches the %fs |
// register to that of the thread which called getcontext. The effect |
// is that the address of all __thread variables changes. This bug |
// also affects pthread_self() and pthread_getspecific. We work |
// around it by clobbering the context field directly to keep %fs the |
// same. |
|
static __thread greg_t fs; |
|
static inline void |
initcontext(void) |
{ |
ucontext_t c; |
|
getcontext(&c); |
fs = c.uc_mcontext.gregs[REG_FSBASE]; |
} |
|
static inline void |
fixcontext(ucontext_t* c) |
{ |
c->uc_mcontext.gregs[REG_FSBASE] = fs; |
} |
|
# else |
|
# error unknown case for SETCONTEXT_CLOBBERS_TLS |
|
# endif |
|
#endif |
|
// We can not always refer to the TLS variables directly. The |
// compiler will call tls_get_addr to get the address of the variable, |
// and it may hold it in a register across a call to schedule. When |
// we get back from the call we may be running in a different thread, |
// in which case the register now points to the TLS variable for a |
// different thread. We use non-inlinable functions to avoid this |
// when necessary. |
|
G* runtime_g(void) __attribute__ ((noinline, no_split_stack)); |
|
G* |
runtime_g(void) |
{ |
return g; |
} |
|
M* runtime_m(void) __attribute__ ((noinline, no_split_stack)); |
|
M* |
runtime_m(void) |
{ |
return m; |
} |
|
int32 runtime_gcwaiting; |
|
// Go scheduler |
// |
// The go scheduler's job is to match ready-to-run goroutines (`g's) |
// with waiting-for-work schedulers (`m's). If there are ready g's |
// and no waiting m's, ready() will start a new m running in a new |
// OS thread, so that all ready g's can run simultaneously, up to a limit. |
// For now, m's never go away. |
// |
// By default, Go keeps only one kernel thread (m) running user code |
// at a single time; other threads may be blocked in the operating system. |
// Setting the environment variable $GOMAXPROCS or calling |
// runtime.GOMAXPROCS() will change the number of user threads |
// allowed to execute simultaneously. $GOMAXPROCS is thus an |
// approximation of the maximum number of cores to use. |
// |
// Even a program that can run without deadlock in a single process |
// might use more m's if given the chance. For example, the prime |
// sieve will use as many m's as there are primes (up to the scheduler's limits), |
// allowing different stages of the pipeline to execute in parallel. |
// We could revisit this choice, only kicking off new m's for blocking |
// system calls, but that would limit the amount of parallel computation |
// that go would try to do. |
// |
// In general, one could imagine all sorts of refinements to the |
// scheduler, but the goal now is just to get something working on |
// Linux and OS X. |
|
struct Sched { |
Lock; |
|
G *gfree; // available g's (status == Gdead) |
int32 goidgen; |
|
G *ghead; // g's waiting to run |
G *gtail; |
int32 gwait; // number of g's waiting to run |
int32 gcount; // number of g's that are alive |
int32 grunning; // number of g's running on cpu or in syscall |
|
M *mhead; // m's waiting for work |
int32 mwait; // number of m's waiting for work |
int32 mcount; // number of m's that have been created |
|
volatile uint32 atomic; // atomic scheduling word (see below) |
|
int32 profilehz; // cpu profiling rate |
|
bool init; // running initialization |
bool lockmain; // init called runtime.LockOSThread |
|
Note stopped; // one g can set waitstop and wait here for m's to stop |
}; |
|
// The atomic word in sched is an atomic uint32 that |
// holds these fields. |
// |
// [15 bits] mcpu number of m's executing on cpu |
// [15 bits] mcpumax max number of m's allowed on cpu |
// [1 bit] waitstop some g is waiting on stopped |
// [1 bit] gwaiting gwait != 0 |
// |
// These fields are the information needed by entersyscall |
// and exitsyscall to decide whether to coordinate with the |
// scheduler. Packing them into a single machine word lets |
// them use a fast path with a single atomic read/write and |
// no lock/unlock. This greatly reduces contention in |
// syscall- or cgo-heavy multithreaded programs. |
// |
// Except for entersyscall and exitsyscall, the manipulations |
// to these fields only happen while holding the schedlock, |
// so the routines holding schedlock only need to worry about |
// what entersyscall and exitsyscall do, not the other routines |
// (which also use the schedlock). |
// |
// In particular, entersyscall and exitsyscall only read mcpumax, |
// waitstop, and gwaiting. They never write them. Thus, writes to those |
// fields can be done (holding schedlock) without fear of write conflicts. |
// There may still be logic conflicts: for example, setting waitstop must |
// be conditioned on mcpu >= mcpumax or else the wait may be a |
// spurious sleep. The Promela model in proc.p verifies these accesses. |
enum { |
mcpuWidth = 15, |
mcpuMask = (1<<mcpuWidth) - 1, |
mcpuShift = 0, |
mcpumaxShift = mcpuShift + mcpuWidth, |
waitstopShift = mcpumaxShift + mcpuWidth, |
gwaitingShift = waitstopShift+1, |
|
// The max value of GOMAXPROCS is constrained |
// by the max value we can store in the bit fields |
// of the atomic word. Reserve a few high values |
// so that we can detect accidental decrement |
// beyond zero. |
maxgomaxprocs = mcpuMask - 10, |
}; |
|
#define atomic_mcpu(v) (((v)>>mcpuShift)&mcpuMask) |
#define atomic_mcpumax(v) (((v)>>mcpumaxShift)&mcpuMask) |
#define atomic_waitstop(v) (((v)>>waitstopShift)&1) |
#define atomic_gwaiting(v) (((v)>>gwaitingShift)&1) |
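|
// Worked example: with mcpu == 2, mcpumax == 4, and both flag bits |
// clear, the packed word is |
// |
//	v = (2<<mcpuShift) | (4<<mcpumaxShift) |
// |
// so atomic_mcpu(v) == 2, atomic_mcpumax(v) == 4, and |
// atomic_waitstop(v) == atomic_gwaiting(v) == 0.  A single |
// runtime_xadd(&runtime_sched.atomic, 1<<mcpuShift) then increments |
// mcpu without disturbing the other fields. |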
|
Sched runtime_sched; |
int32 runtime_gomaxprocs; |
bool runtime_singleproc; |
|
static bool canaddmcpu(void); |
|
// An m that is waiting for notewakeup(&m->havenextg). This may |
// only be accessed while the scheduler lock is held. This is used to |
// minimize the number of times we call notewakeup while the scheduler |
// lock is held, since the m will normally move quickly to lock the |
// scheduler itself, producing lock contention. |
static M* mwakeup; |
|
// Scheduling helpers. Sched must be locked. |
static void gput(G*); // put/get on ghead/gtail |
static G* gget(void); |
static void mput(M*); // put/get on mhead |
static M* mget(G*); |
static void gfput(G*); // put/get on gfree |
static G* gfget(void); |
static void matchmg(void); // match m's to g's |
static void readylocked(G*); // ready, but sched is locked |
static void mnextg(M*, G*); |
static void mcommoninit(M*); |
|
void |
setmcpumax(uint32 n) |
{ |
uint32 v, w; |
|
for(;;) { |
v = runtime_sched.atomic; |
w = v; |
w &= ~(mcpuMask<<mcpumaxShift); |
w |= n<<mcpumaxShift; |
if(runtime_cas(&runtime_sched.atomic, v, w)) |
break; |
} |
} |
|
// First function run by a new goroutine. This replaces gogocall. |
static void |
kickoff(void) |
{ |
void (*fn)(void*); |
|
fn = (void (*)(void*))(g->entry); |
fn(g->param); |
runtime_goexit(); |
} |
|
// Switch context to a different goroutine. This is like longjmp. |
static void runtime_gogo(G*) __attribute__ ((noinline)); |
static void |
runtime_gogo(G* newg) |
{ |
#ifdef USING_SPLIT_STACK |
__splitstack_setcontext(&newg->stack_context[0]); |
#endif |
g = newg; |
newg->fromgogo = true; |
fixcontext(&newg->context); |
setcontext(&newg->context); |
runtime_throw("gogo setcontext returned"); |
} |
|
// Save context and call fn passing g as a parameter. This is like |
// setjmp. Because getcontext always returns 0, unlike setjmp, we use |
// g->fromgogo as a code. It will be true if we got here via |
// setcontext. g == nil the first time this is called in a new m. |
static void runtime_mcall(void (*)(G*)) __attribute__ ((noinline)); |
static void |
runtime_mcall(void (*pfn)(G*)) |
{ |
M *mp; |
G *gp; |
#ifndef USING_SPLIT_STACK |
int i; |
#endif |
|
// Ensure that all registers are on the stack for the garbage |
// collector. |
__builtin_unwind_init(); |
|
mp = m; |
gp = g; |
if(gp == mp->g0) |
runtime_throw("runtime: mcall called on m->g0 stack"); |
|
if(gp != nil) { |
|
#ifdef USING_SPLIT_STACK |
__splitstack_getcontext(&g->stack_context[0]); |
#else |
gp->gcnext_sp = &i; |
#endif |
gp->fromgogo = false; |
getcontext(&gp->context); |
|
// When we return from getcontext, we may be running |
// in a new thread. That means that m and g may have |
// changed. They are global variables so we will |
// reload them, but the addresses of m and g may be |
// cached in our local stack frame, and those |
// addresses may be wrong. Call functions to reload |
// the values for this thread. |
mp = runtime_m(); |
gp = runtime_g(); |
} |
if (gp == nil || !gp->fromgogo) { |
#ifdef USING_SPLIT_STACK |
__splitstack_setcontext(&mp->g0->stack_context[0]); |
#endif |
mp->g0->entry = (byte*)pfn; |
mp->g0->param = gp; |
|
// It's OK to set g directly here because this case |
// can not occur if we got here via a setcontext to |
// the getcontext call just above. |
g = mp->g0; |
|
fixcontext(&mp->g0->context); |
setcontext(&mp->g0->context); |
runtime_throw("runtime: mcall function returned"); |
} |
} |
|
// The bootstrap sequence is: |
// |
// call osinit |
// call schedinit |
// make & queue new G |
// call runtime_mstart |
// |
// The new G calls runtime_main. |
void |
runtime_schedinit(void) |
{ |
int32 n; |
const byte *p; |
|
m = &runtime_m0; |
g = &runtime_g0; |
m->g0 = g; |
m->curg = g; |
g->m = m; |
|
initcontext(); |
|
m->nomemprof++; |
runtime_mallocinit(); |
mcommoninit(m); |
|
runtime_goargs(); |
runtime_goenvs(); |
|
// For debugging: |
// Allocate internal symbol table representation now, |
// so that we don't need to call malloc when we crash. |
// runtime_findfunc(0); |
|
runtime_gomaxprocs = 1; |
p = runtime_getenv("GOMAXPROCS"); |
if(p != nil && (n = runtime_atoi(p)) != 0) { |
if(n > maxgomaxprocs) |
n = maxgomaxprocs; |
runtime_gomaxprocs = n; |
} |
setmcpumax(runtime_gomaxprocs); |
runtime_singleproc = runtime_gomaxprocs == 1; |
|
canaddmcpu(); // mcpu++ to account for bootstrap m |
m->helpgc = 1; // flag to tell schedule() to mcpu-- |
runtime_sched.grunning++; |
|
// Can not enable GC until all roots are registered. |
// mstats.enablegc = 1; |
m->nomemprof--; |
} |
|
extern void main_init(void) __asm__ ("__go_init_main"); |
extern void main_main(void) __asm__ ("main.main"); |
|
// The main goroutine. |
void |
runtime_main(void) |
{ |
// Lock the main goroutine onto this, the main OS thread, |
// during initialization. Most programs won't care, but a few |
// do require certain calls to be made by the main thread. |
// Those can arrange for main.main to run in the main thread |
// by calling runtime.LockOSThread during initialization |
// to preserve the lock. |
runtime_LockOSThread(); |
runtime_sched.init = true; |
main_init(); |
runtime_sched.init = false; |
if(!runtime_sched.lockmain) |
runtime_UnlockOSThread(); |
|
// For gccgo we have to wait until after main is initialized |
// to enable GC, because initializing main registers the GC |
// roots. |
mstats.enablegc = 1; |
|
main_main(); |
runtime_exit(0); |
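	// runtime_exit should not return; crash hard if it somehow does. |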
for(;;) |
*(int32*)0 = 0; |
} |
|
// Lock the scheduler. |
static void |
schedlock(void) |
{ |
runtime_lock(&runtime_sched); |
} |
|
// Unlock the scheduler. |
static void |
schedunlock(void) |
{ |
M *m; |
|
m = mwakeup; |
mwakeup = nil; |
runtime_unlock(&runtime_sched); |
if(m != nil) |
runtime_notewakeup(&m->havenextg); |
} |
|
void |
runtime_goexit(void) |
{ |
g->status = Gmoribund; |
runtime_gosched(); |
} |
|
void |
runtime_goroutineheader(G *g) |
{ |
const char *status; |
|
switch(g->status) { |
case Gidle: |
status = "idle"; |
break; |
case Grunnable: |
status = "runnable"; |
break; |
case Grunning: |
status = "running"; |
break; |
case Gsyscall: |
status = "syscall"; |
break; |
case Gwaiting: |
if(g->waitreason) |
status = g->waitreason; |
else |
status = "waiting"; |
break; |
case Gmoribund: |
status = "moribund"; |
break; |
default: |
status = "???"; |
break; |
} |
runtime_printf("goroutine %d [%s]:\n", g->goid, status); |
} |
|
void |
runtime_tracebackothers(G *me) |
{ |
G *g; |
|
for(g = runtime_allg; g != nil; g = g->alllink) { |
if(g == me || g->status == Gdead) |
continue; |
runtime_printf("\n"); |
runtime_goroutineheader(g); |
// runtime_traceback(g->sched.pc, g->sched.sp, 0, g); |
} |
} |
|
// Mark this g as m's idle goroutine. |
// This functionality might be used in environments where programs |
// are limited to a single thread, to simulate a select-driven |
// network server. It is not exposed via the standard runtime API. |
void |
runtime_idlegoroutine(void) |
{ |
if(g->idlem != nil) |
runtime_throw("g is already an idle goroutine"); |
g->idlem = m; |
} |
|
static void |
mcommoninit(M *m) |
{ |
// Add to runtime_allm so garbage collector doesn't free m |
// when it is just in a register or thread-local storage. |
m->alllink = runtime_allm; |
// runtime_Cgocalls() iterates over allm w/o schedlock, |
// so we need to publish it safely. |
runtime_atomicstorep((void**)&runtime_allm, m); |
|
m->id = runtime_sched.mcount++; |
m->fastrand = 0x49f6428aUL + m->id + runtime_cputicks(); |
|
if(m->mcache == nil) |
m->mcache = runtime_allocmcache(); |
} |
|
// Try to increment mcpu. Report whether succeeded. |
static bool |
canaddmcpu(void) |
{ |
uint32 v; |
|
for(;;) { |
v = runtime_sched.atomic; |
if(atomic_mcpu(v) >= atomic_mcpumax(v)) |
return 0; |
if(runtime_cas(&runtime_sched.atomic, v, v+(1<<mcpuShift))) |
return 1; |
} |
} |
|
// Put on `g' queue. Sched must be locked. |
static void |
gput(G *g) |
{ |
M *m; |
|
// If g is wired, hand it off directly. |
if((m = g->lockedm) != nil && canaddmcpu()) { |
mnextg(m, g); |
return; |
} |
|
// If g is the idle goroutine for an m, hand it off. |
if(g->idlem != nil) { |
if(g->idlem->idleg != nil) { |
runtime_printf("m%d idle out of sync: g%d g%d\n", |
g->idlem->id, |
g->idlem->idleg->goid, g->goid); |
runtime_throw("runtime: double idle"); |
} |
g->idlem->idleg = g; |
return; |
} |
|
g->schedlink = nil; |
if(runtime_sched.ghead == nil) |
runtime_sched.ghead = g; |
else |
runtime_sched.gtail->schedlink = g; |
runtime_sched.gtail = g; |
|
// increment gwait. |
// if it transitions to nonzero, set atomic gwaiting bit. |
if(runtime_sched.gwait++ == 0) |
runtime_xadd(&runtime_sched.atomic, 1<<gwaitingShift); |
} |
|
// Report whether gget would return something. |
static bool |
haveg(void) |
{ |
return runtime_sched.ghead != nil || m->idleg != nil; |
} |
|
// Get from `g' queue. Sched must be locked. |
static G* |
gget(void) |
{ |
G *g; |
|
g = runtime_sched.ghead; |
if(g){ |
runtime_sched.ghead = g->schedlink; |
if(runtime_sched.ghead == nil) |
runtime_sched.gtail = nil; |
// decrement gwait. |
// if it transitions to zero, clear atomic gwaiting bit. |
if(--runtime_sched.gwait == 0) |
runtime_xadd(&runtime_sched.atomic, -1<<gwaitingShift); |
} else if(m->idleg != nil) { |
g = m->idleg; |
m->idleg = nil; |
} |
return g; |
} |
|
// Put on `m' list. Sched must be locked. |
static void |
mput(M *m) |
{ |
m->schedlink = runtime_sched.mhead; |
runtime_sched.mhead = m; |
runtime_sched.mwait++; |
} |
|
// Get an `m' to run `g'. Sched must be locked. |
static M* |
mget(G *g) |
{ |
M *m; |
|
// if g has its own m, use it. |
if(g && (m = g->lockedm) != nil) |
return m; |
|
// otherwise use general m pool. |
if((m = runtime_sched.mhead) != nil){ |
runtime_sched.mhead = m->schedlink; |
runtime_sched.mwait--; |
} |
return m; |
} |
|
// Mark g ready to run. |
void |
runtime_ready(G *g) |
{ |
schedlock(); |
readylocked(g); |
schedunlock(); |
} |
|
// Mark g ready to run. Sched is already locked. |
// G might be running already and about to stop. |
// The sched lock protects g->status from changing underfoot. |
static void |
readylocked(G *g) |
{ |
if(g->m){ |
// Running on another machine. |
// Ready it when it stops. |
g->readyonstop = 1; |
return; |
} |
|
// Mark runnable. |
if(g->status == Grunnable || g->status == Grunning) { |
runtime_printf("goroutine %d has status %d\n", g->goid, g->status); |
runtime_throw("bad g->status in ready"); |
} |
g->status = Grunnable; |
|
gput(g); |
matchmg(); |
} |
|
// Same as readylocked but a different symbol so that |
// debuggers can set a breakpoint here and catch all |
// new goroutines. |
static void |
newprocreadylocked(G *g) |
{ |
readylocked(g); |
} |
|
// Pass g to m for running. |
// Caller has already incremented mcpu. |
static void |
mnextg(M *m, G *g) |
{ |
runtime_sched.grunning++; |
m->nextg = g; |
if(m->waitnextg) { |
m->waitnextg = 0; |
if(mwakeup != nil) |
runtime_notewakeup(&mwakeup->havenextg); |
mwakeup = m; |
} |
} |
|
// Get the next goroutine that m should run. |
// Sched must be locked on entry, is unlocked on exit. |
// Makes sure that at most $GOMAXPROCS g's are |
// running on cpus (not in system calls) at any given time. |
static G* |
nextgandunlock(void) |
{ |
G *gp; |
uint32 v; |
|
top: |
if(atomic_mcpu(runtime_sched.atomic) >= maxgomaxprocs) |
runtime_throw("negative mcpu"); |
|
// If there is a g waiting as m->nextg, the mcpu++ |
// happened before it was passed to mnextg. |
if(m->nextg != nil) { |
gp = m->nextg; |
m->nextg = nil; |
schedunlock(); |
return gp; |
} |
|
if(m->lockedg != nil) { |
// We can only run one g, and it's not available. |
// Make sure some other cpu is running to handle |
// the ordinary run queue. |
if(runtime_sched.gwait != 0) { |
matchmg(); |
// m->lockedg might have been on the queue. |
if(m->nextg != nil) { |
gp = m->nextg; |
m->nextg = nil; |
schedunlock(); |
return gp; |
} |
} |
} else { |
// Look for work on global queue. |
while(haveg() && canaddmcpu()) { |
gp = gget(); |
if(gp == nil) |
runtime_throw("gget inconsistency"); |
|
if(gp->lockedm) { |
mnextg(gp->lockedm, gp); |
continue; |
} |
runtime_sched.grunning++; |
schedunlock(); |
return gp; |
} |
|
// The while loop ended either because the g queue is empty |
// or because we have maxed out our m procs running go |
// code (mcpu >= mcpumax). We need to check that |
// concurrent actions by entersyscall/exitsyscall cannot |
// invalidate the decision to end the loop. |
// |
// We hold the sched lock, so no one else is manipulating the |
// g queue or changing mcpumax. Entersyscall can decrement |
// mcpu, but if it does so when there is something on the g queue, |
// the gwait bit will be set, so entersyscall will take the slow path |
// and use the sched lock. So it cannot invalidate our decision. |
// |
// Wait on global m queue. |
mput(m); |
} |
|
v = runtime_atomicload(&runtime_sched.atomic); |
if(runtime_sched.grunning == 0) |
runtime_throw("all goroutines are asleep - deadlock!"); |
m->nextg = nil; |
m->waitnextg = 1; |
runtime_noteclear(&m->havenextg); |
|
// Stoptheworld is waiting for all but its cpu to go to stop. |
// Entersyscall might have decremented mcpu too, but if so |
// it will see the waitstop and take the slow path. |
// Exitsyscall never increments mcpu beyond mcpumax. |
if(atomic_waitstop(v) && atomic_mcpu(v) <= atomic_mcpumax(v)) { |
// set waitstop = 0 (known to be 1) |
runtime_xadd(&runtime_sched.atomic, -1<<waitstopShift); |
runtime_notewakeup(&runtime_sched.stopped); |
} |
schedunlock(); |
|
runtime_notesleep(&m->havenextg); |
if(m->helpgc) { |
runtime_gchelper(); |
m->helpgc = 0; |
runtime_lock(&runtime_sched); |
goto top; |
} |
if((gp = m->nextg) == nil) |
runtime_throw("bad m->nextg in nextgoroutine"); |
m->nextg = nil; |
return gp; |
} |
|
int32 |
runtime_helpgc(bool *extra) |
{ |
M *mp; |
int32 n, max; |
|
// Figure out how many CPUs to use. |
// Limited by gomaxprocs, number of actual CPUs, and MaxGcproc. |
max = runtime_gomaxprocs; |
if(max > runtime_ncpu) |
max = runtime_ncpu > 0 ? runtime_ncpu : 1; |
if(max > MaxGcproc) |
max = MaxGcproc; |
|
// We're going to use one CPU no matter what. |
// Figure out the max number of additional CPUs. |
max--; |
|
runtime_lock(&runtime_sched); |
n = 0; |
while(n < max && (mp = mget(nil)) != nil) { |
n++; |
mp->helpgc = 1; |
mp->waitnextg = 0; |
runtime_notewakeup(&mp->havenextg); |
} |
runtime_unlock(&runtime_sched); |
if(extra) |
*extra = n != max; |
return n; |
} |
|
void |
runtime_stoptheworld(void) |
{ |
uint32 v; |
|
schedlock(); |
runtime_gcwaiting = 1; |
|
setmcpumax(1); |
|
// while mcpu > 1 |
for(;;) { |
v = runtime_sched.atomic; |
if(atomic_mcpu(v) <= 1) |
break; |
|
// It would be unsafe for multiple threads to be using |
// the stopped note at once, but there is only |
// ever one thread doing garbage collection. |
runtime_noteclear(&runtime_sched.stopped); |
if(atomic_waitstop(v)) |
runtime_throw("invalid waitstop"); |
|
// atomic { waitstop = 1 }, predicated on mcpu <= 1 check above |
// still being true. |
if(!runtime_cas(&runtime_sched.atomic, v, v+(1<<waitstopShift))) |
continue; |
|
schedunlock(); |
runtime_notesleep(&runtime_sched.stopped); |
schedlock(); |
} |
runtime_singleproc = runtime_gomaxprocs == 1; |
schedunlock(); |
} |
|
void |
runtime_starttheworld(bool extra) |
{ |
M *m; |
|
schedlock(); |
runtime_gcwaiting = 0; |
setmcpumax(runtime_gomaxprocs); |
matchmg(); |
if(extra && canaddmcpu()) { |
// Start a new m that will (we hope) be idle |
// and so available to help when the next |
// garbage collection happens. |
// canaddmcpu above did mcpu++ |
// (necessary, because m will be doing various |
// initialization work so is definitely running), |
// but m is not running a specific goroutine, |
// so set the helpgc flag as a signal to m's |
// first schedule(nil) to mcpu-- and grunning--. |
m = runtime_newm(); |
m->helpgc = 1; |
runtime_sched.grunning++; |
} |
schedunlock(); |
} |
|
// Called to start an M. |
void* |
runtime_mstart(void* mp) |
{ |
m = (M*)mp; |
g = m->g0; |
|
initcontext(); |
|
g->entry = nil; |
g->param = nil; |
|
// Record top of stack for use by mcall. |
// Once we call schedule we're never coming back, |
// so other calls can reuse this stack space. |
#ifdef USING_SPLIT_STACK |
__splitstack_getcontext(&g->stack_context[0]); |
#else |
g->gcinitial_sp = ∓ |
// Setting gcstack_size to 0 is a marker meaning that gcinitial_sp |
// is the top of the stack, not the bottom. |
g->gcstack_size = 0; |
g->gcnext_sp = ∓ |
#endif |
getcontext(&g->context); |
|
if(g->entry != nil) { |
// Got here from mcall. |
void (*pfn)(G*) = (void (*)(G*))g->entry; |
G* gp = (G*)g->param; |
pfn(gp); |
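		// An mcall'd function must not return; crash if it does. |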
*(int*)0x21 = 0x21; |
} |
runtime_minit(); |
|
#ifdef USING_SPLIT_STACK |
{ |
int dont_block_signals = 0; |
__splitstack_block_signals(&dont_block_signals, nil); |
} |
#endif |
|
schedule(nil); |
return nil; |
} |
|
typedef struct CgoThreadStart CgoThreadStart; |
struct CgoThreadStart |
{ |
M *m; |
G *g; |
void (*fn)(void); |
}; |
|
// Kick off new m's as needed (up to mcpumax). |
// Sched is locked. |
static void |
matchmg(void) |
{ |
G *gp; |
M *mp; |
|
if(m->mallocing || m->gcing) |
return; |
|
while(haveg() && canaddmcpu()) { |
gp = gget(); |
if(gp == nil) |
runtime_throw("gget inconsistency"); |
|
// Find the m that will run gp. |
if((mp = mget(gp)) == nil) |
mp = runtime_newm(); |
mnextg(mp, gp); |
} |
} |
|
// Create a new m. It will start off with a call to runtime_mstart. |
M* |
runtime_newm(void) |
{ |
M *m; |
pthread_attr_t attr; |
pthread_t tid; |
|
m = runtime_malloc(sizeof(M)); |
mcommoninit(m); |
m->g0 = runtime_malg(-1, nil, nil); |
|
if(pthread_attr_init(&attr) != 0) |
runtime_throw("pthread_attr_init"); |
if(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) != 0) |
runtime_throw("pthread_attr_setdetachstate"); |
|
#ifndef PTHREAD_STACK_MIN |
#define PTHREAD_STACK_MIN 8192 |
#endif |
if(pthread_attr_setstacksize(&attr, PTHREAD_STACK_MIN) != 0) |
runtime_throw("pthread_attr_setstacksize"); |
|
if(pthread_create(&tid, &attr, runtime_mstart, m) != 0) |
runtime_throw("pthread_create"); |
|
return m; |
} |
|
// One round of scheduler: find a goroutine and run it. |
// The argument is the goroutine that was running before |
// schedule was called, or nil if this is the first call. |
// Never returns. |
static void |
schedule(G *gp) |
{ |
int32 hz; |
uint32 v; |
|
schedlock(); |
if(gp != nil) { |
// Just finished running gp. |
gp->m = nil; |
runtime_sched.grunning--; |
|
// atomic { mcpu-- } |
v = runtime_xadd(&runtime_sched.atomic, -1<<mcpuShift); |
if(atomic_mcpu(v) > maxgomaxprocs) |
runtime_throw("negative mcpu in scheduler"); |
|
switch(gp->status){ |
case Grunnable: |
case Gdead: |
// Shouldn't have been running! |
runtime_throw("bad gp->status in sched"); |
case Grunning: |
gp->status = Grunnable; |
gput(gp); |
break; |
case Gmoribund: |
gp->status = Gdead; |
if(gp->lockedm) { |
gp->lockedm = nil; |
m->lockedg = nil; |
} |
gp->idlem = nil; |
gfput(gp); |
if(--runtime_sched.gcount == 0) |
runtime_exit(0); |
break; |
} |
if(gp->readyonstop){ |
gp->readyonstop = 0; |
readylocked(gp); |
} |
} else if(m->helpgc) { |
// Bootstrap m or new m started by starttheworld. |
// atomic { mcpu-- } |
v = runtime_xadd(&runtime_sched.atomic, -1<<mcpuShift); |
if(atomic_mcpu(v) > maxgomaxprocs) |
runtime_throw("negative mcpu in scheduler"); |
// Compensate for increment in starttheworld(). |
runtime_sched.grunning--; |
m->helpgc = 0; |
} else if(m->nextg != nil) { |
// New m started by matchmg. |
} else { |
runtime_throw("invalid m state in scheduler"); |
} |
|
// Find (or wait for) g to run. Unlocks runtime_sched. |
gp = nextgandunlock(); |
gp->readyonstop = 0; |
gp->status = Grunning; |
m->curg = gp; |
gp->m = m; |
|
// Check whether the profiler needs to be turned on or off. |
hz = runtime_sched.profilehz; |
if(m->profilehz != hz) |
runtime_resetcpuprofiler(hz); |
|
runtime_gogo(gp); |
} |
|
// Enter scheduler. If g->status is Grunning, |
// re-queues g and runs everyone else who is waiting |
// before running g again. If g->status is Gmoribund, |
// kills off g. |
void |
runtime_gosched(void) |
{ |
if(m->locks != 0) |
runtime_throw("gosched holding locks"); |
if(g == m->g0) |
runtime_throw("gosched of g0"); |
runtime_mcall(schedule); |
} |
|
// The goroutine g is about to enter a system call. |
// Record that it's not using the cpu anymore. |
// This is called only from the go syscall library and cgocall, |
// not from the low-level system calls used by the runtime. |
// |
// Entersyscall cannot split the stack: the runtime_gosave must |
// make g->sched refer to the caller's stack segment, because |
// entersyscall is going to return immediately after. |
// It's okay to call matchmg and notewakeup even after |
// decrementing mcpu, because we haven't released the |
// sched lock yet, so the garbage collector cannot be running. |
|
void runtime_entersyscall(void) __attribute__ ((no_split_stack)); |
|
void |
runtime_entersyscall(void) |
{ |
uint32 v; |
|
// Leave SP around for gc and traceback. |
#ifdef USING_SPLIT_STACK |
g->gcstack = __splitstack_find(NULL, NULL, &g->gcstack_size, |
&g->gcnext_segment, &g->gcnext_sp, |
&g->gcinitial_sp); |
#else |
g->gcnext_sp = (byte *) &v; |
#endif |
|
// Save the registers in the g structure so that any pointers |
// held in registers will be seen by the garbage collector. |
// We could use getcontext here, but setjmp is more efficient |
// because it doesn't need to save the signal mask. |
setjmp(g->gcregs); |
|
g->status = Gsyscall; |
|
// Fast path. |
// The slow path inside the schedlock/schedunlock will get |
// through without stopping if it does: |
// mcpu-- |
// gwait not true |
// waitstop && mcpu <= mcpumax not true |
// If we can do the same with a single atomic add, |
// then we can skip the locks. |
v = runtime_xadd(&runtime_sched.atomic, -1<<mcpuShift); |
if(!atomic_gwaiting(v) && (!atomic_waitstop(v) || atomic_mcpu(v) > atomic_mcpumax(v))) |
return; |
|
schedlock(); |
v = runtime_atomicload(&runtime_sched.atomic); |
if(atomic_gwaiting(v)) { |
matchmg(); |
v = runtime_atomicload(&runtime_sched.atomic); |
} |
if(atomic_waitstop(v) && atomic_mcpu(v) <= atomic_mcpumax(v)) { |
runtime_xadd(&runtime_sched.atomic, -1<<waitstopShift); |
runtime_notewakeup(&runtime_sched.stopped); |
} |
|
schedunlock(); |
} |
|
// The goroutine g exited its system call. |
// Arrange for it to run on a cpu again. |
// This is called only from the go syscall library, not |
// from the low-level system calls used by the runtime. |
void |
runtime_exitsyscall(void) |
{ |
G *gp; |
uint32 v; |
|
// Fast path. |
// If we can do the mcpu++ bookkeeping and |
// find that we still have mcpu <= mcpumax, then we can |
// start executing Go code immediately, without having to |
// schedlock/schedunlock. |
gp = g; |
v = runtime_xadd(&runtime_sched.atomic, (1<<mcpuShift)); |
if(m->profilehz == runtime_sched.profilehz && atomic_mcpu(v) <= atomic_mcpumax(v)) { |
// There's a cpu for us, so we can run. |
gp->status = Grunning; |
// Garbage collector isn't running (since we are), |
// so okay to clear gcstack. |
#ifdef USING_SPLIT_STACK |
gp->gcstack = nil; |
#endif |
gp->gcnext_sp = nil; |
runtime_memclr(gp->gcregs, sizeof gp->gcregs); |
return; |
} |
|
// Tell scheduler to put g back on the run queue: |
// mostly equivalent to g->status = Grunning, |
// but keeps the garbage collector from thinking |
// that g is running right now, which it's not. |
gp->readyonstop = 1; |
|
// All the cpus are taken. |
// The scheduler will ready g and put this m to sleep. |
// When the scheduler takes g away from m, |
// it will undo the runtime_sched.mcpu++ above. |
runtime_gosched(); |
|
// Gosched returned, so we're allowed to run now. |
// Delete the gcstack information that we left for |
// the garbage collector during the system call. |
// Must wait until now because until gosched returns |
// we don't know for sure that the garbage collector |
// is not running. |
#ifdef USING_SPLIT_STACK |
gp->gcstack = nil; |
#endif |
gp->gcnext_sp = nil; |
runtime_memclr(gp->gcregs, sizeof gp->gcregs); |
} |
|
// Allocate a new g, with a stack big enough for stacksize bytes. |
G* |
runtime_malg(int32 stacksize, byte** ret_stack, size_t* ret_stacksize) |
{ |
G *newg; |
|
newg = runtime_malloc(sizeof(G)); |
if(stacksize >= 0) { |
#if USING_SPLIT_STACK |
int dont_block_signals = 0; |
|
*ret_stack = __splitstack_makecontext(stacksize, |
&newg->stack_context[0], |
ret_stacksize); |
__splitstack_block_signals_context(&newg->stack_context[0], |
&dont_block_signals, nil); |
#else |
*ret_stack = runtime_mallocgc(stacksize, FlagNoProfiling|FlagNoGC, 0, 0); |
*ret_stacksize = stacksize; |
newg->gcinitial_sp = *ret_stack; |
newg->gcstack_size = stacksize; |
#endif |
} |
return newg; |
} |
|
/* For runtime package testing. */ |
|
void runtime_testing_entersyscall(void) |
__asm__("libgo_runtime.runtime.entersyscall"); |
|
void |
runtime_testing_entersyscall() |
{ |
runtime_entersyscall(); |
} |
|
void runtime_testing_exitsyscall(void) |
__asm__("libgo_runtime.runtime.exitsyscall"); |
|
void |
runtime_testing_exitsyscall() |
{ |
runtime_exitsyscall(); |
} |
|
G* |
__go_go(void (*fn)(void*), void* arg) |
{ |
byte *sp; |
size_t spsize; |
G * volatile newg; // volatile to avoid longjmp warning |
|
schedlock(); |
|
if((newg = gfget()) != nil){ |
#ifdef USING_SPLIT_STACK |
int dont_block_signals = 0; |
|
sp = __splitstack_resetcontext(&newg->stack_context[0], |
&spsize); |
__splitstack_block_signals_context(&newg->stack_context[0], |
&dont_block_signals, nil); |
#else |
sp = newg->gcinitial_sp; |
spsize = newg->gcstack_size; |
if(spsize == 0) |
runtime_throw("bad spsize in __go_go"); |
newg->gcnext_sp = sp; |
#endif |
} else { |
newg = runtime_malg(StackMin, &sp, &spsize); |
if(runtime_lastg == nil) |
runtime_allg = newg; |
else |
runtime_lastg->alllink = newg; |
runtime_lastg = newg; |
} |
newg->status = Gwaiting; |
newg->waitreason = "new goroutine"; |
|
newg->entry = (byte*)fn; |
newg->param = arg; |
newg->gopc = (uintptr)__builtin_return_address(0); |
|
runtime_sched.gcount++; |
runtime_sched.goidgen++; |
newg->goid = runtime_sched.goidgen; |
|
if(sp == nil) |
runtime_throw("nil g->stack0"); |
|
getcontext(&newg->context); |
newg->context.uc_stack.ss_sp = sp; |
#ifdef MAKECONTEXT_STACK_TOP |
newg->context.uc_stack.ss_sp += spsize; |
#endif |
newg->context.uc_stack.ss_size = spsize; |
makecontext(&newg->context, kickoff, 0); |
|
newprocreadylocked(newg); |
schedunlock(); |
|
return newg; |
//printf(" goid=%d\n", newg->goid); |
} |
|
// Put on gfree list. Sched must be locked. |
static void |
gfput(G *g) |
{ |
g->schedlink = runtime_sched.gfree; |
runtime_sched.gfree = g; |
} |
|
// Get from gfree list. Sched must be locked. |
static G* |
gfget(void) |
{ |
G *g; |
|
g = runtime_sched.gfree; |
if(g) |
runtime_sched.gfree = g->schedlink; |
return g; |
} |
|
// Run all deferred functions for the current goroutine. |
static void |
rundefer(void) |
{ |
Defer *d; |
|
while((d = g->defer) != nil) { |
void (*pfn)(void*); |
|
pfn = d->__pfn; |
d->__pfn = nil; |
if (pfn != nil) |
(*pfn)(d->__arg); |
g->defer = d->__next; |
runtime_free(d); |
} |
} |
|
void runtime_Goexit (void) asm ("libgo_runtime.runtime.Goexit"); |
|
void |
runtime_Goexit(void) |
{ |
rundefer(); |
runtime_goexit(); |
} |
|
void runtime_Gosched (void) asm ("libgo_runtime.runtime.Gosched"); |
|
void |
runtime_Gosched(void) |
{ |
runtime_gosched(); |
} |
|
// Implementation of runtime.GOMAXPROCS. |
// delete when scheduler is stronger |
int32 |
runtime_gomaxprocsfunc(int32 n) |
{ |
int32 ret; |
uint32 v; |
|
schedlock(); |
ret = runtime_gomaxprocs; |
if(n <= 0) |
n = ret; |
if(n > maxgomaxprocs) |
n = maxgomaxprocs; |
runtime_gomaxprocs = n; |
if(runtime_gomaxprocs > 1) |
runtime_singleproc = false; |
if(runtime_gcwaiting != 0) { |
if(atomic_mcpumax(runtime_sched.atomic) != 1) |
runtime_throw("invalid mcpumax during gc"); |
schedunlock(); |
return ret; |
} |
|
setmcpumax(n); |
|
// If there are now fewer allowed procs |
// than procs running, stop. |
v = runtime_atomicload(&runtime_sched.atomic); |
if((int32)atomic_mcpu(v) > n) { |
schedunlock(); |
runtime_gosched(); |
return ret; |
} |
// handle more procs |
matchmg(); |
schedunlock(); |
return ret; |
} |
|
void |
runtime_LockOSThread(void) |
{ |
if(m == &runtime_m0 && runtime_sched.init) { |
runtime_sched.lockmain = true; |
return; |
} |
m->lockedg = g; |
g->lockedm = m; |
} |
|
void |
runtime_UnlockOSThread(void) |
{ |
if(m == &runtime_m0 && runtime_sched.init) { |
runtime_sched.lockmain = false; |
return; |
} |
m->lockedg = nil; |
g->lockedm = nil; |
} |
|
bool |
runtime_lockedOSThread(void) |
{ |
return g->lockedm != nil && m->lockedg != nil; |
} |
|
// for testing of callbacks |
|
_Bool runtime_golockedOSThread(void) |
asm("libgo_runtime.runtime.golockedOSThread"); |
|
_Bool |
runtime_golockedOSThread(void) |
{ |
return runtime_lockedOSThread(); |
} |
|
// for testing of wire, unwire |
uint32 |
runtime_mid() |
{ |
return m->id; |
} |
|
int32 runtime_Goroutines (void) |
__asm__ ("libgo_runtime.runtime.Goroutines"); |
|
int32 |
runtime_Goroutines() |
{ |
return runtime_sched.gcount; |
} |
|
int32 |
runtime_mcount(void) |
{ |
return runtime_sched.mcount; |
} |
|
static struct { |
Lock; |
void (*fn)(uintptr*, int32); |
int32 hz; |
uintptr pcbuf[100]; |
} prof; |
|
// Called if we receive a SIGPROF signal. |
void |
runtime_sigprof(uint8 *pc __attribute__ ((unused)), |
uint8 *sp __attribute__ ((unused)), |
uint8 *lr __attribute__ ((unused)), |
G *gp __attribute__ ((unused))) |
{ |
// int32 n; |
|
if(prof.fn == nil || prof.hz == 0) |
return; |
|
runtime_lock(&prof); |
if(prof.fn == nil) { |
runtime_unlock(&prof); |
return; |
} |
// n = runtime_gentraceback(pc, sp, lr, gp, 0, prof.pcbuf, nelem(prof.pcbuf)); |
// if(n > 0) |
// prof.fn(prof.pcbuf, n); |
runtime_unlock(&prof); |
} |
|
// Arrange to call fn with a traceback hz times a second. |
void |
runtime_setcpuprofilerate(void (*fn)(uintptr*, int32), int32 hz) |
{ |
// Force sane arguments. |
if(hz < 0) |
hz = 0; |
if(hz == 0) |
fn = nil; |
if(fn == nil) |
hz = 0; |
|
// Stop profiler on this cpu so that it is safe to lock prof. |
// if a profiling signal came in while we had prof locked, |
// it would deadlock. |
runtime_resetcpuprofiler(0); |
|
runtime_lock(&prof); |
prof.fn = fn; |
prof.hz = hz; |
runtime_unlock(&prof); |
runtime_lock(&runtime_sched); |
runtime_sched.profilehz = hz; |
runtime_unlock(&runtime_sched); |
|
if(hz != 0) |
runtime_resetcpuprofiler(hz); |
} |
/map.goc
0,0 → 1,72
// Copyright 2010 The Go Authors. All rights reserved. |
// Use of this source code is governed by a BSD-style |
// license that can be found in the LICENSE file. |
|
package runtime |
#include "runtime.h" |
#include "map.h" |
|
typedef struct __go_map Hmap; |
typedef struct __go_hash_iter hiter; |
|
/* Access a value in a map, returning a value and a presence indicator. */ |
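|
/* This is the runtime half of the Go comma-ok form "v, present = |
   m[k]": a missing key zeroes *val and reports present as false. */ |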
|
func mapaccess2(t *MapType, h *Hmap, key *byte, val *byte) (present bool) { |
byte *mapval; |
size_t valsize; |
|
mapval = __go_map_index(h, key, 0); |
valsize = t->__val_type->__size; |
if (mapval == nil) { |
__builtin_memset(val, 0, valsize); |
present = 0; |
} else { |
__builtin_memcpy(val, mapval, valsize); |
present = 1; |
} |
} |
|
/* Optionally assign a value to a map (m[k] = v, p). */ |
|
func mapassign2(h *Hmap, key *byte, val *byte, p bool) { |
if (!p) { |
__go_map_delete(h, key); |
} else { |
byte *mapval; |
size_t valsize; |
|
mapval = __go_map_index(h, key, 1); |
valsize = h->__descriptor->__map_descriptor->__val_type->__size; |
__builtin_memcpy(mapval, val, valsize); |
} |
} |
|
/* Delete a key from a map. */ |
|
func mapdelete(h *Hmap, key *byte) { |
__go_map_delete(h, key); |
} |
|
/* Initialize a range over a map. */ |
|
func mapiterinit(h *Hmap, it *hiter) { |
__go_mapiterinit(h, it); |
} |
|
/* Move to the next iteration, updating *HITER. */ |
|
func mapiternext(it *hiter) { |
__go_mapiternext(it); |
} |
|
/* Get the key of the current iteration. */ |
|
func mapiter1(it *hiter, key *byte) { |
__go_mapiter1(it, key); |
} |
|
/* Get the key and value of the current iteration. */ |
|
func mapiter2(it *hiter, key *byte, val *byte) { |
__go_mapiter2(it, key, val); |
} |
/go-matherr.c
0,0 → 1,88
/* go-matherr.c -- a Go version of the matherr function. |
|
Copyright 2012 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
/* The gccgo version of the math library calls libc functions. On |
some systems, such as Solaris, those functions will call matherr on |
exceptional conditions. This is a version of matherr appropriate |
for Go, one which returns the values that the Go math library |
expects. This is fine for pure Go programs. For mixed Go and C |
programs this will be problematic if the C programs themselves use |
matherr. Normally the C version of matherr will override this, and |
the Go code will just have to cope. If this turns out to be too |
problematic we can change to run pure Go code in the math library |
on systems that use matherr. */ |
|
#include <math.h> |
#include <stdint.h> |
|
#include "config.h" |
|
#if defined(HAVE_MATHERR) && defined(HAVE_STRUCT_EXCEPTION) |
|
#define PI 3.14159265358979323846264338327950288419716939937510582097494459 |
|
int |
matherr (struct exception* e) |
{ |
const char *n; |
|
if (e->type != DOMAIN) |
return 0; |
|
n = e->name; |
if (__builtin_strcmp (n, "acos") == 0 |
|| __builtin_strcmp (n, "asin") == 0) |
e->retval = __builtin_nan (""); |
else if (__builtin_strcmp (n, "atan2") == 0) |
{ |
if (e->arg1 == 0 && e->arg2 == 0) |
{ |
double nz; |
|
nz = -0.0; |
if (__builtin_memcmp (&e->arg2, &nz, sizeof (double)) != 0) |
e->retval = e->arg1; |
else |
e->retval = copysign (PI, e->arg1); |
} |
else |
return 0; |
} |
else if (__builtin_strcmp (n, "log") == 0 |
|| __builtin_strcmp (n, "log10") == 0) |
e->retval = __builtin_nan (""); |
else if (__builtin_strcmp (n, "pow") == 0) |
{ |
if (e->arg1 < 0) |
e->retval = __builtin_nan (""); |
else if (e->arg1 == 0 && e->arg2 == 0) |
e->retval = 1.0; |
else if (e->arg1 == 0 && e->arg2 < 0) |
{ |
double i; |
|
if (modf (e->arg2, &i) == 0 && ((int64_t) i & 1) == 1) |
e->retval = copysign (__builtin_inf (), e->arg1); |
else |
e->retval = __builtin_inf (); |
} |
else |
return 0; |
} |
else if (__builtin_strcmp (n, "sqrt") == 0) |
{ |
if (e->arg1 < 0) |
e->retval = __builtin_nan (""); |
else |
return 0; |
} |
else |
return 0; |
|
return 1; |
} |
|
#endif |
/go-string-to-byte-array.c
0,0 → 1,25
/* go-string-to-byte-array.c -- convert a string to an array of bytes in Go. |
|
Copyright 2010 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include "go-string.h" |
#include "array.h" |
#include "runtime.h" |
#include "arch.h" |
#include "malloc.h" |
|
struct __go_open_array |
__go_string_to_byte_array (struct __go_string str) |
{ |
unsigned char *data; |
struct __go_open_array ret; |
|
data = (unsigned char *) runtime_mallocgc (str.__length, FlagNoPointers, 1, 0); |
__builtin_memcpy (data, str.__data, str.__length); |
ret.__values = (void *) data; |
ret.__count = str.__length; |
ret.__capacity = str.__length; |
return ret; |
} |
/thread.c
0,0 → 1,149
// Copyright 2010 The Go Authors. All rights reserved. |
// Use of this source code is governed by a BSD-style |
// license that can be found in the LICENSE file. |
|
#include <errno.h> |
#include <signal.h> |
|
#include "runtime.h" |
#include "go-assert.h" |
|
/* For targets which don't have the required sync support. Really |
these should be provided by gcc itself. FIXME. */ |
|
#if !defined (HAVE_SYNC_BOOL_COMPARE_AND_SWAP_4) || !defined (HAVE_SYNC_BOOL_COMPARE_AND_SWAP_8) || !defined (HAVE_SYNC_FETCH_AND_ADD_4) || !defined (HAVE_SYNC_ADD_AND_FETCH_8) |
|
static pthread_mutex_t sync_lock = PTHREAD_MUTEX_INITIALIZER; |
|
#endif |
|
#ifndef HAVE_SYNC_BOOL_COMPARE_AND_SWAP_4 |
|
_Bool |
__sync_bool_compare_and_swap_4 (uint32*, uint32, uint32) |
__attribute__ ((visibility ("hidden"))); |
|
_Bool |
__sync_bool_compare_and_swap_4 (uint32* ptr, uint32 old, uint32 new) |
{ |
int i; |
_Bool ret; |
|
i = pthread_mutex_lock (&sync_lock); |
__go_assert (i == 0); |
|
if (*ptr != old) |
ret = 0; |
else |
{ |
*ptr = new; |
ret = 1; |
} |
|
i = pthread_mutex_unlock (&sync_lock); |
__go_assert (i == 0); |
|
return ret; |
} |
|
#endif |
|
#ifndef HAVE_SYNC_BOOL_COMPARE_AND_SWAP_8 |
|
_Bool |
__sync_bool_compare_and_swap_8 (uint64*, uint64, uint64) |
__attribute__ ((visibility ("hidden"))); |
|
_Bool |
__sync_bool_compare_and_swap_8 (uint64* ptr, uint64 old, uint64 new) |
{ |
int i; |
_Bool ret; |
|
i = pthread_mutex_lock (&sync_lock); |
__go_assert (i == 0); |
|
if (*ptr != old) |
ret = 0; |
else |
{ |
*ptr = new; |
ret = 1; |
} |
|
i = pthread_mutex_unlock (&sync_lock); |
__go_assert (i == 0); |
|
return ret; |
} |
|
#endif |
|
#ifndef HAVE_SYNC_FETCH_AND_ADD_4 |
|
uint32 |
__sync_fetch_and_add_4 (uint32*, uint32) |
__attribute__ ((visibility ("hidden"))); |
|
uint32 |
__sync_fetch_and_add_4 (uint32* ptr, uint32 add) |
{ |
int i; |
uint32 ret; |
|
i = pthread_mutex_lock (&sync_lock); |
__go_assert (i == 0); |
|
ret = *ptr; |
*ptr += add; |
|
i = pthread_mutex_unlock (&sync_lock); |
__go_assert (i == 0); |
|
return ret; |
} |
|
#endif |
|
#ifndef HAVE_SYNC_ADD_AND_FETCH_8 |
|
uint64 |
__sync_add_and_fetch_8 (uint64*, uint64) |
__attribute__ ((visibility ("hidden"))); |
|
uint64 |
__sync_add_and_fetch_8 (uint64* ptr, uint64 add) |
{ |
int i; |
uint64 ret; |
|
i = pthread_mutex_lock (&sync_lock); |
__go_assert (i == 0); |
|
*ptr += add; |
ret = *ptr; |
|
i = pthread_mutex_unlock (&sync_lock); |
__go_assert (i == 0); |
|
return ret; |
} |
|
#endif |
|
// Called to initialize a new m (including the bootstrap m). |
void |
runtime_minit(void) |
{ |
byte* stack; |
size_t stacksize; |
stack_t ss; |
|
// Initialize signal handling. |
runtime_m()->gsignal = runtime_malg(32*1024, &stack, &stacksize); // OS X wants >=8K, Linux >=2K |
ss.ss_sp = stack; |
ss.ss_flags = 0; |
ss.ss_size = stacksize; |
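	// Crash deliberately if the alternate signal stack cannot be installed. |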
if(sigaltstack(&ss, nil) < 0) |
*(int *)0xf1 = 0xf1; |
} |
/reflect.goc
0,0 → 1,27
// Copyright 2010 The Go Authors. All rights reserved. |
// Use of this source code is governed by a BSD-style |
// license that can be found in the LICENSE file. |
|
package reflect |
#include "go-type.h" |
#include "interface.h" |
#include "runtime.h" |
#include "go-panic.h" |
|
func ifaceE2I(inter *Type, e Eface, ret *Iface) { |
const Type *t; |
Eface err; |
|
if(((uintptr)e.__type_descriptor&reflectFlags) != 0) |
runtime_throw("invalid interface value"); |
t = e.__type_descriptor; |
if(t == nil) { |
// explicit conversions require non-nil interface value. |
newTypeAssertionError(nil, nil, inter, |
nil, nil, inter->__reflection, |
nil, &err); |
__go_panic(err); |
} |
ret->__object = e.__object; |
ret->__methods = __go_convert_interface(inter, t); |
} |
/go-eface-val-compare.c
0,0 → 1,35
/* go-eface-val-compare.c -- compare an empty interface with a value. |
|
Copyright 2010 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include "runtime.h" |
#include "go-type.h" |
#include "interface.h" |
|
/* Compare an empty interface with a value. Return 0 for equal, not |
zero for not equal (return value is like strcmp). */ |
|
int |
__go_empty_interface_value_compare ( |
struct __go_empty_interface left, |
const struct __go_type_descriptor *right_descriptor, |
const void *val) |
{ |
const struct __go_type_descriptor *left_descriptor; |
|
left_descriptor = left.__type_descriptor; |
if (((uintptr_t) left_descriptor & reflectFlags) != 0) |
runtime_panicstring ("invalid interface value"); |
if (left_descriptor == NULL) |
return 1; |
if (!__go_type_descriptors_equal (left_descriptor, right_descriptor)) |
return 1; |
if (__go_is_pointer_type (left_descriptor)) |
return left.__object == val ? 0 : 1; |
if (!left_descriptor->__equalfn (left.__object, val, |
left_descriptor->__size)) |
return 1; |
return 0; |
} |
/go-type-identity.c
0,0 → 1,55
/* go-type-identity.c -- hash and equality identity functions. |
|
Copyright 2009 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include <stddef.h> |
|
#include "go-type.h" |
|
/* The 64-bit type. */ |
|
typedef unsigned int DItype __attribute__ ((mode (DI))); |
|
/* An identity hash function for a type. This is used for types where |
we can simply use the type value itself as a hash code. This is |
true of, e.g., integers and pointers. */ |
|
uintptr_t |
__go_type_hash_identity (const void *key, uintptr_t key_size) |
{ |
uintptr_t ret; |
uintptr_t i; |
const unsigned char *p; |
|
if (key_size <= 8) |
{ |
union |
{ |
DItype v; |
unsigned char a[8]; |
} u; |
u.v = 0; |
__builtin_memcpy (&u.a, key, key_size); |
if (sizeof (uintptr_t) >= 8) |
return (uintptr_t) u.v; |
else |
return (uintptr_t) ((u.v >> 32) ^ (u.v & 0xffffffff)); |
} |
|
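  /* Longer keys fall back to a byte-at-a-time multiplicative hash |
     (the classic djb2 scheme: seed 5381, multiplier 33). */ |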
ret = 5381; |
for (i = 0, p = (const unsigned char *) key; i < key_size; i++, p++) |
ret = ret * 33 + *p; |
return ret; |
} |
|
/* An identity equality function for a type. This is used for types |
where we can check for equality by checking that the values have |
the same bits. */ |
|
_Bool |
__go_type_equal_identity (const void *k1, const void *k2, uintptr_t key_size) |
{ |
return __builtin_memcmp (k1, k2, key_size) == 0; |
} |
/sema.goc
0,0 → 1,181
// Copyright 2009 The Go Authors. All rights reserved. |
// Use of this source code is governed by a BSD-style |
// license that can be found in the LICENSE file. |
|
// Semaphore implementation exposed to Go. |
// Intended use is to provide a sleep and wakeup |
// primitive that can be used in the contended case |
// of other synchronization primitives. |
// Thus it targets the same goal as Linux's futex, |
// but it has much simpler semantics. |
// |
// That is, don't think of these as semaphores. |
// Think of them as a way to implement sleep and wakeup |
// such that every sleep is paired with a single wakeup, |
// even if, due to races, the wakeup happens before the sleep. |
// |
// See Mullender and Cox, ``Semaphores in Plan 9,'' |
// http://swtch.com/semaphore.pdf |
|
package runtime |
#include "runtime.h" |
#include "arch.h" |
|
typedef struct Sema Sema; |
struct Sema |
{ |
uint32 volatile *addr; |
G *g; |
Sema *prev; |
Sema *next; |
}; |
|
typedef struct SemaRoot SemaRoot; |
struct SemaRoot |
{ |
Lock; |
Sema *head; |
Sema *tail; |
// Number of waiters. Read w/o the lock. |
uint32 volatile nwait; |
}; |
|
// Prime to not correlate with any user patterns. |
#define SEMTABLESZ 251 |
|
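// Each table entry is padded out to a full cache line so that |
// independent, heavily contended roots do not false-share. |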
static union |
{ |
SemaRoot; |
uint8 pad[CacheLineSize]; |
} semtable[SEMTABLESZ]; |
|
static SemaRoot* |
semroot(uint32 volatile *addr) |
{ |
return &semtable[((uintptr)addr >> 3) % SEMTABLESZ]; |
} |
|
static void |
semqueue(SemaRoot *root, uint32 volatile *addr, Sema *s) |
{ |
s->g = runtime_g(); |
s->addr = addr; |
s->next = nil; |
s->prev = root->tail; |
if(root->tail) |
root->tail->next = s; |
else |
root->head = s; |
root->tail = s; |
} |
|
static void |
semdequeue(SemaRoot *root, Sema *s) |
{ |
if(s->next) |
s->next->prev = s->prev; |
else |
root->tail = s->prev; |
if(s->prev) |
s->prev->next = s->next; |
else |
root->head = s->next; |
s->prev = nil; |
s->next = nil; |
} |
|
static int32 |
cansemacquire(uint32 volatile *addr) |
{ |
uint32 v; |
|
while((v = runtime_atomicload(addr)) > 0) |
if(runtime_cas(addr, v, v-1)) |
return 1; |
return 0; |
} |
|
void |
runtime_semacquire(uint32 volatile *addr) |
{ |
G *g; |
Sema s; |
SemaRoot *root; |
|
// Easy case. |
if(cansemacquire(addr)) |
return; |
|
// Harder case: |
// increment waiter count |
// try cansemacquire one more time, return if succeeded |
// enqueue itself as a waiter |
// sleep |
// (waiter descriptor is dequeued by signaler) |
g = runtime_g(); |
root = semroot(addr); |
for(;;) { |
runtime_lock(root); |
// Add ourselves to nwait to disable "easy case" in semrelease. |
runtime_xadd(&root->nwait, 1); |
// Check cansemacquire to avoid missed wakeup. |
if(cansemacquire(addr)) { |
runtime_xadd(&root->nwait, -1); |
runtime_unlock(root); |
return; |
} |
// Any semrelease after the cansemacquire knows we're waiting |
// (we set nwait above), so go to sleep. |
semqueue(root, addr, &s); |
g->status = Gwaiting; |
g->waitreason = "semacquire"; |
runtime_unlock(root); |
runtime_gosched(); |
if(cansemacquire(addr)) |
return; |
} |
} |
|
void |
runtime_semrelease(uint32 volatile *addr) |
{ |
Sema *s; |
SemaRoot *root; |
|
root = semroot(addr); |
runtime_xadd(addr, 1); |
|
// Easy case: no waiters? |
// This check must happen after the xadd, to avoid a missed wakeup |
// (see loop in semacquire). |
if(runtime_atomicload(&root->nwait) == 0) |
return; |
|
// Harder case: search for a waiter and wake it. |
runtime_lock(root); |
if(runtime_atomicload(&root->nwait) == 0) { |
// The count is already consumed by another goroutine, |
// so no need to wake up another goroutine. |
runtime_unlock(root); |
return; |
} |
for(s = root->head; s; s = s->next) { |
if(s->addr == addr) { |
runtime_xadd(&root->nwait, -1); |
semdequeue(root, s); |
break; |
} |
} |
runtime_unlock(root); |
if(s) |
runtime_ready(s->g); |
} |
|
func Semacquire(addr *uint32) { |
runtime_semacquire(addr); |
} |
|
func Semrelease(addr *uint32) { |
runtime_semrelease(addr); |
} |
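 |
// Illustrative sketch (not part of the original file): how a |
// higher-level lock might use this pair in its contended path. |
// The Mutex layout and helper names are hypothetical; runtime_xadd |
// is assumed to return the new value, as its uses elsewhere in this |
// runtime suggest. |
// |
//	typedef struct { uint32 key; uint32 sema; } Mutex; |
// |
//	static void |
//	mutex_lock(Mutex *m) |
//	{ |
//		if(runtime_xadd(&m->key, 1) == 1) |
//			return;	// changed 0 -> 1: grabbed the lock |
//		runtime_semacquire(&m->sema);	// sleep until an unlock pairs up |
//	} |
// |
//	static void |
//	mutex_unlock(Mutex *m) |
//	{ |
//		if(runtime_xadd(&m->key, -1) == 0) |
//			return;	// changed 1 -> 0: no waiters |
//		runtime_semrelease(&m->sema);	// wake exactly one sleeper |
//	} |
// |
// Every semacquire is paired with exactly one semrelease, even if |
// the release happens first; that is the contract described above. |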
/go-unsafe-new.c
0,0 → 1,31
/* go-unsafe-new.c -- unsafe.New function for Go. |
|
Copyright 2009 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include "runtime.h" |
#include "go-alloc.h" |
#include "go-type.h" |
#include "interface.h" |
|
/* Implement unsafe.New. */ |
|
void *New (struct __go_empty_interface type) asm ("libgo_unsafe.unsafe.New"); |
|
/* The dynamic type of the argument will be a pointer to a type |
descriptor. */ |
|
void * |
New (struct __go_empty_interface type) |
{ |
const struct __go_type_descriptor *descriptor; |
|
if (((uintptr_t) type.__type_descriptor & reflectFlags) != 0) |
runtime_panicstring ("invalid interface value"); |
|
/* FIXME: We should check __type_descriptor to verify that this is |
really a type descriptor. */ |
descriptor = (const struct __go_type_descriptor *) type.__object; |
return __go_alloc (descriptor->__size); |
} |
/go-typestring.c
0,0 → 1,18
/* go-typestring.c -- the runtime.typestring function. |
|
Copyright 2010 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include "interface.h" |
#include "go-type.h" |
#include "go-string.h" |
|
struct __go_string typestring(struct __go_empty_interface) |
asm ("libgo_runtime.runtime.typestring"); |
|
struct __go_string |
typestring (struct __go_empty_interface e) |
{ |
return *e.__type_descriptor->__reflection; |
} |
/go-cgo.c
0,0 → 1,42
/* go-cgo.c -- SWIG support routines for libgo. |
|
Copyright 2011 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include "go-alloc.h" |
#include "interface.h" |
#include "go-panic.h" |
#include "go-string.h" |
|
/* These are routines used by SWIG. The gc runtime library provides |
the same routines under the same name, though in that case the code |
is required to import runtime/cgo. */ |
|
void * |
_cgo_allocate (size_t n) |
{ |
return __go_alloc (n); |
} |
|
extern const struct __go_type_descriptor string_type_descriptor |
asm ("__go_tdn_string"); |
|
void |
_cgo_panic (const char *p) |
{ |
int len; |
unsigned char *data; |
struct __go_string *ps; |
struct __go_empty_interface e; |
|
len = __builtin_strlen (p); |
data = __go_alloc (len); |
__builtin_memcpy (data, p, len); |
ps = __go_alloc (sizeof *ps); |
ps->__data = data; |
ps->__length = len; |
e.__type_descriptor = &string_type_descriptor; |
e.__object = ps; |
__go_panic (e); |
} |
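 |
/* Illustrative sketch (not part of the original file): a SWIG |
   wrapper would typically call _cgo_panic to turn a C-side failure |
   into a Go panic, e.g. |
 |
     if (handle == NULL) |
       _cgo_panic ("failed to open handle"); |
 |
   The value recovered on the Go side is an ordinary Go string.  */ |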
/go-runtime-error.c
0,0 → 1,84
/* go-runtime-error.c -- Go runtime error. |
|
Copyright 2010 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include "runtime.h" |
|
/* The compiler generates calls to this function. These enum values |
are known to the compiler and used by compiled code. Any change |
here must be reflected in the compiler. */ |
|
enum |
{ |
/* Slice index out of bounds: negative or larger than the length of |
the slice. */ |
SLICE_INDEX_OUT_OF_BOUNDS = 0, |
|
/* Array index out of bounds. */ |
ARRAY_INDEX_OUT_OF_BOUNDS = 1, |
|
/* String index out of bounds. */ |
STRING_INDEX_OUT_OF_BOUNDS = 2, |
|
/* Slice slice out of bounds: negative or larger than the length of |
the slice or high bound less than low bound. */ |
SLICE_SLICE_OUT_OF_BOUNDS = 3, |
|
/* Array slice out of bounds. */ |
ARRAY_SLICE_OUT_OF_BOUNDS = 4, |
|
/* String slice out of bounds. */ |
STRING_SLICE_OUT_OF_BOUNDS = 5, |
|
/* Dereference of nil pointer. This is used when there is a |
dereference of a pointer to a very large struct or array, to |
ensure that a gigantic array is not used as a proxy to access |
random memory locations. */ |
NIL_DEREFERENCE = 6, |
|
/* Slice length or capacity out of bounds in make: negative or |
overflow or length greater than capacity. */ |
MAKE_SLICE_OUT_OF_BOUNDS = 7, |
|
/* Map capacity out of bounds in make: negative or overflow. */ |
MAKE_MAP_OUT_OF_BOUNDS = 8, |
|
/* Channel capacity out of bounds in make: negative or overflow. */ |
MAKE_CHAN_OUT_OF_BOUNDS = 9 |
}; |
|
extern void __go_runtime_error (int) __attribute__ ((noreturn)); |
|
void |
__go_runtime_error (int i) |
{ |
switch (i) |
{ |
case SLICE_INDEX_OUT_OF_BOUNDS: |
case ARRAY_INDEX_OUT_OF_BOUNDS: |
case STRING_INDEX_OUT_OF_BOUNDS: |
runtime_panicstring ("index out of range"); |
|
case SLICE_SLICE_OUT_OF_BOUNDS: |
case ARRAY_SLICE_OUT_OF_BOUNDS: |
case STRING_SLICE_OUT_OF_BOUNDS: |
runtime_panicstring ("slice bounds out of range"); |
|
case NIL_DEREFERENCE: |
runtime_panicstring ("nil pointer dereference"); |
|
case MAKE_SLICE_OUT_OF_BOUNDS: |
runtime_panicstring ("make slice len or cap out of range"); |
|
case MAKE_MAP_OUT_OF_BOUNDS: |
runtime_panicstring ("make map len out of range"); |
|
case MAKE_CHAN_OUT_OF_BOUNDS: |
runtime_panicstring ("make chan len out of range"); |
|
default: |
runtime_panicstring ("unknown runtime error"); |
} |
} |
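 |
/* Illustrative sketch (not part of the original file): the shape of |
   the check the compiler emits for a slice index expression s[i]. |
   The slice length field name is an assumption here. |
 |
     if (i < 0 || i >= s.__count) |
       __go_runtime_error (SLICE_INDEX_OUT_OF_BOUNDS); |
 |
   Each enum value above corresponds to one such generated check.  */ |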
/go-nanotime.c
0,0 → 1,21
// Copyright 2009 The Go Authors. All rights reserved. |
// Use of this source code is governed by a BSD-style |
// license that can be found in the LICENSE file. |
|
// Return current time in nanoseconds. |
|
#include <sys/time.h> |
|
#include "runtime.h" |
|
int64 runtime_nanotime (void) |
__attribute__ ((no_split_stack)); |
|
int64 |
runtime_nanotime (void) |
{ |
struct timeval tv; |
|
gettimeofday (&tv, NULL); |
return (int64) tv.tv_sec * 1000000000 + (int64) tv.tv_usec * 1000; |
} |
/go-string.h
0,0 → 1,42
/* go-string.h -- the string type for Go. |
|
Copyright 2009 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#ifndef LIBGO_GO_STRING_H |
#define LIBGO_GO_STRING_H |
|
#include <stddef.h> |
|
/* A string is an instance of this structure. */ |
|
struct __go_string |
{ |
/* The bytes. */ |
const unsigned char *__data; |
/* The length. */ |
int __length; |
}; |
|
static inline _Bool |
__go_strings_equal (struct __go_string s1, struct __go_string s2) |
{ |
return (s1.__length == s2.__length |
&& __builtin_memcmp (s1.__data, s2.__data, s1.__length) == 0); |
} |
|
static inline _Bool |
__go_ptr_strings_equal (const struct __go_string *ps1, |
const struct __go_string *ps2) |
{ |
if (ps1 == NULL) |
return ps2 == NULL; |
if (ps2 == NULL) |
return 0; |
return __go_strings_equal (*ps1, *ps2); |
} |
|
extern int __go_get_rune (const unsigned char *, size_t, int *); |
|
#endif /* !defined(LIBGO_GO_STRING_H) */ |
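 |
/* Illustrative sketch (not part of the original header): building |
   and comparing two string values by hand. |
 |
     static const unsigned char hello[] = "hello"; |
     struct __go_string s1 = { hello, 5 }; |
     struct __go_string s2 = { hello, 5 }; |
     // __go_strings_equal (s1, s2) is true: same length, same bytes. |
 |
   Note that __data need not be NUL-terminated; __length is |
   authoritative.  */ |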
/go-map-delete.c
0,0 → 1,56
/* go-map-delete.c -- delete an entry from a map. |
|
Copyright 2009 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include <stddef.h> |
#include <stdlib.h> |
|
#include "runtime.h" |
#include "go-alloc.h" |
#include "go-assert.h" |
#include "map.h" |
|
/* Delete the entry matching KEY from MAP. */ |
|
void |
__go_map_delete (struct __go_map *map, const void *key) |
{ |
const struct __go_map_descriptor *descriptor; |
const struct __go_type_descriptor *key_descriptor; |
uintptr_t key_offset; |
_Bool (*equalfn) (const void*, const void*, uintptr_t); |
size_t key_hash; |
size_t key_size; |
size_t bucket_index; |
void **pentry; |
|
if (map == NULL) |
runtime_panicstring ("deletion of entry in nil map"); |
|
descriptor = map->__descriptor; |
|
key_descriptor = descriptor->__map_descriptor->__key_type; |
key_offset = descriptor->__key_offset; |
key_size = key_descriptor->__size; |
__go_assert (key_size != 0 && key_size != -1UL); |
equalfn = key_descriptor->__equalfn; |
|
key_hash = key_descriptor->__hashfn (key, key_size); |
bucket_index = key_hash % map->__bucket_count; |
|
pentry = map->__buckets + bucket_index; |
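/* The first word of each entry is the pointer to the next entry in |
   the bucket, so *pentry both walks the chain and, on a match, lets |
   us unlink the entry in place (see the entry layout in map.h).  */ |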
while (*pentry != NULL) |
{ |
char *entry = (char *) *pentry; |
if (equalfn (key, entry + key_offset, key_size)) |
{ |
*pentry = *(void **) entry; |
__go_free (entry); |
map->__element_count -= 1; |
break; |
} |
pentry = (void **) entry; |
} |
} |
/map.h
0,0 → 1,87
/* map.h -- the map type for Go. |
|
Copyright 2009 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include <stddef.h> |
#include <stdint.h> |
|
#include "go-type.h" |
|
/* A map descriptor is what we need to manipulate the map. This is |
constant for a given map type. */ |
|
struct __go_map_descriptor |
{ |
/* A pointer to the type descriptor for the type of the map itself. */ |
const struct __go_map_type *__map_descriptor; |
|
/* A map entry is a struct with three fields: |
map_entry_type *next_entry; |
key_type key; |
value_type value; |
This is the size of that struct. */ |
uintptr_t __entry_size; |
|
/* The offset of the key field in a map entry struct. */ |
uintptr_t __key_offset; |
|
/* The offset of the value field in a map entry struct (the value |
field immediately follows the key field, but there may be some |
bytes inserted for alignment). */ |
uintptr_t __val_offset; |
}; |
|
struct __go_map |
{ |
/* The constant descriptor for this map. */ |
const struct __go_map_descriptor *__descriptor; |
|
/* The number of elements in the hash table. */ |
uintptr_t __element_count; |
|
/* The number of entries in the __buckets array. */ |
uintptr_t __bucket_count; |
|
/* Each bucket is a pointer to a linked list of map entries. */ |
void **__buckets; |
}; |
|
/* For a map iteration the compiled code will use a pointer to an |
iteration structure. The iteration structure will be allocated on |
the stack. The Go code must allocate at least enough space. */ |
|
struct __go_hash_iter |
{ |
/* A pointer to the current entry. This will be set to NULL when |
the range has completed. The Go code will test this field, so it |
must be the first one in the structure. */ |
const void *entry; |
/* The map we are iterating over. */ |
const struct __go_map *map; |
/* A pointer to the next entry in the current bucket. This permits |
deleting the current entry. This will be NULL when we have seen |
all the entries in the current bucket. */ |
const void *next_entry; |
/* The bucket index of the current and next entry. */ |
uintptr_t bucket; |
}; |
|
extern struct __go_map *__go_new_map (const struct __go_map_descriptor *, |
uintptr_t); |
|
extern uintptr_t __go_map_next_prime (uintptr_t); |
|
extern void *__go_map_index (struct __go_map *, const void *, _Bool); |
|
extern void __go_map_delete (struct __go_map *, const void *); |
|
extern void __go_mapiterinit (const struct __go_map *, struct __go_hash_iter *); |
|
extern void __go_mapiternext (struct __go_hash_iter *); |
|
extern void __go_mapiter1 (struct __go_hash_iter *it, unsigned char *key); |
|
extern void __go_mapiter2 (struct __go_hash_iter *it, unsigned char *key, |
unsigned char *val); |
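 |
/* Illustrative sketch (not part of the original header): how compiled |
   code drives an iteration with these hooks.  Here "map" stands for a |
   const struct __go_map *, and KEY_SIZE for the key type's size, both |
   known to the compiler. |
 |
     struct __go_hash_iter it; |
     __go_mapiterinit (map, &it); |
     while (it.entry != NULL) |
       { |
         unsigned char key[KEY_SIZE]; |
         __go_mapiter1 (&it, key); |
         // ... loop body using key ... |
         __go_mapiternext (&it); |
       } |
*/ |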
/defs.h
0,0 → 1,12
/* defs.h -- runtime definitions for Go. |
|
Copyright 2009 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
/* The gc library uses this file for system defines, and generates it |
automatically using the godefs program. The logical thing to put |
here for gccgo would be #include statements for system header |
files. We can't do that, though, because runtime.h #define's the |
standard types. So we #include the system headers from runtime.h |
instead. */ |
/mgc0.c
0,0 → 1,1337
// Copyright 2009 The Go Authors. All rights reserved. |
// Use of this source code is governed by a BSD-style |
// license that can be found in the LICENSE file. |
|
// Garbage collector. |
|
#include "runtime.h" |
#include "arch.h" |
#include "malloc.h" |
|
#ifdef USING_SPLIT_STACK |
|
extern void * __splitstack_find (void *, void *, size_t *, void **, void **, |
void **); |
|
extern void * __splitstack_find_context (void *context[10], size_t *, void **, |
void **, void **); |
|
#endif |
|
enum { |
Debug = 0, |
PtrSize = sizeof(void*), |
DebugMark = 0, // run second pass to check mark |
|
// Four bits per word (see #defines below). |
wordsPerBitmapWord = sizeof(void*)*8/4, |
bitShift = sizeof(void*)*8/4, |
}; |
|
// Bits in per-word bitmap. |
// #defines because enum might not be able to hold the values. |
// |
// Each word in the bitmap describes wordsPerBitmapWord words |
// of heap memory. There are 4 bitmap bits dedicated to each heap word, |
// so on a 64-bit system there is one bitmap word per 16 heap words. |
// The bits in the word are packed together by type first, then by |
// heap location, so each 64-bit bitmap word consists of, from top to bottom, |
// the 16 bitSpecial bits for the corresponding heap words, then the 16 bitMarked bits, |
// then the 16 bitNoPointers/bitBlockBoundary bits, then the 16 bitAllocated bits. |
// This layout makes it easier to iterate over the bits of a given type. |
// |
// The bitmap starts at mheap.arena_start and extends *backward* from |
// there. On a 64-bit system the off'th word in the arena is tracked by |
// the off/16+1'th word before mheap.arena_start. (On a 32-bit system, |
// the only difference is that the divisor is 8.) |
// |
// To pull out the bits corresponding to a given pointer p, we use: |
// |
// off = p - (uintptr*)mheap.arena_start; // word offset |
// b = (uintptr*)mheap.arena_start - off/wordsPerBitmapWord - 1; |
// shift = off % wordsPerBitmapWord |
// bits = *b >> shift; |
// /* then test bits & bitAllocated, bits & bitMarked, etc. */ |
// |
#define bitAllocated ((uintptr)1<<(bitShift*0)) |
#define bitNoPointers ((uintptr)1<<(bitShift*1)) /* when bitAllocated is set */ |
#define bitMarked ((uintptr)1<<(bitShift*2)) /* when bitAllocated is set */ |
#define bitSpecial ((uintptr)1<<(bitShift*3)) /* when bitAllocated is set - has finalizer or being profiled */ |
#define bitBlockBoundary ((uintptr)1<<(bitShift*1)) /* when bitAllocated is NOT set */ |
|
#define bitMask (bitBlockBoundary | bitAllocated | bitMarked | bitSpecial) |
|
// TODO: Make these per-M. |
static uint64 nhandoff; |
|
static int32 gctrace; |
|
typedef struct Workbuf Workbuf; |
struct Workbuf |
{ |
Workbuf *next; |
uintptr nobj; |
byte *obj[512-2]; |
}; |
|
typedef struct Finalizer Finalizer; |
struct Finalizer |
{ |
void (*fn)(void*); |
void *arg; |
const struct __go_func_type *ft; |
}; |
|
typedef struct FinBlock FinBlock; |
struct FinBlock |
{ |
FinBlock *alllink; |
FinBlock *next; |
int32 cnt; |
int32 cap; |
Finalizer fin[1]; |
}; |
 |
static G *fing; |
static FinBlock *finq; // list of finalizers that are to be executed |
static FinBlock *finc; // cache of free blocks |
static FinBlock *allfin; // list of all blocks |
static Lock finlock; |
static int32 fingwait; |
|
static void runfinq(void*); |
static Workbuf* getempty(Workbuf*); |
static Workbuf* getfull(Workbuf*); |
static void putempty(Workbuf*); |
static Workbuf* handoff(Workbuf*); |
|
static struct { |
Lock fmu; |
Workbuf *full; |
Lock emu; |
Workbuf *empty; |
uint32 nproc; |
volatile uint32 nwait; |
volatile uint32 ndone; |
Note alldone; |
Lock markgate; |
Lock sweepgate; |
MSpan *spans; |
|
Lock; |
byte *chunk; |
uintptr nchunk; |
} work; |
|
// scanblock scans a block of n bytes starting at pointer b for references |
// to other objects, scanning any it finds recursively until there are no |
// unscanned objects left. Instead of using explicit recursion, it keeps |
// a work list in the Workbuf* structures and loops in the main function |
// body. Keeping an explicit work list is easier on the stack allocator and |
// more efficient. |
static void |
scanblock(byte *b, int64 n) |
{ |
byte *obj, *arena_start, *arena_used, *p; |
void **vp; |
uintptr size, *bitp, bits, shift, i, j, x, xbits, off, nobj, nproc; |
MSpan *s; |
PageID k; |
void **wp; |
Workbuf *wbuf; |
bool keepworking; |
|
if((int64)(uintptr)n != n || n < 0) { |
// runtime_printf("scanblock %p %lld\n", b, (long long)n); |
runtime_throw("scanblock"); |
} |
|
// Memory arena parameters. |
arena_start = runtime_mheap.arena_start; |
arena_used = runtime_mheap.arena_used; |
nproc = work.nproc; |
|
wbuf = nil; // current work buffer |
wp = nil; // storage for next queued pointer (write pointer) |
nobj = 0; // number of queued objects |
|
// Scanblock helpers pass b==nil. |
// The main proc needs to return to make more |
// calls to scanblock. But if work.nproc==1 we |
// might as well process blocks as soon as we |
// have them. |
keepworking = b == nil || work.nproc == 1; |
|
// Align b to a word boundary. |
off = (uintptr)b & (PtrSize-1); |
if(off != 0) { |
b += PtrSize - off; |
n -= PtrSize - off; |
} |
|
for(;;) { |
// Each iteration scans the block b of length n, queueing pointers in |
// the work buffer. |
if(Debug > 1) |
runtime_printf("scanblock %p %lld\n", b, (long long) n); |
|
vp = (void**)b; |
n >>= (2+PtrSize/8); /* n /= PtrSize (4 or 8) */ |
for(i=0; i<(uintptr)n; i++) { |
obj = (byte*)vp[i]; |
|
// Words outside the arena cannot be pointers. |
if((byte*)obj < arena_start || (byte*)obj >= arena_used) |
continue; |
|
// obj may be a pointer to a live object. |
// Try to find the beginning of the object. |
|
// Round down to word boundary. |
obj = (void*)((uintptr)obj & ~((uintptr)PtrSize-1)); |
|
// Find bits for this word. |
off = (uintptr*)obj - (uintptr*)arena_start; |
bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1; |
shift = off % wordsPerBitmapWord; |
xbits = *bitp; |
bits = xbits >> shift; |
|
// Pointing at the beginning of a block? |
if((bits & (bitAllocated|bitBlockBoundary)) != 0) |
goto found; |
|
// Pointing just past the beginning? |
// Scan backward a little to find a block boundary. |
for(j=shift; j-->0; ) { |
if(((xbits>>j) & (bitAllocated|bitBlockBoundary)) != 0) { |
obj = (byte*)obj - (shift-j)*PtrSize; |
shift = j; |
bits = xbits>>shift; |
goto found; |
} |
} |
|
// Otherwise consult span table to find beginning. |
// (Manually inlined copy of MHeap_LookupMaybe.) |
k = (uintptr)obj>>PageShift; |
x = k; |
if(sizeof(void*) == 8) |
x -= (uintptr)arena_start>>PageShift; |
s = runtime_mheap.map[x]; |
if(s == nil || k < s->start || k - s->start >= s->npages || s->state != MSpanInUse) |
continue; |
p = (byte*)((uintptr)s->start<<PageShift); |
if(s->sizeclass == 0) { |
obj = p; |
} else { |
if((byte*)obj >= (byte*)s->limit) |
continue; |
size = runtime_class_to_size[s->sizeclass]; |
int32 i = ((byte*)obj - p)/size; |
obj = p+i*size; |
} |
|
// Now that we know the object header, reload bits. |
off = (uintptr*)obj - (uintptr*)arena_start; |
bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1; |
shift = off % wordsPerBitmapWord; |
xbits = *bitp; |
bits = xbits >> shift; |
|
found: |
// Now we have bits, bitp, and shift correct for |
// obj pointing at the base of the object. |
// Only care about allocated and not marked. |
if((bits & (bitAllocated|bitMarked)) != bitAllocated) |
continue; |
if(nproc == 1) |
*bitp |= bitMarked<<shift; |
else { |
for(;;) { |
x = *bitp; |
if(x & (bitMarked<<shift)) |
goto continue_obj; |
if(runtime_casp((void**)bitp, (void*)x, (void*)(x|(bitMarked<<shift)))) |
break; |
} |
} |
|
// If object has no pointers, don't need to scan further. |
if((bits & bitNoPointers) != 0) |
continue; |
|
// If another proc wants a pointer, give it some. |
if(nobj > 4 && work.nwait > 0 && work.full == nil) { |
wbuf->nobj = nobj; |
wbuf = handoff(wbuf); |
nobj = wbuf->nobj; |
wp = (void**)(wbuf->obj + nobj); |
} |
|
// If buffer is full, get a new one. |
if(wbuf == nil || nobj >= nelem(wbuf->obj)) { |
if(wbuf != nil) |
wbuf->nobj = nobj; |
wbuf = getempty(wbuf); |
wp = (void**)(wbuf->obj); |
nobj = 0; |
} |
*wp++ = obj; |
nobj++; |
continue_obj:; |
} |
|
// Done scanning [b, b+n). Prepare for the next iteration of |
// the loop by setting b and n to the parameters for the next block. |
|
// Fetch b from the work buffer. |
if(nobj == 0) { |
if(!keepworking) { |
putempty(wbuf); |
return; |
} |
// Emptied our buffer: refill. |
wbuf = getfull(wbuf); |
if(wbuf == nil) |
return; |
nobj = wbuf->nobj; |
wp = (void**)(wbuf->obj + wbuf->nobj); |
} |
b = *--wp; |
nobj--; |
|
// Ask span about size class. |
// (Manually inlined copy of MHeap_Lookup.) |
x = (uintptr)b>>PageShift; |
if(sizeof(void*) == 8) |
x -= (uintptr)arena_start>>PageShift; |
s = runtime_mheap.map[x]; |
if(s->sizeclass == 0) |
n = s->npages<<PageShift; |
else |
n = runtime_class_to_size[s->sizeclass]; |
} |
} |
|
// debug_scanblock is the debug copy of scanblock. |
// it is simpler, slower, single-threaded, recursive, |
// and uses bitSpecial as the mark bit. |
static void |
debug_scanblock(byte *b, int64 n) |
{ |
byte *obj, *p; |
void **vp; |
uintptr size, *bitp, bits, shift, i, xbits, off; |
MSpan *s; |
|
if(!DebugMark) |
runtime_throw("debug_scanblock without DebugMark"); |
|
if((int64)(uintptr)n != n || n < 0) { |
//runtime_printf("debug_scanblock %p %D\n", b, n); |
runtime_throw("debug_scanblock"); |
} |
|
// Align b to a word boundary. |
off = (uintptr)b & (PtrSize-1); |
if(off != 0) { |
b += PtrSize - off; |
n -= PtrSize - off; |
} |
|
vp = (void**)b; |
n /= PtrSize; |
for(i=0; i<(uintptr)n; i++) { |
obj = (byte*)vp[i]; |
|
// Words outside the arena cannot be pointers. |
if((byte*)obj < runtime_mheap.arena_start || (byte*)obj >= runtime_mheap.arena_used) |
continue; |
|
// Round down to word boundary. |
obj = (void*)((uintptr)obj & ~((uintptr)PtrSize-1)); |
|
// Consult span table to find beginning. |
s = runtime_MHeap_LookupMaybe(&runtime_mheap, obj); |
if(s == nil) |
continue; |
 |
p = (byte*)((uintptr)s->start<<PageShift); |
if(s->sizeclass == 0) { |
obj = p; |
size = (uintptr)s->npages<<PageShift; |
} else { |
if((byte*)obj >= (byte*)s->limit) |
continue; |
size = runtime_class_to_size[s->sizeclass]; |
int32 i = ((byte*)obj - p)/size; |
obj = p+i*size; |
} |
|
// Now that we know the object header, reload bits. |
off = (uintptr*)obj - (uintptr*)runtime_mheap.arena_start; |
bitp = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1; |
shift = off % wordsPerBitmapWord; |
xbits = *bitp; |
bits = xbits >> shift; |
|
// Now we have bits, bitp, and shift correct for |
// obj pointing at the base of the object. |
// If not allocated or already marked, done. |
if((bits & bitAllocated) == 0 || (bits & bitSpecial) != 0) // NOTE: bitSpecial not bitMarked |
continue; |
*bitp |= bitSpecial<<shift; |
if(!(bits & bitMarked)) |
runtime_printf("found unmarked block %p in %p\n", obj, vp+i); |
|
// If object has no pointers, don't need to scan further. |
if((bits & bitNoPointers) != 0) |
continue; |
|
debug_scanblock(obj, size); |
} |
} |
|
// Get an empty work buffer off the work.empty list, |
// allocating new buffers as needed. |
static Workbuf* |
getempty(Workbuf *b) |
{ |
if(work.nproc == 1) { |
// Put b on full list. |
if(b != nil) { |
b->next = work.full; |
work.full = b; |
} |
// Grab from empty list if possible. |
b = work.empty; |
if(b != nil) { |
work.empty = b->next; |
goto haveb; |
} |
} else { |
// Put b on full list. |
if(b != nil) { |
runtime_lock(&work.fmu); |
b->next = work.full; |
work.full = b; |
runtime_unlock(&work.fmu); |
} |
// Grab from empty list if possible. |
runtime_lock(&work.emu); |
b = work.empty; |
if(b != nil) |
work.empty = b->next; |
runtime_unlock(&work.emu); |
if(b != nil) |
goto haveb; |
} |
|
// Need to allocate. |
runtime_lock(&work); |
if(work.nchunk < sizeof *b) { |
work.nchunk = 1<<20; |
work.chunk = runtime_SysAlloc(work.nchunk); |
} |
b = (Workbuf*)work.chunk; |
work.chunk += sizeof *b; |
work.nchunk -= sizeof *b; |
runtime_unlock(&work); |
|
haveb: |
b->nobj = 0; |
return b; |
} |
|
static void |
putempty(Workbuf *b) |
{ |
if(b == nil) |
return; |
|
if(work.nproc == 1) { |
b->next = work.empty; |
work.empty = b; |
return; |
} |
|
runtime_lock(&work.emu); |
b->next = work.empty; |
work.empty = b; |
runtime_unlock(&work.emu); |
} |
|
// Get a full work buffer off the work.full list, or return nil. |
static Workbuf* |
getfull(Workbuf *b) |
{ |
int32 i; |
Workbuf *b1; |
|
if(work.nproc == 1) { |
// Put b on empty list. |
if(b != nil) { |
b->next = work.empty; |
work.empty = b; |
} |
// Grab from full list if possible. |
// Since work.nproc==1, no one else is |
// going to give us work. |
b = work.full; |
if(b != nil) |
work.full = b->next; |
return b; |
} |
|
putempty(b); |
|
// Grab buffer from full list if possible. |
for(;;) { |
b1 = work.full; |
if(b1 == nil) |
break; |
runtime_lock(&work.fmu); |
if(work.full != nil) { |
b1 = work.full; |
work.full = b1->next; |
runtime_unlock(&work.fmu); |
return b1; |
} |
runtime_unlock(&work.fmu); |
} |
|
runtime_xadd(&work.nwait, +1); |
for(i=0;; i++) { |
b1 = work.full; |
if(b1 != nil) { |
runtime_lock(&work.fmu); |
if(work.full != nil) { |
runtime_xadd(&work.nwait, -1); |
b1 = work.full; |
work.full = b1->next; |
runtime_unlock(&work.fmu); |
return b1; |
} |
runtime_unlock(&work.fmu); |
continue; |
} |
if(work.nwait == work.nproc) |
return nil; |
if(i < 10) |
runtime_procyield(20); |
else if(i < 20) |
runtime_osyield(); |
else |
runtime_usleep(100); |
} |
} |
|
static Workbuf* |
handoff(Workbuf *b) |
{ |
int32 n; |
Workbuf *b1; |
|
// Make new buffer with half of b's pointers. |
b1 = getempty(nil); |
n = b->nobj/2; |
b->nobj -= n; |
b1->nobj = n; |
runtime_memmove(b1->obj, b->obj+b->nobj, n*sizeof b1->obj[0]); |
nhandoff += n; |
|
// Put b on full list - let first half of b get stolen. |
runtime_lock(&work.fmu); |
b->next = work.full; |
work.full = b; |
runtime_unlock(&work.fmu); |
|
return b1; |
} |
|
// Scanstack calls scanblock on each of gp's stack segments. |
static void |
scanstack(void (*scanblock)(byte*, int64), G *gp) |
{ |
#ifdef USING_SPLIT_STACK |
M *mp; |
void* sp; |
size_t spsize; |
void* next_segment; |
void* next_sp; |
void* initial_sp; |
|
if(gp == runtime_g()) { |
// Scanning our own stack. |
sp = __splitstack_find(nil, nil, &spsize, &next_segment, |
&next_sp, &initial_sp); |
} else if((mp = gp->m) != nil && mp->helpgc) { |
// gchelper's stack is in active use and has no interesting pointers. |
return; |
} else { |
// Scanning another goroutine's stack. |
// The goroutine is usually asleep (the world is stopped). |
|
// The exception is that if the goroutine is about to enter or might |
// have just exited a system call, it may be executing code such |
// as schedlock and may have needed to start a new stack segment. |
// Use the stack segment and stack pointer at the time of |
// the system call instead, since that won't change underfoot. |
if(gp->gcstack != nil) { |
sp = gp->gcstack; |
spsize = gp->gcstack_size; |
next_segment = gp->gcnext_segment; |
next_sp = gp->gcnext_sp; |
initial_sp = gp->gcinitial_sp; |
} else { |
sp = __splitstack_find_context(&gp->stack_context[0], |
&spsize, &next_segment, |
&next_sp, &initial_sp); |
} |
} |
if(sp != nil) { |
scanblock(sp, spsize); |
while((sp = __splitstack_find(next_segment, next_sp, |
&spsize, &next_segment, |
&next_sp, &initial_sp)) != nil) |
scanblock(sp, spsize); |
} |
#else |
M *mp; |
byte* bottom; |
byte* top; |
|
if(gp == runtime_g()) { |
// Scanning our own stack. |
bottom = (byte*)&gp; |
} else if((mp = gp->m) != nil && mp->helpgc) { |
// gchelper's stack is in active use and has no interesting pointers. |
return; |
} else { |
// Scanning another goroutine's stack. |
// The goroutine is usually asleep (the world is stopped). |
bottom = (byte*)gp->gcnext_sp; |
if(bottom == nil) |
return; |
} |
top = (byte*)gp->gcinitial_sp + gp->gcstack_size; |
if(top > bottom) |
scanblock(bottom, top - bottom); |
else |
scanblock(top, bottom - top); |
#endif |
} |
|
// Markfin calls scanblock on the blocks that have finalizers: |
// the things pointed at cannot be freed until the finalizers have run. |
static void |
markfin(void *v) |
{ |
uintptr size; |
|
size = 0; |
if(!runtime_mlookup(v, (byte**)&v, &size, nil) || !runtime_blockspecial(v)) |
runtime_throw("mark - finalizer inconsistency"); |
|
// do not mark the finalizer block itself. just mark the things it points at. |
scanblock(v, size); |
} |
|
struct root_list { |
struct root_list *next; |
struct root { |
void *decl; |
size_t size; |
} roots[]; |
}; |
|
static struct root_list* roots; |
|
void |
__go_register_gc_roots (struct root_list* r) |
{ |
// FIXME: This needs locking if multiple goroutines can call |
// dlopen simultaneously. |
r->next = roots; |
roots = r; |
} |
|
static void |
debug_markfin(void *v) |
{ |
uintptr size; |
|
if(!runtime_mlookup(v, (byte**)&v, &size, nil)) |
runtime_throw("debug_mark - finalizer inconsistency"); |
debug_scanblock(v, size); |
} |
|
// Mark |
static void |
mark(void (*scan)(byte*, int64)) |
{ |
struct root_list *pl; |
G *gp; |
FinBlock *fb; |
|
// mark data+bss. |
for(pl = roots; pl != nil; pl = pl->next) { |
struct root* pr = &pl->roots[0]; |
while(1) { |
void *decl = pr->decl; |
if(decl == nil) |
break; |
scanblock(decl, pr->size); |
pr++; |
} |
} |
|
scan((byte*)&runtime_m0, sizeof runtime_m0); |
scan((byte*)&runtime_g0, sizeof runtime_g0); |
scan((byte*)&runtime_allg, sizeof runtime_allg); |
scan((byte*)&runtime_allm, sizeof runtime_allm); |
runtime_MProf_Mark(scan); |
runtime_time_scan(scan); |
|
// mark stacks |
for(gp=runtime_allg; gp!=nil; gp=gp->alllink) { |
switch(gp->status){ |
default: |
runtime_printf("unexpected G.status %d\n", gp->status); |
runtime_throw("mark - bad status"); |
case Gdead: |
break; |
case Grunning: |
if(gp != runtime_g()) |
runtime_throw("mark - world not stopped"); |
scanstack(scan, gp); |
break; |
case Grunnable: |
case Gsyscall: |
case Gwaiting: |
scanstack(scan, gp); |
break; |
} |
} |
|
// mark things pointed at by objects with finalizers |
if(scan == debug_scanblock) |
runtime_walkfintab(debug_markfin, scan); |
else |
runtime_walkfintab(markfin, scan); |
|
for(fb=allfin; fb; fb=fb->alllink) |
scanblock((byte*)fb->fin, fb->cnt*sizeof(fb->fin[0])); |
|
// in multiproc mode, join in the queued work. |
scan(nil, 0); |
} |
|
static bool |
handlespecial(byte *p, uintptr size) |
{ |
void (*fn)(void*); |
const struct __go_func_type *ft; |
FinBlock *block; |
Finalizer *f; |
|
if(!runtime_getfinalizer(p, true, &fn, &ft)) { |
runtime_setblockspecial(p, false); |
runtime_MProf_Free(p, size); |
return false; |
} |
|
runtime_lock(&finlock); |
if(finq == nil || finq->cnt == finq->cap) { |
if(finc == nil) { |
finc = runtime_SysAlloc(PageSize); |
finc->cap = (PageSize - sizeof(FinBlock)) / sizeof(Finalizer) + 1; |
finc->alllink = allfin; |
allfin = finc; |
} |
block = finc; |
finc = block->next; |
block->next = finq; |
finq = block; |
} |
f = &finq->fin[finq->cnt]; |
finq->cnt++; |
f->fn = fn; |
f->ft = ft; |
f->arg = p; |
runtime_unlock(&finlock); |
return true; |
} |
|
// Sweep frees or collects finalizers for blocks not marked in the mark phase. |
// It clears the mark bits in preparation for the next GC round. |
static void |
sweep(void) |
{ |
M *m; |
MSpan *s; |
int32 cl, n, npages; |
uintptr size; |
byte *p; |
MCache *c; |
byte *arena_start; |
|
m = runtime_m(); |
arena_start = runtime_mheap.arena_start; |
|
for(;;) { |
s = work.spans; |
if(s == nil) |
break; |
if(!runtime_casp(&work.spans, s, s->allnext)) |
continue; |
|
if(s->state != MSpanInUse) |
continue; |
|
p = (byte*)(s->start << PageShift); |
cl = s->sizeclass; |
if(cl == 0) { |
size = s->npages<<PageShift; |
n = 1; |
} else { |
// Chunk full of small blocks. |
size = runtime_class_to_size[cl]; |
npages = runtime_class_to_allocnpages[cl]; |
n = (npages << PageShift) / size; |
} |
|
// Sweep through n objects of given size starting at p. |
// This thread owns the span now, so it can manipulate |
// the block bitmap without atomic operations. |
for(; n > 0; n--, p += size) { |
uintptr off, *bitp, shift, bits; |
|
off = (uintptr*)p - (uintptr*)arena_start; |
bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1; |
shift = off % wordsPerBitmapWord; |
bits = *bitp>>shift; |
|
if((bits & bitAllocated) == 0) |
continue; |
|
if((bits & bitMarked) != 0) { |
if(DebugMark) { |
if(!(bits & bitSpecial)) |
runtime_printf("found spurious mark on %p\n", p); |
*bitp &= ~(bitSpecial<<shift); |
} |
*bitp &= ~(bitMarked<<shift); |
continue; |
} |
|
// Special means it has a finalizer or is being profiled. |
// In DebugMark mode, the bit has been coopted so |
// we have to assume all blocks are special. |
if(DebugMark || (bits & bitSpecial) != 0) { |
if(handlespecial(p, size)) |
continue; |
} |
|
// Mark freed; restore block boundary bit. |
*bitp = (*bitp & ~(bitMask<<shift)) | (bitBlockBoundary<<shift); |
|
c = m->mcache; |
if(s->sizeclass == 0) { |
// Free large span. |
runtime_unmarkspan(p, 1<<PageShift); |
*(uintptr*)p = 1; // needs zeroing |
runtime_MHeap_Free(&runtime_mheap, s, 1); |
} else { |
// Free small object. |
if(size > sizeof(uintptr)) |
((uintptr*)p)[1] = 1; // mark as "needs to be zeroed" |
c->local_by_size[s->sizeclass].nfree++; |
runtime_MCache_Free(c, p, s->sizeclass, size); |
} |
c->local_alloc -= size; |
c->local_nfree++; |
} |
} |
} |
|
void |
runtime_gchelper(void) |
{ |
// Wait until main proc is ready for mark help. |
runtime_lock(&work.markgate); |
runtime_unlock(&work.markgate); |
scanblock(nil, 0); |
|
// Wait until main proc is ready for sweep help. |
runtime_lock(&work.sweepgate); |
runtime_unlock(&work.sweepgate); |
sweep(); |
|
if(runtime_xadd(&work.ndone, +1) == work.nproc-1) |
runtime_notewakeup(&work.alldone); |
} |
|
// Semaphore, not Lock, so that the goroutine |
// reschedules when there is contention rather |
// than spinning. |
static uint32 gcsema = 1; |
|
// Initialized from $GOGC. GOGC=off means no gc. |
// |
// Next gc is after we've allocated an extra amount of |
// memory proportional to the amount already in use. |
// If gcpercent=100 and we're using 4M, we'll gc again |
// when we get to 8M. This keeps the gc cost in linear |
// proportion to the allocation cost. Adjusting gcpercent |
// just changes the linear constant (and also the amount of |
// extra memory used). |
static int32 gcpercent = -2; |
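 |
// Illustrative sketch (not part of the original file): the pacing |
// rule as applied at the end of runtime_gc below. |
// |
//	mstats.next_gc = mstats.heap_alloc + mstats.heap_alloc*gcpercent/100; |
//	// e.g. 4MB live and gcpercent=100  =>  next collection at 8MB |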
|
static void |
stealcache(void) |
{ |
M *m; |
|
for(m=runtime_allm; m; m=m->alllink) |
runtime_MCache_ReleaseAll(m->mcache); |
} |
|
static void |
cachestats(void) |
{ |
M *m; |
MCache *c; |
uint32 i; |
uint64 stacks_inuse; |
uint64 stacks_sys; |
|
stacks_inuse = 0; |
stacks_sys = 0; |
for(m=runtime_allm; m; m=m->alllink) { |
runtime_purgecachedstats(m); |
// stacks_inuse += m->stackalloc->inuse; |
// stacks_sys += m->stackalloc->sys; |
c = m->mcache; |
for(i=0; i<nelem(c->local_by_size); i++) { |
mstats.by_size[i].nmalloc += c->local_by_size[i].nmalloc; |
c->local_by_size[i].nmalloc = 0; |
mstats.by_size[i].nfree += c->local_by_size[i].nfree; |
c->local_by_size[i].nfree = 0; |
} |
} |
mstats.stacks_inuse = stacks_inuse; |
mstats.stacks_sys = stacks_sys; |
} |
|
void |
runtime_gc(int32 force) |
{ |
M *m; |
int64 t0, t1, t2, t3; |
uint64 heap0, heap1, obj0, obj1; |
const byte *p; |
bool extra; |
|
// Make sure all registers are saved on stack so that |
// scanstack sees them. |
__builtin_unwind_init(); |
|
// The gc is turned off (via enablegc) until |
// the bootstrap has completed. |
// Also, malloc gets called in the guts |
// of a number of libraries that might be |
// holding locks. To avoid priority inversion |
// problems, don't bother trying to run gc |
// while holding a lock. The next mallocgc |
// without a lock will do the gc instead. |
m = runtime_m(); |
if(!mstats.enablegc || m->locks > 0 || runtime_panicking) |
return; |
|
if(gcpercent == -2) { // first time through |
p = runtime_getenv("GOGC"); |
if(p == nil || p[0] == '\0') |
gcpercent = 100; |
else if(runtime_strcmp((const char*)p, "off") == 0) |
gcpercent = -1; |
else |
gcpercent = runtime_atoi(p); |
|
p = runtime_getenv("GOGCTRACE"); |
if(p != nil) |
gctrace = runtime_atoi(p); |
} |
if(gcpercent < 0) |
return; |
|
runtime_semacquire(&gcsema); |
if(!force && mstats.heap_alloc < mstats.next_gc) { |
runtime_semrelease(&gcsema); |
return; |
} |
|
t0 = runtime_nanotime(); |
nhandoff = 0; |
|
m->gcing = 1; |
runtime_stoptheworld(); |
|
cachestats(); |
heap0 = mstats.heap_alloc; |
obj0 = mstats.nmalloc - mstats.nfree; |
|
runtime_lock(&work.markgate); |
runtime_lock(&work.sweepgate); |
|
extra = false; |
work.nproc = 1; |
if(runtime_gomaxprocs > 1 && runtime_ncpu > 1) { |
runtime_noteclear(&work.alldone); |
work.nproc += runtime_helpgc(&extra); |
} |
work.nwait = 0; |
work.ndone = 0; |
|
runtime_unlock(&work.markgate); // let the helpers in |
mark(scanblock); |
if(DebugMark) |
mark(debug_scanblock); |
t1 = runtime_nanotime(); |
|
work.spans = runtime_mheap.allspans; |
runtime_unlock(&work.sweepgate); // let the helpers in |
sweep(); |
if(work.nproc > 1) |
runtime_notesleep(&work.alldone); |
t2 = runtime_nanotime(); |
|
stealcache(); |
cachestats(); |
|
mstats.next_gc = mstats.heap_alloc+mstats.heap_alloc*gcpercent/100; |
m->gcing = 0; |
|
m->locks++; // disable gc during the mallocs in newproc |
if(finq != nil) { |
// kick off or wake up goroutine to run queued finalizers |
if(fing == nil) |
fing = __go_go(runfinq, nil); |
else if(fingwait) { |
fingwait = 0; |
runtime_ready(fing); |
} |
} |
m->locks--; |
|
cachestats(); |
heap1 = mstats.heap_alloc; |
obj1 = mstats.nmalloc - mstats.nfree; |
|
t3 = runtime_nanotime(); |
mstats.pause_ns[mstats.numgc%nelem(mstats.pause_ns)] = t3 - t0; |
mstats.pause_total_ns += t3 - t0; |
mstats.numgc++; |
if(mstats.debuggc) |
runtime_printf("pause %llu\n", (unsigned long long)t3-t0); |
|
if(gctrace) { |
runtime_printf("gc%d(%d): %llu+%llu+%llu ms %llu -> %llu MB %llu -> %llu (%llu-%llu) objects %llu handoff\n", |
mstats.numgc, work.nproc, (unsigned long long)(t1-t0)/1000000, (unsigned long long)(t2-t1)/1000000, (unsigned long long)(t3-t2)/1000000, |
(unsigned long long)heap0>>20, (unsigned long long)heap1>>20, (unsigned long long)obj0, (unsigned long long)obj1, |
(unsigned long long) mstats.nmalloc, (unsigned long long)mstats.nfree, |
(unsigned long long) nhandoff); |
} |
|
runtime_semrelease(&gcsema); |
|
// If we could have used another helper proc, start one now, |
// in the hope that it will be available next time. |
// It would have been even better to start it before the collection, |
// but doing so requires allocating memory, so it's tricky to |
// coordinate. This lazy approach works out in practice: |
// we don't mind if the first couple gc rounds don't have quite |
// the maximum number of procs. |
runtime_starttheworld(extra); |
|
// give the queued finalizers, if any, a chance to run |
if(finq != nil) |
runtime_gosched(); |
|
if(gctrace > 1 && !force) |
runtime_gc(1); |
} |
|
void runtime_ReadMemStats(MStats *) |
__asm__("libgo_runtime.runtime.ReadMemStats"); |
|
void |
runtime_ReadMemStats(MStats *stats) |
{ |
M *m; |
|
// Have to acquire gcsema to stop the world, |
// because stoptheworld can only be used by |
// one goroutine at a time, and there might be |
// a pending garbage collection already calling it. |
runtime_semacquire(&gcsema); |
m = runtime_m(); |
m->gcing = 1; |
runtime_stoptheworld(); |
cachestats(); |
*stats = mstats; |
m->gcing = 0; |
runtime_semrelease(&gcsema); |
runtime_starttheworld(false); |
} |
|
static void |
runfinq(void* dummy __attribute__ ((unused))) |
{ |
G* gp; |
Finalizer *f; |
FinBlock *fb, *next; |
uint32 i; |
|
gp = runtime_g(); |
for(;;) { |
// There's no need for a lock in this section |
// because it only conflicts with the garbage |
// collector, and the garbage collector only |
// runs when everyone else is stopped, and |
// runfinq only stops at the gosched() or |
// during the calls in the for loop. |
fb = finq; |
finq = nil; |
if(fb == nil) { |
fingwait = 1; |
gp->status = Gwaiting; |
gp->waitreason = "finalizer wait"; |
runtime_gosched(); |
continue; |
} |
for(; fb; fb=next) { |
next = fb->next; |
for(i=0; i<(uint32)fb->cnt; i++) { |
void *params[1]; |
|
f = &fb->fin[i]; |
params[0] = &f->arg; |
runtime_setblockspecial(f->arg, false); |
reflect_call(f->ft, (void*)f->fn, 0, 0, params, nil); |
f->fn = nil; |
f->arg = nil; |
} |
fb->cnt = 0; |
fb->next = finc; |
finc = fb; |
} |
runtime_gc(1); // trigger another gc to clean up the finalized objects, if possible |
} |
} |
|
// mark the block at v of size n as allocated. |
// If noptr is true, mark it as having no pointers. |
void |
runtime_markallocated(void *v, uintptr n, bool noptr) |
{ |
uintptr *b, obits, bits, off, shift; |
|
// if(0) |
// runtime_printf("markallocated %p+%p\n", v, n); |
|
if((byte*)v+n > (byte*)runtime_mheap.arena_used || (byte*)v < runtime_mheap.arena_start) |
runtime_throw("markallocated: bad pointer"); |
|
off = (uintptr*)v - (uintptr*)runtime_mheap.arena_start; // word offset |
b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1; |
shift = off % wordsPerBitmapWord; |
|
for(;;) { |
obits = *b; |
bits = (obits & ~(bitMask<<shift)) | (bitAllocated<<shift); |
if(noptr) |
bits |= bitNoPointers<<shift; |
if(runtime_singleproc) { |
*b = bits; |
break; |
} else { |
// more than one goroutine is potentially running: use atomic op |
if(runtime_casp((void**)b, (void*)obits, (void*)bits)) |
break; |
} |
} |
} |
|
// mark the block at v of size n as freed. |
void |
runtime_markfreed(void *v, uintptr n) |
{ |
uintptr *b, obits, bits, off, shift; |
|
// if(0) |
// runtime_printf("markfreed %p+%p\n", v, n); |
 |
if((byte*)v+n > (byte*)runtime_mheap.arena_used || (byte*)v < runtime_mheap.arena_start) |
runtime_throw("markfreed: bad pointer"); |
|
off = (uintptr*)v - (uintptr*)runtime_mheap.arena_start; // word offset |
b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1; |
shift = off % wordsPerBitmapWord; |
|
for(;;) { |
obits = *b; |
bits = (obits & ~(bitMask<<shift)) | (bitBlockBoundary<<shift); |
if(runtime_singleproc) { |
*b = bits; |
break; |
} else { |
// more than one goroutine is potentially running: use atomic op |
if(runtime_casp((void**)b, (void*)obits, (void*)bits)) |
break; |
} |
} |
} |
|
// check that the block at v of size n is marked freed. |
void |
runtime_checkfreed(void *v, uintptr n) |
{ |
uintptr *b, bits, off, shift; |
|
if(!runtime_checking) |
return; |
|
if((byte*)v+n > (byte*)runtime_mheap.arena_used || (byte*)v < runtime_mheap.arena_start) |
return; // not allocated, so okay |
|
off = (uintptr*)v - (uintptr*)runtime_mheap.arena_start; // word offset |
b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1; |
shift = off % wordsPerBitmapWord; |
|
bits = *b>>shift; |
if((bits & bitAllocated) != 0) { |
runtime_printf("checkfreed %p+%p: off=%p have=%p\n", |
v, (void*)n, (void*)off, (void*)(bits & bitMask)); |
runtime_throw("checkfreed: not freed"); |
} |
} |
|
// mark the span of memory at v as having n blocks of the given size. |
// if leftover is true, there is left over space at the end of the span. |
void |
runtime_markspan(void *v, uintptr size, uintptr n, bool leftover) |
{ |
uintptr *b, off, shift; |
byte *p; |
|
if((byte*)v+size*n > (byte*)runtime_mheap.arena_used || (byte*)v < runtime_mheap.arena_start) |
runtime_throw("markspan: bad pointer"); |
|
p = v; |
if(leftover) // mark a boundary just past end of last block too |
n++; |
for(; n-- > 0; p += size) { |
// Okay to use non-atomic ops here, because we control |
// the entire span, and each bitmap word has bits for only |
// one span, so no other goroutines are changing these |
// bitmap words. |
off = (uintptr*)p - (uintptr*)runtime_mheap.arena_start; // word offset |
b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1; |
shift = off % wordsPerBitmapWord; |
*b = (*b & ~(bitMask<<shift)) | (bitBlockBoundary<<shift); |
} |
} |
|
// unmark the span of memory at v of length n bytes. |
void |
runtime_unmarkspan(void *v, uintptr n) |
{ |
uintptr *p, *b, off; |
|
if((byte*)v+n > (byte*)runtime_mheap.arena_used || (byte*)v < runtime_mheap.arena_start) |
runtime_throw("unmarkspan: bad pointer"); |
|
p = v; |
off = p - (uintptr*)runtime_mheap.arena_start; // word offset |
if(off % wordsPerBitmapWord != 0) |
runtime_throw("unmarkspan: unaligned pointer"); |
b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1; |
n /= PtrSize; |
if(n%wordsPerBitmapWord != 0) |
runtime_throw("unmarkspan: unaligned length"); |
// Okay to use non-atomic ops here, because we control |
// the entire span, and each bitmap word has bits for only |
// one span, so no other goroutines are changing these |
// bitmap words. |
n /= wordsPerBitmapWord; |
while(n-- > 0) |
*b-- = 0; |
} |
|
bool |
runtime_blockspecial(void *v) |
{ |
uintptr *b, off, shift; |
|
if(DebugMark) |
return true; |
|
off = (uintptr*)v - (uintptr*)runtime_mheap.arena_start; |
b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1; |
shift = off % wordsPerBitmapWord; |
|
return (*b & (bitSpecial<<shift)) != 0; |
} |
|
void |
runtime_setblockspecial(void *v, bool s) |
{ |
uintptr *b, off, shift, bits, obits; |
|
if(DebugMark) |
return; |
|
off = (uintptr*)v - (uintptr*)runtime_mheap.arena_start; |
b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1; |
shift = off % wordsPerBitmapWord; |
|
for(;;) { |
obits = *b; |
if(s) |
bits = obits | (bitSpecial<<shift); |
else |
bits = obits & ~(bitSpecial<<shift); |
if(runtime_singleproc) { |
*b = bits; |
break; |
} else { |
// more than one goroutine is potentially running: use atomic op |
if(runtime_casp((void**)b, (void*)obits, (void*)bits)) |
break; |
} |
} |
} |
|
void |
runtime_MHeap_MapBits(MHeap *h) |
{ |
// Caller has added extra mappings to the arena. |
// Add extra mappings of bitmap words as needed. |
// We allocate extra bitmap pieces in chunks of bitmapChunk. |
enum { |
bitmapChunk = 8192 |
}; |
uintptr n; |
|
n = (h->arena_used - h->arena_start) / wordsPerBitmapWord; |
n = (n+bitmapChunk-1) & ~(bitmapChunk-1); |
if(h->bitmap_mapped >= n) |
return; |
|
runtime_SysMap(h->arena_start - n, n - h->bitmap_mapped); |
h->bitmap_mapped = n; |
} |
/go-type-float.c
0,0 → 1,96
/* go-type-float.c -- hash and equality float functions. |
|
Copyright 2012 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include "runtime.h" |
#include "go-type.h" |
|
/* The 32-bit and 64-bit types. */ |
|
typedef unsigned int SItype __attribute__ ((mode (SI))); |
typedef unsigned int DItype __attribute__ ((mode (DI))); |
|
/* Hash function for float types. */ |
|
uintptr_t |
__go_type_hash_float (const void *vkey, uintptr_t key_size) |
{ |
if (key_size == 4) |
{ |
union |
{ |
unsigned char a[4]; |
float f; |
SItype si; |
} uf; |
float f; |
|
__builtin_memcpy (uf.a, vkey, 4); |
f = uf.f; |
if (__builtin_isinff (f) || __builtin_isnanf (f) || f == 0) |
return 0; |
return (uintptr_t) uf.si; |
} |
else if (key_size == 8) |
{ |
union |
{ |
unsigned char a[8]; |
double d; |
DItype di; |
} ud; |
double d; |
|
__builtin_memcpy (ud.a, vkey, 8); |
d = ud.d; |
if (__builtin_isinf (d) || __builtin_isnan (d) || d == 0) |
return 0; |
return (uintptr_t) ud.di; |
} |
else |
runtime_throw ("__go_type_hash_float: invalid float size"); |
} |
|
/* Equality function for float types. */ |
|
_Bool |
__go_type_equal_float (const void *vk1, const void *vk2, uintptr_t key_size) |
{ |
if (key_size == 4) |
{ |
union |
{ |
unsigned char a[4]; |
float f; |
} uf; |
float f1; |
float f2; |
|
__builtin_memcpy (uf.a, vk1, 4); |
f1 = uf.f; |
__builtin_memcpy (uf.a, vk2, 4); |
f2 = uf.f; |
return f1 == f2; |
} |
else if (key_size == 8) |
{ |
union |
{ |
unsigned char a[8]; |
double d; |
DItype di; |
} ud; |
double d1; |
double d2; |
|
__builtin_memcpy (ud.a, vk1, 8); |
d1 = ud.d; |
__builtin_memcpy (ud.a, vk2, 8); |
d2 = ud.d; |
return d1 == d2; |
} |
else |
runtime_throw ("__go_type_equal_float: invalid float size"); |
} |
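 |
/* Illustrative note (not part of the original file): equal Go values |
   must hash equal, and that rules out a plain bitwise hash for |
   floats. |
 |
     double a = 0.0, b = -0.0; |
     // a == b, yet the two bit patterns differ in the sign bit, so |
     // an identity hash would put equal keys in different buckets. |
     // NaN compares unequal to everything, so hashing it (and the |
     // other special values) to the constant 0 is always safe.  */ |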
/thread-sema.c
0,0 → 1,147
// Copyright 2009 The Go Authors. All rights reserved. |
// Use of this source code is governed by a BSD-style |
// license that can be found in the LICENSE file. |
|
#include "config.h" |
#include "runtime.h" |
|
#include <errno.h> |
#include <stdlib.h> |
#include <time.h> |
#include <semaphore.h> |
|
/* If we don't have sem_timedwait, use pthread_cond_timedwait instead. |
We don't always use condition variables because on some systems |
pthread_mutex_lock and pthread_mutex_unlock must be called by the |
same thread. That is never true of semaphores. */ |
|
struct go_sem |
{ |
sem_t sem; |
|
#ifndef HAVE_SEM_TIMEDWAIT |
int timedwait; |
pthread_mutex_t mutex; |
pthread_cond_t cond; |
#endif |
}; |
|
/* Create a semaphore. */ |
|
uintptr |
runtime_semacreate(void) |
{ |
struct go_sem *p; |
|
/* Call malloc rather than runtime_malloc. This will allocate space |
on the C heap. We can't call runtime_malloc here because it |
could cause a deadlock. */ |
p = malloc (sizeof (struct go_sem)); |
if (sem_init (&p->sem, 0, 0) != 0) |
runtime_throw ("sem_init"); |
|
#ifndef HAVE_SEM_TIMEDWAIT |
if (pthread_mutex_init (&p->mutex, NULL) != 0) |
runtime_throw ("pthread_mutex_init"); |
if (pthread_cond_init (&p->cond, NULL) != 0) |
runtime_throw ("pthread_cond_init"); |
#endif |
|
return (uintptr) p; |
} |
|
/* Acquire m->waitsema. */ |
|
int32 |
runtime_semasleep (int64 ns) |
{ |
M *m; |
struct go_sem *sem; |
int r; |
|
m = runtime_m (); |
sem = (struct go_sem *) m->waitsema; |
if (ns >= 0) |
{ |
int64 abs; |
struct timespec ts; |
int err; |
|
abs = ns + runtime_nanotime (); |
ts.tv_sec = abs / 1000000000LL; |
ts.tv_nsec = abs % 1000000000LL; |
|
err = 0; |
|
#ifdef HAVE_SEM_TIMEDWAIT |
r = sem_timedwait (&sem->sem, &ts); |
if (r != 0) |
err = errno; |
#else |
if (pthread_mutex_lock (&sem->mutex) != 0) |
runtime_throw ("pthread_mutex_lock"); |
|
while ((r = sem_trywait (&sem->sem)) != 0) |
{ |
r = pthread_cond_timedwait (&sem->cond, &sem->mutex, &ts); |
if (r != 0) |
{ |
err = r; |
break; |
} |
} |
|
if (pthread_mutex_unlock (&sem->mutex) != 0) |
runtime_throw ("pthread_mutex_unlock"); |
#endif |
|
if (err != 0) |
{ |
if (err == ETIMEDOUT || err == EAGAIN || err == EINTR) |
return -1; |
runtime_throw ("sema_timedwait"); |
} |
return 0; |
} |
|
while (sem_wait (&sem->sem) != 0) |
{ |
if (errno == EINTR) |
continue; |
runtime_throw ("sem_wait"); |
} |
|
return 0; |
} |
|
/* Wake up mp->waitsema. */ |
|
void |
runtime_semawakeup (M *mp) |
{ |
struct go_sem *sem; |
|
sem = (struct go_sem *) mp->waitsema; |
if (sem_post (&sem->sem) != 0) |
runtime_throw ("sem_post"); |
|
#ifndef HAVE_SEM_TIMEDWAIT |
if (pthread_mutex_lock (&sem->mutex) != 0) |
runtime_throw ("pthread_mutex_lock"); |
if (pthread_cond_broadcast (&sem->cond) != 0) |
runtime_throw ("pthread_cond_broadcast"); |
if (pthread_mutex_unlock (&sem->mutex) != 0) |
runtime_throw ("pthread_mutex_unlock"); |
#endif |
} |
|
void |
runtime_osinit (void) |
{ |
} |
|
void |
runtime_goenvs (void) |
{ |
runtime_goenvs_unix (); |
} |
/go-eface-compare.c
0,0 → 1,38
/* go-eface-compare.c -- compare two empty interface values. |
|
Copyright 2010 The Go Authors. All rights reserved. |
Use of this source code is governed by a BSD-style |
license that can be found in the LICENSE file. */ |
|
#include "runtime.h" |
#include "interface.h" |
|
/* Compare two empty interface values. Return 0 for equal, nonzero |
for not equal (the return value is like strcmp). */ |
|
int |
__go_empty_interface_compare (struct __go_empty_interface left, |
struct __go_empty_interface right) |
{ |
const struct __go_type_descriptor *left_descriptor; |
|
left_descriptor = left.__type_descriptor; |
|
if (((uintptr_t) left_descriptor & reflectFlags) != 0 |
|| ((uintptr_t) right.__type_descriptor & reflectFlags) != 0) |
runtime_panicstring ("invalid interface value"); |
|
if (left_descriptor == NULL && right.__type_descriptor == NULL) |
return 0; |
if (left_descriptor == NULL || right.__type_descriptor == NULL) |
return 1; |
if (!__go_type_descriptors_equal (left_descriptor, |
right.__type_descriptor)) |
return 1; |
if (__go_is_pointer_type (left_descriptor)) |
return left.__object == right.__object ? 0 : 1; |
if (!left_descriptor->__equalfn (left.__object, right.__object, |
left_descriptor->__size)) |
return 1; |
return 0; |
} |
/runtime1.goc
0,0 → 1,14
// Copyright 2010 The Go Authors. All rights reserved. |
// Use of this source code is governed by a BSD-style |
// license that can be found in the LICENSE file. |
|
package runtime |
#include "runtime.h" |
|
func GOMAXPROCS(n int32) (ret int32) { |
ret = runtime_gomaxprocsfunc(n); |
} |
|
func NumCPU() (ret int32) { |
ret = runtime_ncpu; |
} |
/mheap.c
0,0 → 1,377
// Copyright 2009 The Go Authors. All rights reserved. |
// Use of this source code is governed by a BSD-style |
// license that can be found in the LICENSE file. |
|
// Page heap. |
// |
// See malloc.h for overview. |
// |
// When a MSpan is in the heap free list, state == MSpanFree |
// and heapmap(s->start) == span, heapmap(s->start+s->npages-1) == span. |
// |
// When a MSpan is allocated, state == MSpanInUse |
// and heapmap(i) == span for all s->start <= i < s->start+s->npages. |
|
#include "runtime.h" |
#include "arch.h" |
#include "malloc.h" |
|
static MSpan *MHeap_AllocLocked(MHeap*, uintptr, int32); |
static bool MHeap_Grow(MHeap*, uintptr); |
static void MHeap_FreeLocked(MHeap*, MSpan*); |
static MSpan *MHeap_AllocLarge(MHeap*, uintptr); |
static MSpan *BestFit(MSpan*, uintptr, MSpan*); |
|
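// RecordSpan is the hook FixAlloc calls for each MSpan it carves |
// out: the span is threaded onto h->allspans, the list the sweep |
// phase later walks. |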
static void |
RecordSpan(void *vh, byte *p) |
{ |
MHeap *h; |
MSpan *s; |
|
h = vh; |
s = (MSpan*)p; |
s->allnext = h->allspans; |
h->allspans = s; |
} |
|
// Initialize the heap; fetch memory using alloc. |
void |
runtime_MHeap_Init(MHeap *h, void *(*alloc)(uintptr)) |
{ |
uint32 i; |
|
runtime_FixAlloc_Init(&h->spanalloc, sizeof(MSpan), alloc, RecordSpan, h); |
runtime_FixAlloc_Init(&h->cachealloc, sizeof(MCache), alloc, nil, nil); |
// h->mapcache needs no init |
for(i=0; i<nelem(h->free); i++) |
runtime_MSpanList_Init(&h->free[i]); |
runtime_MSpanList_Init(&h->large); |
for(i=0; i<nelem(h->central); i++) |
runtime_MCentral_Init(&h->central[i], i); |
} |
|
// Allocate a new span of npage pages from the heap |
// and record its size class in the HeapMap and HeapMapCache. |
MSpan* |
runtime_MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct) |
{ |
MSpan *s; |
|
runtime_lock(h); |
runtime_purgecachedstats(runtime_m()); |
s = MHeap_AllocLocked(h, npage, sizeclass); |
if(s != nil) { |
mstats.heap_inuse += npage<<PageShift; |
if(acct) { |
mstats.heap_objects++; |
mstats.heap_alloc += npage<<PageShift; |
} |
} |
runtime_unlock(h); |
return s; |
} |
|
static MSpan* |
MHeap_AllocLocked(MHeap *h, uintptr npage, int32 sizeclass) |
{ |
uintptr n; |
MSpan *s, *t; |
PageID p; |
|
// Try in fixed-size lists up to max. |
for(n=npage; n < nelem(h->free); n++) { |
if(!runtime_MSpanList_IsEmpty(&h->free[n])) { |
s = h->free[n].next; |
goto HaveSpan; |
} |
} |
|
// Best fit in list of large spans. |
if((s = MHeap_AllocLarge(h, npage)) == nil) { |
if(!MHeap_Grow(h, npage)) |
return nil; |
if((s = MHeap_AllocLarge(h, npage)) == nil) |
return nil; |
} |
|
HaveSpan: |
// Mark span in use. |
if(s->state != MSpanFree) |
runtime_throw("MHeap_AllocLocked - MSpan not free"); |
if(s->npages < npage) |
runtime_throw("MHeap_AllocLocked - bad npages"); |
runtime_MSpanList_Remove(s); |
s->state = MSpanInUse; |
mstats.heap_idle -= s->npages<<PageShift; |
|
if(s->npages > npage) { |
// Trim extra and put it back in the heap. |
t = runtime_FixAlloc_Alloc(&h->spanalloc); |
mstats.mspan_inuse = h->spanalloc.inuse; |
mstats.mspan_sys = h->spanalloc.sys; |
runtime_MSpan_Init(t, s->start + npage, s->npages - npage); |
s->npages = npage; |
p = t->start; |
if(sizeof(void*) == 8) |
p -= ((uintptr)h->arena_start>>PageShift); |
if(p > 0) |
h->map[p-1] = s; |
h->map[p] = t; |
h->map[p+t->npages-1] = t; |
*(uintptr*)(t->start<<PageShift) = *(uintptr*)(s->start<<PageShift); // copy "needs zeroing" mark |
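		// Mark t in use just long enough to satisfy the sanity
		// check in MHeap_FreeLocked, which then handles putting t
		// on the right free list and any coalescing.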
t->state = MSpanInUse; |
MHeap_FreeLocked(h, t); |
} |
|
if(*(uintptr*)(s->start<<PageShift) != 0) |
runtime_memclr((byte*)(s->start<<PageShift), s->npages<<PageShift); |
|
	// Record span info, because the garbage collector needs to be
	// able to map an interior pointer to its containing span.
s->sizeclass = sizeclass; |
p = s->start; |
if(sizeof(void*) == 8) |
p -= ((uintptr)h->arena_start>>PageShift); |
for(n=0; n<npage; n++) |
h->map[p+n] = s; |
return s; |
} |
|
// Allocate a span of exactly npage pages from the list of large spans. |
static MSpan* |
MHeap_AllocLarge(MHeap *h, uintptr npage) |
{ |
return BestFit(&h->large, npage, nil); |
} |
|
// Search list for smallest span with >= npage pages. |
// If there are multiple smallest spans, take the one |
// with the earliest starting address. |
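// For example, with free large spans of 4, 6 and 6 pages, a request
// for 5 pages returns the 6-page span with the lower start address.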
static MSpan* |
BestFit(MSpan *list, uintptr npage, MSpan *best) |
{ |
MSpan *s; |
|
for(s=list->next; s != list; s=s->next) { |
if(s->npages < npage) |
continue; |
if(best == nil |
|| s->npages < best->npages |
|| (s->npages == best->npages && s->start < best->start)) |
best = s; |
} |
return best; |
} |
|
// Try to add at least npage pages of memory to the heap, |
// returning whether it worked. |
static bool |
MHeap_Grow(MHeap *h, uintptr npage) |
{ |
uintptr ask; |
void *v; |
MSpan *s; |
PageID p; |
|
	// Ask for a big chunk, to reduce the number of mappings
	// the operating system needs to track; this also amortizes
	// the fixed overhead of each operating-system mapping.
	// Allocate a multiple of 64kB (16 pages).
npage = (npage+15)&~15; |
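	// (npage+15)&~15 rounds npage up to a multiple of 16 pages;
	// with 4kB pages (PageShift == 12) that is a 64kB granule.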
ask = npage<<PageShift; |
if(ask < HeapAllocChunk) |
ask = HeapAllocChunk; |
|
v = runtime_MHeap_SysAlloc(h, ask); |
if(v == nil) { |
if(ask > (npage<<PageShift)) { |
ask = npage<<PageShift; |
v = runtime_MHeap_SysAlloc(h, ask); |
} |
if(v == nil) { |
runtime_printf("runtime: out of memory: cannot allocate %llu-byte block (%llu in use)\n", (unsigned long long)ask, (unsigned long long)mstats.heap_sys); |
return false; |
} |
} |
mstats.heap_sys += ask; |
|
// Create a fake "in use" span and free it, so that the |
// right coalescing happens. |
s = runtime_FixAlloc_Alloc(&h->spanalloc); |
mstats.mspan_inuse = h->spanalloc.inuse; |
mstats.mspan_sys = h->spanalloc.sys; |
runtime_MSpan_Init(s, (uintptr)v>>PageShift, ask>>PageShift); |
p = s->start; |
if(sizeof(void*) == 8) |
p -= ((uintptr)h->arena_start>>PageShift); |
h->map[p] = s; |
h->map[p + s->npages - 1] = s; |
s->state = MSpanInUse; |
MHeap_FreeLocked(h, s); |
return true; |
} |
|
// Look up the span at the given address.
// The address is guaranteed to be in the map
// and to be the start or end of a span.
MSpan* |
runtime_MHeap_Lookup(MHeap *h, void *v) |
{ |
uintptr p; |
|
p = (uintptr)v; |
if(sizeof(void*) == 8) |
p -= (uintptr)h->arena_start; |
return h->map[p >> PageShift]; |
} |
|
// Look up the span at the given address. |
// Address is *not* guaranteed to be in map |
// and may be anywhere in the span. |
// Map entries for the middle of a span are only |
// valid for allocated spans. Free spans may have |
// other garbage in their middles, so we have to |
// check for that. |
MSpan* |
runtime_MHeap_LookupMaybe(MHeap *h, void *v) |
{ |
MSpan *s; |
PageID p, q; |
|
if((byte*)v < h->arena_start || (byte*)v >= h->arena_used) |
return nil; |
p = (uintptr)v>>PageShift; |
q = p; |
if(sizeof(void*) == 8) |
q -= (uintptr)h->arena_start >> PageShift; |
s = h->map[q]; |
if(s == nil || p < s->start || p - s->start >= s->npages) |
return nil; |
if(s->state != MSpanInUse) |
return nil; |
return s; |
} |
|
// Free the span back into the heap. |
void |
runtime_MHeap_Free(MHeap *h, MSpan *s, int32 acct) |
{ |
runtime_lock(h); |
runtime_purgecachedstats(runtime_m()); |
mstats.heap_inuse -= s->npages<<PageShift; |
if(acct) { |
mstats.heap_alloc -= s->npages<<PageShift; |
mstats.heap_objects--; |
} |
MHeap_FreeLocked(h, s); |
runtime_unlock(h); |
} |
|
static void |
MHeap_FreeLocked(MHeap *h, MSpan *s) |
{ |
uintptr *sp, *tp; |
MSpan *t; |
PageID p; |
|
if(s->state != MSpanInUse || s->ref != 0) { |
// runtime_printf("MHeap_FreeLocked - span %p ptr %p state %d ref %d\n", s, s->start<<PageShift, s->state, s->ref); |
runtime_throw("MHeap_FreeLocked - invalid free"); |
} |
mstats.heap_idle += s->npages<<PageShift; |
s->state = MSpanFree; |
runtime_MSpanList_Remove(s); |
sp = (uintptr*)(s->start<<PageShift); |
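	// The first word of a span's memory doubles as its "needs
	// zeroing" mark (see the copy in MHeap_AllocLocked), so the
	// coalescing below ORs the marks of merged spans together.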
|
// Coalesce with earlier, later spans. |
p = s->start; |
if(sizeof(void*) == 8) |
p -= (uintptr)h->arena_start >> PageShift; |
if(p > 0 && (t = h->map[p-1]) != nil && t->state != MSpanInUse) { |
tp = (uintptr*)(t->start<<PageShift); |
*tp |= *sp; // propagate "needs zeroing" mark |
s->start = t->start; |
s->npages += t->npages; |
p -= t->npages; |
h->map[p] = s; |
runtime_MSpanList_Remove(t); |
t->state = MSpanDead; |
runtime_FixAlloc_Free(&h->spanalloc, t); |
mstats.mspan_inuse = h->spanalloc.inuse; |
mstats.mspan_sys = h->spanalloc.sys; |
} |
if(p+s->npages < nelem(h->map) && (t = h->map[p+s->npages]) != nil && t->state != MSpanInUse) { |
tp = (uintptr*)(t->start<<PageShift); |
*sp |= *tp; // propagate "needs zeroing" mark |
s->npages += t->npages; |
h->map[p + s->npages - 1] = s; |
runtime_MSpanList_Remove(t); |
t->state = MSpanDead; |
runtime_FixAlloc_Free(&h->spanalloc, t); |
mstats.mspan_inuse = h->spanalloc.inuse; |
mstats.mspan_sys = h->spanalloc.sys; |
} |
|
// Insert s into appropriate list. |
if(s->npages < nelem(h->free)) |
runtime_MSpanList_Insert(&h->free[s->npages], s); |
else |
runtime_MSpanList_Insert(&h->large, s); |
|
// TODO(rsc): IncrementalScavenge() to return memory to OS. |
} |
|
// Initialize a new span with the given start and npages. |
void |
runtime_MSpan_Init(MSpan *span, PageID start, uintptr npages) |
{ |
span->next = nil; |
span->prev = nil; |
span->start = start; |
span->npages = npages; |
span->freelist = nil; |
span->ref = 0; |
span->sizeclass = 0; |
span->state = 0; |
} |
|
// Initialize an empty doubly-linked list. |
void |
runtime_MSpanList_Init(MSpan *list) |
{ |
list->state = MSpanListHead; |
list->next = list; |
list->prev = list; |
} |
|
void |
runtime_MSpanList_Remove(MSpan *span) |
{ |
if(span->prev == nil && span->next == nil) |
return; |
span->prev->next = span->next; |
span->next->prev = span->prev; |
span->prev = nil; |
span->next = nil; |
} |
|
bool |
runtime_MSpanList_IsEmpty(MSpan *list) |
{ |
return list->next == list; |
} |
|
void |
runtime_MSpanList_Insert(MSpan *list, MSpan *span) |
{ |
if(span->next != nil || span->prev != nil) { |
// runtime_printf("failed MSpanList_Insert %p %p %p\n", span, span->next, span->prev); |
runtime_throw("MSpanList_Insert"); |
} |
span->next = list->next; |
span->prev = list; |
span->next->prev = span; |
span->prev->next = span; |
} |
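
// A minimal usage sketch of the list primitives (hypothetical):
//
//	MSpan list, *s;
//	runtime_MSpanList_Init(&list);	// empty circular list; header is sentinel
//	runtime_MSpanList_Insert(&list, s);	// push s at the front
//	if(!runtime_MSpanList_IsEmpty(&list))
//		runtime_MSpanList_Remove(list.next);	// pop the front span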
|
|
/string.goc
0,0 → 1,78
// Copyright 2009, 2010 The Go Authors. All rights reserved. |
// Use of this source code is governed by a BSD-style |
// license that can be found in the LICENSE file. |
|
package runtime |
#include "runtime.h" |
#include "arch.h" |
#include "malloc.h" |
|
#define charntorune(pv, str, len) __go_get_rune(str, len, pv) |
|
int32 |
runtime_findnull(const byte *s) |
{ |
if(s == nil) |
return 0; |
return __builtin_strlen((const char*) s); |
} |
|
String |
runtime_gostringnocopy(const byte *str) |
{ |
String s; |
|
s.__data = (const unsigned char *) str; |
s.__length = runtime_findnull(str); |
return s; |
} |
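
// Note that the String returned above aliases the C string's bytes
// rather than copying them; the caller must keep the memory alive
// (and unmodified) for as long as the String is in use.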
|
enum |
{ |
Runeself = 0x80, |
}; |
|
func stringiter(s String, k int32) (retk int32) { |
int32 l, n; |
|
if(k >= s.__length) { |
// retk=0 is end of iteration |
retk = 0; |
goto out; |
} |
|
l = s.__data[k]; |
if(l < Runeself) { |
retk = k+1; |
goto out; |
} |
|
	// multi-byte rune
n = charntorune(&l, s.__data+k, s.__length-k); |
retk = k + (n ? n : 1); |
|
out: |
} |
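
// Callers are expected to advance through the string like this
// (hypothetical sketch; stringiter2 below additionally yields the
// decoded rune in retv):
//
//	int32 k = 0, next;
//	while((next = stringiter(s, k)) != 0) {
//		// the rune encoded at s.__data[k..next) was just stepped over
//		k = next;
//	}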
|
func stringiter2(s String, k int32) (retk int32, retv int32) { |
int32 n; |
|
if(k >= s.__length) { |
// retk=0 is end of iteration |
retk = 0; |
retv = 0; |
goto out; |
} |
|
retv = s.__data[k]; |
if(retv < Runeself) { |
retk = k+1; |
goto out; |
} |
|
	// multi-byte rune
n = charntorune(&retv, s.__data+k, s.__length-k); |
retk = k + (n ? n : 1); |
|
out: |
} |