/* Mudflap: narrow-pointer bounds-checking by tree rewriting.
   Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
   Contributed by Frank Ch. Eigler <fche@redhat.com>
   and Graydon Hoare <graydon@redhat.com>

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 2, or (at your option) any later
   version.

   In addition to the permissions in the GNU General Public License, the
   Free Software Foundation gives you unlimited permission to link the
   compiled version of this file into combinations with other programs,
   and to distribute those combinations without any restriction coming
   from the use of this file.  (The General Public License restrictions
   do apply in other respects; for example, they cover modification of
   the file, and distribution when not linked into a combined
   executable.)

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to the Free
   Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.  */

#include "config.h"

#ifndef HAVE_SOCKLEN_T
#define socklen_t int
#endif

/* These attempt to coax various unix flavours to declare all our
   needed tidbits in the system headers.  */
#if !defined(__FreeBSD__) && !defined(__APPLE__)
#define _POSIX_SOURCE
#endif /* Some BSDs break <sys/socket.h> if this is defined.  */
#define _GNU_SOURCE
#define _XOPEN_SOURCE
#define _BSD_TYPES
#define __EXTENSIONS__
#define _ALL_SOURCE
#define _LARGE_FILE_API
#define _XOPEN_SOURCE_EXTENDED 1

#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <assert.h>
#include <errno.h>
#include <stdbool.h>

#include "mf-runtime.h"
#include "mf-impl.h"

#ifdef _MUDFLAP
#error "Do not compile this file with -fmudflap!"
#endif

#ifndef LIBMUDFLAPTH
#error "pthreadstuff is to be included only in libmudflapth"
#endif

/* ??? Why isn't this done once in the header files.  */
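/* (These declarations give the wrappers below access to the underlying
   libc/libpthread functions through CALL_REAL.)  */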
DECLARE(void *, malloc, size_t sz);
DECLARE(void, free, void *ptr);
DECLARE(int, pthread_create, pthread_t *thr, const pthread_attr_t *attr,
        void * (*start) (void *), void *arg);


/* Multithreading support hooks.  */


#ifndef HAVE_TLS
/* We don't have TLS.  Ordinarily we could use pthread keys, but since we're
   commandeering malloc/free that presents a few problems.  The first is that
   we'll recurse from __mf_get_state to pthread_setspecific to malloc back to
   __mf_get_state during thread startup.  This can be solved with clever uses
   of a mutex.  The second problem is that thread shutdown is indistinguishable
   from thread startup, since libpthread is deallocating our state variable.
   I've no good solution for this.

   Which leaves us to handle this mess totally by hand.  */

/* Yes, we want this prime.  If pthread_t is a pointer, it's almost always
   page aligned, and if we use a smaller power of 2, this results in "%N"
   being the worst possible hash -- all threads hash to zero.  */
#define LIBMUDFLAPTH_THREADS_MAX 1021

struct mf_thread_data
{
  pthread_t self;
  unsigned char used_p;
  unsigned char state;
};

static struct mf_thread_data mf_thread_data[LIBMUDFLAPTH_THREADS_MAX];
static pthread_mutex_t mf_thread_data_lock = PTHREAD_MUTEX_INITIALIZER;
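
/* Lookups in this fixed-size, open-addressed table are lock-free; the lock
   is taken only when a new slot is claimed.  */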
#define PTHREAD_HASH(p) ((unsigned long) (p) % LIBMUDFLAPTH_THREADS_MAX)

static struct mf_thread_data *
__mf_find_threadinfo (int alloc)
{
  pthread_t self = pthread_self ();
  unsigned long hash = PTHREAD_HASH (self);
  unsigned long rehash;

#ifdef __alpha__
  /* Alpha has the loosest memory ordering rules of all.  We need a memory
     barrier to flush the reorder buffer before considering a *read* of a
     shared variable.  Since we're not always taking a lock, we have to do
     this by hand.  */
  __sync_synchronize ();
#endif
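
  /* Probe from this thread's home slot with a stride of 7; 7 and the table
     size (1021) are coprime, so the walk visits every slot before wrapping
     back to the starting point.  */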
  rehash = hash;
  while (1)
    {
      if (mf_thread_data[rehash].used_p && mf_thread_data[rehash].self == self)
        return &mf_thread_data[rehash];

      rehash += 7;
      if (rehash >= LIBMUDFLAPTH_THREADS_MAX)
        rehash -= LIBMUDFLAPTH_THREADS_MAX;
      if (rehash == hash)
        break;
    }
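
  /* No slot found.  If the caller asked for one, claim a free slot under the
     lock.  The thread id is published with a barrier before used_p is set,
     so the lock-free search above never sees a half-initialized entry.  */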
  if (alloc)
    {
      pthread_mutex_lock (&mf_thread_data_lock);

      rehash = hash;
      while (1)
        {
          if (!mf_thread_data[rehash].used_p)
            {
              mf_thread_data[rehash].self = self;
              __sync_synchronize ();
              mf_thread_data[rehash].used_p = 1;

              pthread_mutex_unlock (&mf_thread_data_lock);
              return &mf_thread_data[rehash];
            }

          rehash += 7;
          if (rehash >= LIBMUDFLAPTH_THREADS_MAX)
            rehash -= LIBMUDFLAPTH_THREADS_MAX;
          if (rehash == hash)
            break;
        }

      pthread_mutex_unlock (&mf_thread_data_lock);
    }

  return NULL;
}

enum __mf_state_enum
__mf_get_state (void)
{
  struct mf_thread_data *data = __mf_find_threadinfo (0);
  if (data)
    return data->state;

  /* If we've never seen this thread before, consider it to be in the
     reentrant state.  The state gets reset to active for the main thread
     in __mf_init, and for child threads in __mf_pthread_spawner.

     The trickiest bit here is that the LinuxThreads pthread_manager thread
     should *always* be considered to be reentrant, so that none of our
     hooks actually do anything.  Why?  Because that thread isn't a real
     thread from the point of view of the thread library, and so lots of
     stuff isn't initialized, leading to SEGV very quickly.  Even calling
     pthread_self is a bit suspect, but it happens to work.  */

  return reentrant;
}

void
__mf_set_state (enum __mf_state_enum new_state)
{
  struct mf_thread_data *data = __mf_find_threadinfo (1);
  data->state = new_state;
}
#endif

/* The following two functions are used only with __mf_opts.heur_std_data.
   We're interested in recording the location of the thread-local errno
   variable.

   Note that this doesn't handle TLS references in general; we have no
   visibility into __tls_get_addr and when that memory is allocated at
   runtime.  Hopefully we get to see the malloc or mmap operation that
   eventually allocates the backing store.  */

/* Describe the startup information for a new user thread.  */
struct mf_thread_start_info
{
  /* The user's thread entry point and argument.  */
  void * (*user_fn)(void *);
  void *user_arg;
};


static void
__mf_pthread_cleanup (void *arg)
{
  if (__mf_opts.heur_std_data)
    __mf_unregister (&errno, sizeof (errno), __MF_TYPE_GUESS);
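
  /* Release this thread's slot in the state table so a later thread can
     reuse it.  */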
#ifndef HAVE_TLS
  struct mf_thread_data *data = __mf_find_threadinfo (0);
  if (data)
    data->used_p = 0;
#endif
}


static void *
__mf_pthread_spawner (void *arg)
{
  void *result = NULL;
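
  /* From here on, the mudflap checking hooks are live for this thread.  */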
  __mf_set_state (active);

  /* NB: We could use __MF_TYPE_STATIC here, but we guess that the thread
     errno is coming out of some dynamically allocated pool that we already
     know of as __MF_TYPE_HEAP.  */
  if (__mf_opts.heur_std_data)
    __mf_register (&errno, sizeof (errno), __MF_TYPE_GUESS,
                   "errno area (thread)");

  /* We considered using pthread_key_t objects instead of these
     cleanup stacks, but they were less cooperative with the
     interposed malloc hooks in libmudflap.  */
  /* ??? The pthread_key_t problem is solved above...  */
  pthread_cleanup_push (__mf_pthread_cleanup, NULL);

  /* Extract given entry point and argument.  */
  struct mf_thread_start_info *psi = arg;
  void * (*user_fn)(void *) = psi->user_fn;
  void *user_arg = psi->user_arg;
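  /* This block was allocated with the real malloc in the pthread_create
     wrapper below, so release it with the real free as well.  */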
  CALL_REAL (free, arg);

  result = (*user_fn)(user_arg);

  pthread_cleanup_pop (1 /* execute */);

  return result;
}


#if PIC
/* A special bootstrap variant.  */
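/* (Presumably reached only if pthread_create is called while the library is
   still starting up, before the real pthread_create has been resolved;
   reporting failure is the conservative fallback.)  */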
int
__mf_0fn_pthread_create (pthread_t *thr, const pthread_attr_t *attr,
                         void * (*start) (void *), void *arg)
{
  return -1;
}
#endif


#undef pthread_create
WRAPPER(int, pthread_create, pthread_t *thr, const pthread_attr_t *attr,
        void * (*start) (void *), void *arg)
{
  struct mf_thread_start_info *si;

  TRACE ("pthread_create\n");

  /* Fill in startup-control fields.  */
  si = CALL_REAL (malloc, sizeof (*si));
  si->user_fn = start;
  si->user_arg = arg;
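  /* __mf_pthread_spawner copies these fields out and frees this block.  */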

  /* Actually create the thread.  */
  return CALL_REAL (pthread_create, thr, attr, __mf_pthread_spawner, si);
}