/* Copyright (C) 2005, 2007, 2008, 2009 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU OpenMP Library (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This file contains data types and function declarations that are not
   part of the official OpenMP user interface.  There are declarations
   in here that are part of the GNU OpenMP ABI, in that the compiler is
   required to know about them and use them.

   The convention is that the all caps prefix "GOMP" is used to group items
   that are part of the external ABI, and the lower case prefix "gomp"
   is used to group items that are completely private to the library.  */

#ifndef LIBGOMP_H
#define LIBGOMP_H 1

#include "config.h"
#include "gstdint.h"

#include <pthread.h>
#include <stdbool.h>

#ifdef HAVE_ATTRIBUTE_VISIBILITY
# pragma GCC visibility push(hidden)
#endif

#include "sem.h"
#include "mutex.h"
#include "bar.h"
#include "ptrlock.h"


/* This structure contains the data to control one work-sharing construct,
   either a LOOP (FOR/DO) or a SECTIONS.  */

enum gomp_schedule_type
{
  GFS_RUNTIME,
  GFS_STATIC,
  GFS_DYNAMIC,
  GFS_GUIDED,
  GFS_AUTO
};

struct gomp_work_share
{
  /* This member records the SCHEDULE clause to be used for this construct.
     The user specification of "runtime" will already have been resolved.
     If this is a SECTIONS construct, this value will always be DYNAMIC.  */
  enum gomp_schedule_type sched;

  int mode;

  union {
    struct {
      /* This is the chunk_size argument to the SCHEDULE clause.  */
      long chunk_size;

      /* This is the iteration end point.  If this is a SECTIONS construct,
         this is the number of contained sections.  */
      long end;

      /* This is the iteration step.  If this is a SECTIONS construct, this
         is always 1.  */
      long incr;
    };

    struct {
      /* The same as above, but for the unsigned long long loop variants.  */
      unsigned long long chunk_size_ull;
      unsigned long long end_ull;
      unsigned long long incr_ull;
    };
  };

  /* This is a circular queue that details which threads will be allowed
     into the ordered region and in which order.  When a thread allocates
     iterations on which it is going to work, it also registers itself at
     the end of the array.  When a thread reaches the ordered region, it
     checks to see if it is the one at the head of the queue.  If not, it
     blocks on its RELEASE semaphore.  */
  unsigned *ordered_team_ids;

  /* This is the number of threads that have registered themselves in
     the circular queue ordered_team_ids.  */
  unsigned ordered_num_used;

  /* This is the team_id of the currently acknowledged owner of the ordered
     section, or -1u if the ordered section has not been acknowledged by
     any thread.  This is distinguished from the thread that is *allowed*
     to take the section next.  */
  unsigned ordered_owner;

  /* This is the index into the circular queue ordered_team_ids of the
     current thread that's allowed into the ordered region.  */
  unsigned ordered_cur;

  /* This is a chain of allocated gomp_work_share blocks, valid only
     in the first gomp_work_share struct in the block.  */
  struct gomp_work_share *next_alloc;

  /* The above fields are written once during workshare initialization,
     or related to ordered worksharing.  Make sure the following fields
     are in a different cache line.  */

  /* This lock protects the update of the following members.  */
  gomp_mutex_t lock __attribute__((aligned (64)));

  /* This is the count of the number of threads that have exited the work
     share construct.  If the construct was marked nowait, they have moved on
     to other work; otherwise they're blocked on a barrier.  The last member
     of the team to exit the work share construct must deallocate it.  */
  unsigned threads_completed;

  union {
    /* This is the next iteration value to be allocated.  In the case of
       GFS_STATIC loops, this is the iteration start point and never
       changes.  */
    long next;

    /* The same, but with unsigned long long type.  */
    unsigned long long next_ull;

    /* This is the returned data structure for SINGLE COPYPRIVATE.  */
    void *copyprivate;
  };

  union {
    /* Link to gomp_work_share struct for next work sharing construct
       encountered after this one.  */
    gomp_ptrlock_t next_ws;

    /* gomp_work_share structs are chained in the free work share cache
       through this.  */
    struct gomp_work_share *next_free;
  };

  /* If only a few threads are in the team, ordered_team_ids can point
     to this array which fills the padding at the end of this struct.  */
  unsigned inline_ordered_team_ids[0];
};

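/* Illustrative sketch only (not compiled, not part of the ABI): one way to
   read the ordered_* fields above as a circular queue.  The helper name is
   hypothetical and the real bookkeeping lives in ordered.c; this merely
   restates the comments in code form.  */
#if 0
static unsigned
sketch_ordered_allowed_team_id (struct gomp_work_share *ws)
{
  /* ordered_team_ids holds ordered_num_used registered team ids;
     ordered_cur indexes the head of that queue, i.e. the thread that is
     allowed into the ordered region next.  */
  return ws->ordered_team_ids[ws->ordered_cur];
}
#endif
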
/* This structure contains all of the thread-local data associated with
   a thread team.  This is the data that must be saved when a thread
   encounters a nested PARALLEL construct.  */

struct gomp_team_state
{
  /* This is the team of which the thread is currently a member.  */
  struct gomp_team *team;

  /* This is the work share construct which this thread is currently
     processing.  Recall that with NOWAIT, not all threads may be
     processing the same construct.  */
  struct gomp_work_share *work_share;

  /* This is the previous work share construct or NULL if there wasn't any.
     When all threads are done with the current work sharing construct,
     the previous one can be freed.  The current one can't, as its
     next_ws field is used.  */
  struct gomp_work_share *last_work_share;

  /* This is the ID of this thread within the team.  This value is
     guaranteed to be between 0 and N-1, where N is the number of
     threads in the team.  */
  unsigned team_id;

  /* Nesting level.  */
  unsigned level;

  /* Active nesting level.  Only active parallel regions are counted.  */
  unsigned active_level;

#ifdef HAVE_SYNC_BUILTINS
  /* Number of single stmts encountered.  */
  unsigned long single_count;
#endif

  /* For GFS_RUNTIME loops that resolved to GFS_STATIC, this is the
     trip number through the loop.  So the first time a particular loop
     is encountered this number is 0, the second time through the loop
     it is 1, etc.  This is unused when the compiler knows in advance that
     the loop is statically scheduled.  */
  unsigned long static_trip;
};

/* These are the OpenMP 3.0 Internal Control Variables described in
   section 2.3.1.  Those described as having one copy per task are
   stored within the structure; those described as having one copy
   for the whole program are (naturally) global variables.  */

struct gomp_task_icv
{
  unsigned long nthreads_var;
  enum gomp_schedule_type run_sched_var;
  int run_sched_modifier;
  bool dyn_var;
  bool nest_var;
};

extern struct gomp_task_icv gomp_global_icv;
extern unsigned long gomp_thread_limit_var;
extern unsigned long gomp_remaining_threads_count;
#ifndef HAVE_SYNC_BUILTINS
extern gomp_mutex_t gomp_remaining_threads_lock;
#endif
extern unsigned long gomp_max_active_levels_var;
extern unsigned long long gomp_spin_count_var, gomp_throttled_spin_count_var;
extern unsigned long gomp_available_cpus, gomp_managed_threads;

enum gomp_task_kind
{
  GOMP_TASK_IMPLICIT,
  GOMP_TASK_IFFALSE,
  GOMP_TASK_WAITING,
  GOMP_TASK_TIED
};

/* This structure describes a "task" to be run by a thread.  */

struct gomp_task
{
  struct gomp_task *parent;
  struct gomp_task *children;
  struct gomp_task *next_child;
  struct gomp_task *prev_child;
  struct gomp_task *next_queue;
  struct gomp_task *prev_queue;
  struct gomp_task_icv icv;
  void (*fn) (void *);
  void *fn_data;
  enum gomp_task_kind kind;
  bool in_taskwait;
  bool in_tied_task;
  gomp_sem_t taskwait_sem;
};

/* This structure describes a "team" of threads.  These are the threads
   that are spawned by a PARALLEL construct, as well as the work sharing
   constructs that the team encounters.  */

struct gomp_team
{
  /* This is the number of threads in the current team.  */
  unsigned nthreads;

  /* This is the number of gomp_work_share structs that have been allocated
     as a block last time.  */
  unsigned work_share_chunk;

  /* This is the saved team state that applied to a master thread before
     the current thread was created.  */
  struct gomp_team_state prev_ts;

  /* This semaphore should be used by the master thread instead of its
     "native" semaphore in the thread structure.  Required for nested
     parallels, as the master is a member of two teams.  */
  gomp_sem_t master_release;

  /* This points to an array with pointers to the release semaphore
     of the threads in the team.  */
  gomp_sem_t **ordered_release;

  /* List of gomp_work_share structs chained through next_free fields.
     This is populated and taken off only by the first thread in the
     team encountering a new work sharing construct, in a critical
     section.  */
  struct gomp_work_share *work_share_list_alloc;

  /* List of gomp_work_share structs freed by free_work_share.  New
     entries are atomically added to the start of the list, and
     alloc_work_share can safely only move all but the first entry
     to work_share_list_alloc, as free_work_share can happen concurrently
     with alloc_work_share.  */
  struct gomp_work_share *work_share_list_free;

#ifdef HAVE_SYNC_BUILTINS
  /* Number of simple single regions encountered by threads in this
     team.  */
  unsigned long single_count;
#else
  /* Mutex protecting addition of workshares to work_share_list_free.  */
  gomp_mutex_t work_share_list_free_lock;
#endif

  /* This barrier is used for most synchronization of the team.  */
  gomp_barrier_t barrier;

  /* Initial work shares, to avoid allocating any gomp_work_share
     structs in the common case.  */
  struct gomp_work_share work_shares[8];

  gomp_mutex_t task_lock;
  struct gomp_task *task_queue;
  int task_count;
  int task_running_count;

  /* This array contains structures for implicit tasks.  */
  struct gomp_task implicit_task[];
};

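/* Illustrative sketch only (not compiled): one way the lock-free push onto
   work_share_list_free described above can be done when sync builtins are
   available.  The helper name is hypothetical; the real code is in work.c
   and also handles the !HAVE_SYNC_BUILTINS case via
   work_share_list_free_lock.  */
#if 0
static void
sketch_push_free_work_share (struct gomp_team *team,
                             struct gomp_work_share *ws)
{
  struct gomp_work_share *next;

  /* Atomically prepend WS to the free list; retry if another thread
     changed the list head in the meantime.  */
  do
    {
      next = team->work_share_list_free;
      ws->next_free = next;
    }
  while (!__sync_bool_compare_and_swap (&team->work_share_list_free,
                                        next, ws));
}
#endif
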
/* This structure contains all data that is private to libgomp and is
   allocated per thread.  */

struct gomp_thread
{
  /* This is the function that the thread should run upon launch.  */
  void (*fn) (void *data);
  void *data;

  /* This is the current team state for this thread.  The ts.team member
     is NULL only if the thread is idle.  */
  struct gomp_team_state ts;

  /* This is the task that the thread is currently executing.  */
  struct gomp_task *task;

  /* This semaphore is used for ordered loops.  */
  gomp_sem_t release;

  /* User pthread thread pool.  */
  struct gomp_thread_pool *thread_pool;
};


struct gomp_thread_pool
{
  /* This array manages threads spawned from the top level, which will
     return to the idle loop once the current PARALLEL construct ends.  */
  struct gomp_thread **threads;
  unsigned threads_size;
  unsigned threads_used;
  struct gomp_team *last_team;

  /* This barrier holds and releases threads waiting in threads.  */
  gomp_barrier_t threads_dock;
};

/* ... and here is that TLS data.  */

#ifdef HAVE_TLS
extern __thread struct gomp_thread gomp_tls_data;
static inline struct gomp_thread *gomp_thread (void)
{
  return &gomp_tls_data;
}
#else
extern pthread_key_t gomp_tls_key;
static inline struct gomp_thread *gomp_thread (void)
{
  return pthread_getspecific (gomp_tls_key);
}
#endif

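/* Illustrative sketch only (not compiled): typical use of the accessor
   above.  Library code fetches the per-thread state once and then reads
   the team bookkeeping through it.  The helper name and the fallback to 0
   outside a parallel region are assumptions for exposition.  */
#if 0
static unsigned
sketch_my_team_id (void)
{
  struct gomp_thread *thr = gomp_thread ();

  /* ts.team is NULL when the thread is idle, i.e. not inside a parallel
     region; treat that as thread 0 of a team of one.  */
  return thr->ts.team ? thr->ts.team_id : 0;
}
#endif
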
extern struct gomp_task_icv *gomp_new_icv (void);

/* Here's how to access the current copy of the ICVs.  */

static inline struct gomp_task_icv *gomp_icv (bool write)
{
  struct gomp_task *task = gomp_thread ()->task;
  if (task)
    return &task->icv;
  else if (write)
    return gomp_new_icv ();
  else
    return &gomp_global_icv;
}

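/* Illustrative sketch only (not compiled): how callers are expected to use
   gomp_icv.  Readers pass false and may be handed the global copy; writers
   pass true so that a task-private copy exists before it is modified.  The
   function names below are hypothetical stand-ins for omp_set_... and
   omp_get_... style entry points.  */
#if 0
static void
sketch_set_dynamic (int val)
{
  /* Writing an ICV: request a writable (per-task) copy.  */
  gomp_icv (true)->dyn_var = val != 0;
}

static int
sketch_get_max_threads (void)
{
  /* Reading an ICV: the read-only lookup is enough.  */
  return gomp_icv (false)->nthreads_var;
}
#endif
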
/* The attributes to be used during thread creation.  */
extern pthread_attr_t gomp_thread_attr;

/* Other variables.  */

extern unsigned short *gomp_cpu_affinity;
extern size_t gomp_cpu_affinity_len;

/* Function prototypes.  */

/* affinity.c */

extern void gomp_init_affinity (void);
extern void gomp_init_thread_affinity (pthread_attr_t *);

/* alloc.c */

extern void *gomp_malloc (size_t) __attribute__((malloc));
extern void *gomp_malloc_cleared (size_t) __attribute__((malloc));
extern void *gomp_realloc (void *, size_t);

/* Avoid conflicting prototypes of alloca() in system headers by using
   GCC's builtin alloca().  */
#define gomp_alloca(x)  __builtin_alloca(x)

/* error.c */

extern void gomp_error (const char *, ...)
        __attribute__((format (printf, 1, 2)));
extern void gomp_fatal (const char *, ...)
        __attribute__((noreturn, format (printf, 1, 2)));

/* iter.c */

extern int gomp_iter_static_next (long *, long *);
extern bool gomp_iter_dynamic_next_locked (long *, long *);
extern bool gomp_iter_guided_next_locked (long *, long *);

#ifdef HAVE_SYNC_BUILTINS
extern bool gomp_iter_dynamic_next (long *, long *);
extern bool gomp_iter_guided_next (long *, long *);
#endif

/* iter_ull.c */

extern int gomp_iter_ull_static_next (unsigned long long *,
                                      unsigned long long *);
extern bool gomp_iter_ull_dynamic_next_locked (unsigned long long *,
                                               unsigned long long *);
extern bool gomp_iter_ull_guided_next_locked (unsigned long long *,
                                              unsigned long long *);

#if defined HAVE_SYNC_BUILTINS && defined __LP64__
extern bool gomp_iter_ull_dynamic_next (unsigned long long *,
                                        unsigned long long *);
extern bool gomp_iter_ull_guided_next (unsigned long long *,
                                       unsigned long long *);
#endif

/* ordered.c */

extern void gomp_ordered_first (void);
extern void gomp_ordered_last (void);
extern void gomp_ordered_next (void);
extern void gomp_ordered_static_init (void);
extern void gomp_ordered_static_next (void);
extern void gomp_ordered_sync (void);

/* parallel.c */

extern unsigned gomp_resolve_num_threads (unsigned, unsigned);

/* proc.c (in config/) */

extern void gomp_init_num_threads (void);
extern unsigned gomp_dynamic_max_threads (void);

/* task.c */

extern void gomp_init_task (struct gomp_task *, struct gomp_task *,
                            struct gomp_task_icv *);
extern void gomp_end_task (void);
extern void gomp_barrier_handle_tasks (gomp_barrier_state_t);

static inline void
gomp_finish_task (struct gomp_task *task)
{
  gomp_sem_destroy (&task->taskwait_sem);
}

/* team.c */

extern struct gomp_team *gomp_new_team (unsigned);
extern void gomp_team_start (void (*) (void *), void *, unsigned,
                             struct gomp_team *);
extern void gomp_team_end (void);

/* work.c */

extern void gomp_init_work_share (struct gomp_work_share *, bool, unsigned);
extern void gomp_fini_work_share (struct gomp_work_share *);
extern bool gomp_work_share_start (bool);
extern void gomp_work_share_end (void);
extern void gomp_work_share_end_nowait (void);

static inline void
gomp_work_share_init_done (void)
{
  struct gomp_thread *thr = gomp_thread ();
  if (__builtin_expect (thr->ts.last_work_share != NULL, 1))
    gomp_ptrlock_set (&thr->ts.last_work_share->next_ws, thr->ts.work_share);
}

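/* Illustrative sketch only (not compiled): the rough sequence a
   work-sharing entry point follows with the helpers above.  The scheduling
   setup and last-thread handling done in loop.c and sections.c are
   simplified away; the control flow shown here is an assumption kept to
   the declarations in this header.  */
#if 0
static void
sketch_loop_start (long start, long end, long incr, long chunk)
{
  /* gomp_work_share_start returns true only in the thread that must
     initialize the fresh gomp_work_share; the other threads wait on the
     embedded next_ws ptrlock until init is flagged as done.  */
  if (gomp_work_share_start (false))
    {
      struct gomp_work_share *ws = gomp_thread ()->ts.work_share;

      ws->sched = GFS_DYNAMIC;
      ws->chunk_size = chunk;
      ws->end = end;
      ws->incr = incr;
      ws->next = start;
      gomp_work_share_init_done ();
    }
}
#endif
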
#ifdef HAVE_ATTRIBUTE_VISIBILITY
# pragma GCC visibility pop
#endif

/* Now that we're back to default visibility, include the globals.  */
#include "libgomp_g.h"

/* Include omp.h by parts.  */
#include "omp-lock.h"
#define _LIBGOMP_OMP_LOCK_DEFINED 1
#include "omp.h.in"

#if !defined (HAVE_ATTRIBUTE_VISIBILITY) \
    || !defined (HAVE_ATTRIBUTE_ALIAS) \
    || !defined (HAVE_AS_SYMVER_DIRECTIVE) \
    || !defined (PIC)
# undef LIBGOMP_GNU_SYMBOL_VERSIONING
#endif

#ifdef LIBGOMP_GNU_SYMBOL_VERSIONING
extern void gomp_init_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern void gomp_destroy_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern void gomp_set_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern void gomp_unset_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern int gomp_test_lock_30 (omp_lock_t *) __GOMP_NOTHROW;
extern void gomp_init_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern void gomp_destroy_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern void gomp_set_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern void gomp_unset_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;
extern int gomp_test_nest_lock_30 (omp_nest_lock_t *) __GOMP_NOTHROW;

extern void gomp_init_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_destroy_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_set_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_unset_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern int gomp_test_lock_25 (omp_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_init_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_destroy_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_set_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
extern void gomp_unset_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;
extern int gomp_test_nest_lock_25 (omp_nest_lock_25_t *) __GOMP_NOTHROW;

# define strong_alias(fn, al) \
  extern __typeof (fn) al __attribute__ ((alias (#fn)));
# define omp_lock_symver(fn) \
  __asm (".symver g" #fn "_30, " #fn "@@OMP_3.0"); \
  __asm (".symver g" #fn "_25, " #fn "@OMP_1.0");
#else
# define gomp_init_lock_30 omp_init_lock
# define gomp_destroy_lock_30 omp_destroy_lock
# define gomp_set_lock_30 omp_set_lock
# define gomp_unset_lock_30 omp_unset_lock
# define gomp_test_lock_30 omp_test_lock
# define gomp_init_nest_lock_30 omp_init_nest_lock
# define gomp_destroy_nest_lock_30 omp_destroy_nest_lock
# define gomp_set_nest_lock_30 omp_set_nest_lock
# define gomp_unset_nest_lock_30 omp_unset_nest_lock
# define gomp_test_nest_lock_30 omp_test_nest_lock
#endif

#ifdef HAVE_ATTRIBUTE_VISIBILITY
# define attribute_hidden __attribute__ ((visibility ("hidden")))
#else
# define attribute_hidden
#endif

#ifdef HAVE_ATTRIBUTE_ALIAS
# define ialias(fn) \
  extern __typeof (fn) gomp_ialias_##fn \
    __attribute__ ((alias (#fn))) attribute_hidden;
#else
# define ialias(fn)
#endif

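/* Illustrative sketch only (not compiled): how the aliasing macros above
   are meant to be used from the library's .c files.  The exact call sites
   live in the lock and API implementation files; treat this as an example
   of the mechanism, not a copy of them.  */
#if 0
/* With symbol versioning enabled, the _30/_25 implementations are exported
   under versioned names of the same public symbol.  */
omp_lock_symver (omp_init_lock)

/* ialias gives internal callers a hidden alias to a public entry point,
   avoiding a PLT round trip inside the library.  */
ialias (omp_get_thread_num)
#endif
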
#endif /* LIBGOMP_H */