openrisc/trunk/gnu-src/gcc-4.2.2/libgomp/team.c (rev 587)
https://opencores.org/ocsvn/openrisc_2011-10-31/openrisc_2011-10-31/trunk

/* Copyright (C) 2005 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU OpenMP Library (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU Lesser General Public License as published by
   the Free Software Foundation; either version 2.1 of the License, or
   (at your option) any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License for
   more details.

   You should have received a copy of the GNU Lesser General Public License
   along with libgomp; see the file COPYING.LIB.  If not, write to the
   Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
   MA 02110-1301, USA.  */

/* As a special exception, if you link this library with other files, some
   of which are compiled with GCC, to produce an executable, this library
   does not by itself cause the resulting executable to be covered by the
   GNU General Public License.  This exception does not however invalidate
   any other reasons why the executable file might be covered by the GNU
   General Public License.  */

/* This file handles the maintenance of threads in response to team
   creation and termination.  */

#include "libgomp.h"
#include <stdlib.h>
#include <string.h>

/* This array manages threads spawned from the top level, which will
   return to the idle loop once the current PARALLEL construct ends.  */
static struct gomp_thread **gomp_threads;
static unsigned gomp_threads_size;
static unsigned gomp_threads_used;

/* This attribute contains PTHREAD_CREATE_DETACHED.  */
pthread_attr_t gomp_thread_attr;

/* This barrier holds and releases threads waiting in gomp_threads.  */
static gomp_barrier_t gomp_threads_dock;

/* This is the libgomp per-thread data structure.  */
#ifdef HAVE_TLS
__thread struct gomp_thread gomp_tls_data;
#else
pthread_key_t gomp_tls_key;
#endif
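
/* Note: per the comment above, gomp_thread_attr is expected to hold
   PTHREAD_CREATE_DETACHED (it is presumably initialized elsewhere in the
   library), so the workers created in gomp_team_start are never joined.
   A worker that is no longer needed simply falls out of the idle loop in
   gomp_thread_start below and lets the system reclaim its resources.  */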


/* This structure is used to communicate across pthread_create.  */

struct gomp_thread_start_data
{
  struct gomp_team_state ts;
  void (*fn) (void *);
  void *fn_data;
  bool nested;
};
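
/* Note: this block is a hand-off vehicle only.  gomp_team_start below
   fills one instance per newly created thread and passes its address to
   pthread_create; gomp_thread_start copies fn, fn_data and ts out of it
   before its first barrier wait.  The NESTED flag selects which of the
   two paths in gomp_thread_start the new thread takes.  */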


/* This function is a pthread_create entry point.  This contains the idle
   loop in which a thread waits to be called up to become part of a team.  */

static void *
gomp_thread_start (void *xdata)
{
  struct gomp_thread_start_data *data = xdata;
  struct gomp_thread *thr;
  void (*local_fn) (void *);
  void *local_data;

#ifdef HAVE_TLS
  thr = &gomp_tls_data;
#else
  struct gomp_thread local_thr;
  thr = &local_thr;
  pthread_setspecific (gomp_tls_key, thr);
#endif
  gomp_sem_init (&thr->release, 0);

  /* Extract what we need from data.  */
  local_fn = data->fn;
  local_data = data->fn_data;
  thr->ts = data->ts;

  thr->ts.team->ordered_release[thr->ts.team_id] = &thr->release;

  if (data->nested)
    {
      gomp_barrier_wait (&thr->ts.team->barrier);
      local_fn (local_data);
      gomp_barrier_wait (&thr->ts.team->barrier);
    }
  else
    {
      gomp_threads[thr->ts.team_id] = thr;

      gomp_barrier_wait (&gomp_threads_dock);
      do
        {
          struct gomp_team *team;

          local_fn (local_data);

          /* Clear out the team and function data.  This is a debugging
             signal that we're in fact back in the dock.  */
          team = thr->ts.team;
          thr->fn = NULL;
          thr->data = NULL;
          thr->ts.team = NULL;
          thr->ts.work_share = NULL;
          thr->ts.team_id = 0;
          thr->ts.work_share_generation = 0;
          thr->ts.static_trip = 0;

          gomp_barrier_wait (&team->barrier);
          gomp_barrier_wait (&gomp_threads_dock);

          local_fn = thr->fn;
          local_data = thr->data;
        }
      while (local_fn);
    }

  return NULL;
}
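
/* A short sketch of the non-nested handshake above, as a reading aid
   (the step numbering is ours, not libgomp's):

     1. gomp_team_start stores thr->fn, thr->data and thr->ts into each
        docked thread it wants for the new team, then releases everybody
        with a wait on gomp_threads_dock.
     2. Each woken thread runs local_fn, clears its per-thread state, and
        meets the master again at the team barrier (see gomp_team_end)
        before parking itself back in the dock.
     3. A thread that wakes from the dock with thr->fn still NULL was not
        picked for the new, smaller team; it falls out of the do/while
        loop and exits.  */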


/* Create a new team data structure.  */

static struct gomp_team *
new_team (unsigned nthreads, struct gomp_work_share *work_share)
{
  struct gomp_team *team;
  size_t size;

  size = sizeof (*team) + nthreads * sizeof (team->ordered_release[0]);
  team = gomp_malloc (size);
  gomp_mutex_init (&team->work_share_lock);

  team->work_shares = gomp_malloc (4 * sizeof (struct gomp_work_share *));
  team->generation_mask = 3;
  team->oldest_live_gen = work_share == NULL;
  team->num_live_gen = work_share != NULL;
  team->work_shares[0] = work_share;

  team->nthreads = nthreads;
  gomp_barrier_init (&team->barrier, nthreads);

  gomp_sem_init (&team->master_release, 0);
  team->ordered_release[0] = &team->master_release;

  return team;
}
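
/* Allocation note: the size computation reserves one release-semaphore
   pointer per team member beyond the base structure; slot 0 is wired to
   the master's own master_release, and each worker installs its pointer
   from gomp_thread_start.  The work-share list starts with room for four
   entries, and generation_mask is 3, presumably so a generation number
   can be reduced mod 4 when indexing work_shares.  For a four-thread
   team, for instance, SIZE works out to sizeof (struct gomp_team) plus
   four pointers.  */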


/* Free a team data structure.  */

static void
free_team (struct gomp_team *team)
{
  free (team->work_shares);
  gomp_mutex_destroy (&team->work_share_lock);
  gomp_barrier_destroy (&team->barrier);
  gomp_sem_destroy (&team->master_release);
  free (team);
}
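
/* Note: within this file free_team is reached only from gomp_team_end,
   after the master has passed the final team barrier; that barrier is
   the synchronization point that is meant to make this teardown safe,
   since the workers' last use of the team is their own wait on it
   (non-nested workers then go back to waiting on gomp_threads_dock).  */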


/* Launch a team.  */

void
gomp_team_start (void (*fn) (void *), void *data, unsigned nthreads,
                 struct gomp_work_share *work_share)
{
  struct gomp_thread_start_data *start_data;
  struct gomp_thread *thr, *nthr;
  struct gomp_team *team;
  bool nested;
  unsigned i, n, old_threads_used = 0;

  thr = gomp_thread ();
  nested = thr->ts.team != NULL;

  team = new_team (nthreads, work_share);

  /* Always save the previous state, even if this isn't a nested team.
     In particular, we should save any work share state from an outer
     orphaned work share construct.  */
  team->prev_ts = thr->ts;

  thr->ts.team = team;
  thr->ts.work_share = work_share;
  thr->ts.team_id = 0;
  thr->ts.work_share_generation = 0;
  thr->ts.static_trip = 0;

  if (nthreads == 1)
    return;

  i = 1;

  /* We only allow the reuse of idle threads for non-nested PARALLEL
     regions.  This appears to be implied by the semantics of
     threadprivate variables, but perhaps that's reading too much into
     things.  Certainly it does prevent any locking problems, since
     only the initial program thread will modify gomp_threads.  */
  if (!nested)
    {
      old_threads_used = gomp_threads_used;

      if (nthreads <= old_threads_used)
        n = nthreads;
      else if (old_threads_used == 0)
        {
          n = 0;
          gomp_barrier_init (&gomp_threads_dock, nthreads);
        }
      else
        {
          n = old_threads_used;

          /* Increase the barrier threshold to make sure all new
             threads arrive before the team is released.  */
          gomp_barrier_reinit (&gomp_threads_dock, nthreads);
        }

      /* Not true yet, but soon will be.  We're going to release all
         threads from the dock, and those that aren't part of the
         team will exit.  */
      gomp_threads_used = nthreads;

      /* Release existing idle threads.  */
      for (; i < n; ++i)
        {
          nthr = gomp_threads[i];
          nthr->ts.team = team;
          nthr->ts.work_share = work_share;
          nthr->ts.team_id = i;
          nthr->ts.work_share_generation = 0;
          nthr->ts.static_trip = 0;
          nthr->fn = fn;
          nthr->data = data;
          team->ordered_release[i] = &nthr->release;
        }

      if (i == nthreads)
        goto do_release;

      /* If necessary, expand the size of the gomp_threads array.  It is
         expected that changes in the number of threads are rare, thus we
         make no effort to expand gomp_threads_size geometrically.  */
      if (nthreads >= gomp_threads_size)
        {
          gomp_threads_size = nthreads + 1;
          gomp_threads
            = gomp_realloc (gomp_threads,
                            gomp_threads_size
                            * sizeof (struct gomp_thread_data *));
        }
    }

  start_data = gomp_alloca (sizeof (struct gomp_thread_start_data)
                            * (nthreads-i));

  /* Launch new threads.  */
  for (; i < nthreads; ++i, ++start_data)
    {
      pthread_t pt;
      int err;

      start_data->ts.team = team;
      start_data->ts.work_share = work_share;
      start_data->ts.team_id = i;
      start_data->ts.work_share_generation = 0;
      start_data->ts.static_trip = 0;
      start_data->fn = fn;
      start_data->fn_data = data;
      start_data->nested = nested;

      err = pthread_create (&pt, &gomp_thread_attr,
                            gomp_thread_start, start_data);
      if (err != 0)
        gomp_fatal ("Thread creation failed: %s", strerror (err));
    }

 do_release:
  gomp_barrier_wait (nested ? &team->barrier : &gomp_threads_dock);

  /* Decrease the barrier threshold to match the number of threads
     that should arrive back at the end of this team.  The extra
     threads should be exiting.  Note that we arrange for this test
     to never be true for nested teams.  */
  if (nthreads < old_threads_used)
    gomp_barrier_reinit (&gomp_threads_dock, nthreads);
}
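
/* Notes on the do_release barrier above: besides releasing the team
   (the dock for a top-level team, the team's own barrier for a nested
   one), it guarantees that every newly created thread has already copied
   its gomp_thread_start_data, so the gomp_alloca'd block can safely go
   out of scope when this function returns.  The trailing reinit shrinks
   the dock back to the new team size: the surplus threads just released
   will exit instead of docking again, so they must not be counted the
   next time around.  */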
302
 
303
 
304
/* Terminate the current team.  This is only to be called by the master
305
   thread.  We assume that we must wait for the other threads.  */
306
 
307
void
308
gomp_team_end (void)
309
{
310
  struct gomp_thread *thr = gomp_thread ();
311
  struct gomp_team *team = thr->ts.team;
312
 
313
  gomp_barrier_wait (&team->barrier);
314
 
315
  thr->ts = team->prev_ts;
316
 
317
  free_team (team);
318
}
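
/* For orientation, a sketch of how these entry points are reached (the
   callers live outside this file; the outlined-function name below is
   made up for illustration).  For a construct like

       #pragma omp parallel
         body;

   the compiler outlines BODY into a helper, say body_omp_fn, and emits
   roughly

       GOMP_parallel_start (body_omp_fn, &shared, 0);
       body_omp_fn (&shared);
       GOMP_parallel_end ();

   GOMP_parallel_start resolves the thread count and calls gomp_team_start
   above, the master then runs the body itself, and GOMP_parallel_end ends
   up in gomp_team_end.  */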


/* Constructors for this file.  */

static void __attribute__((constructor))
initialize_team (void)
{
  struct gomp_thread *thr;

#ifndef HAVE_TLS
  static struct gomp_thread initial_thread_tls_data;

  pthread_key_create (&gomp_tls_key, NULL);
  pthread_setspecific (gomp_tls_key, &initial_thread_tls_data);
#endif

#ifdef HAVE_TLS
  thr = &gomp_tls_data;
#else
  thr = &initial_thread_tls_data;
#endif
  gomp_sem_init (&thr->release, 0);
}
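
/* Note: this constructor runs when the library is loaded, before any
   OpenMP construct executes.  With HAVE_TLS the initial thread's data
   lives in gomp_tls_data directly; without it, a static block is
   registered under gomp_tls_key so that gomp_thread () (presumably a
   pthread_getspecific wrapper defined in libgomp.h) finds the same data
   either way.  In both cases the initial thread's release semaphore is
   initialized here, since that thread never passes through
   gomp_thread_start.  */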
