URL
https://opencores.org/ocsvn/openrisc/openrisc/trunk
Subversion Repositories openrisc
Compare Revisions
- This comparison shows the changes necessary to convert path
/openrisc/trunk/gnu-old/gcc-4.2.2/libgomp/testsuite
- from Rev 154 to Rev 816
- ↔ Reverse comparison
Rev 154 → Rev 816
/libgomp.c++/copyin-1.C
0,0 → 1,34
// { dg-do run }
// { dg-require-effective-target tls_runtime }

// Test the copyin clause on a scalar threadprivate variable:
// every thread must observe the master's value on parallel entry,
// and per-thread writes must persist into the next parallel region.

#include <omp.h>

extern "C" void abort (void);

int thr = 32;
#pragma omp threadprivate (thr)

int
main (void)
{
  int l = 0;

  omp_set_dynamic (0);
  omp_set_num_threads (6);

  // copyin broadcasts the master's thr (32) to all threads.
  #pragma omp parallel copyin (thr) reduction (||:l)
  {
    l = thr != 32;
    thr = omp_get_thread_num () + 11;
  }

  // Master's threadprivate copy was set to 0 + 11 above.
  if (l || thr != 11)
    abort ();

  // Without copyin, each thread keeps its own value from the
  // previous region (thread_num + 11).
  #pragma omp parallel reduction (||:l)
    l = thr != omp_get_thread_num () + 11;

  if (l)
    abort ();
  return 0;
}
/libgomp.c++/copyin-2.C
0,0 → 1,34
// { dg-do run }
// { dg-require-effective-target tls_runtime }

// Same as copyin-1.C, but the threadprivate variable is an aggregate,
// exercising copyin of a struct rather than a scalar.

#include <omp.h>

extern "C" void abort (void);

struct S { int t; char buf[64]; } thr = { 32, "" };
#pragma omp threadprivate (thr)

int
main (void)
{
  int l = 0;

  omp_set_dynamic (0);
  omp_set_num_threads (6);

  // copyin broadcasts the master's struct to every thread.
  #pragma omp parallel copyin (thr) reduction (||:l)
  {
    l = thr.t != 32;
    thr.t = omp_get_thread_num () + 11;
  }

  // Master's copy was set to 0 + 11 above.
  if (l || thr.t != 11)
    abort ();

  // Per-thread values must persist into the next region.
  #pragma omp parallel reduction (||:l)
    l = thr.t != omp_get_thread_num () + 11;

  if (l)
    abort ();
  return 0;
}
/libgomp.c++/shared-1.C
0,0 → 1,60
#include <omp.h>

extern "C" void abort (void);

struct Y
{
  int l[5][10];
};

struct X
{
  struct Y y;
  float b[10];
};

// Check that the shared clause really shares scalars and a large
// aggregate between threads: 5 threads each atomically add the team
// size to a and b, and cooperatively update one cell of each A[i][j].
void
parallel (int a, int b)
{
  int i, j;
  struct X A[10][5];
  a = b = 3;

  for (i = 0; i < 10; i++)
    for (j = 0; j < 5; j++)
      A[i][j].y.l[3][3] = -10;

  #pragma omp parallel shared (a, b, A) num_threads (5)
  {
    int i, j;

    #pragma omp atomic
    a += omp_get_num_threads ();

    #pragma omp atomic
    b += omp_get_num_threads ();

    // The iterations are divided among the team; each cell is
    // incremented exactly once in total.
    #pragma omp for private (j)
    for (i = 0; i < 10; i++)
      for (j = 0; j < 5; j++)
	A[i][j].y.l[3][3] += 20;
  }

  for (i = 0; i < 10; i++)
    for (j = 0; j < 5; j++)
      if (A[i][j].y.l[3][3] != 10)
	abort ();

  // 3 + 5 threads * 5 = 28.
  if (a != 28)
    abort ();

  if (b != 28)
    abort ();
}

// Implicit int is not valid C++; declare main's return type explicitly.
int
main ()
{
  parallel (1, 2);
  return 0;
}
/libgomp.c++/shared-2.C
0,0 → 1,47
extern "C" void abort (void);

// Tests firstprivate initialization plus lastprivate on both a
// worksharing loop and a sections construct.
void
parallel (int a, int b)
{
  int bad, LASTPRIV, LASTPRIV_SEC;
  int i;

  a = b = 3;

  bad = 0;

  #pragma omp parallel firstprivate (a,b) shared (bad) num_threads (5)
  {
    // firstprivate must have copied in the values set above.
    if (a != 3 || b != 3)
      bad = 1;

    // lastprivate: value from the sequentially last iteration (i == 9).
    #pragma omp for lastprivate (LASTPRIV)
    for (i = 0; i < 10; i++)
      LASTPRIV = i;

    // lastprivate on sections: value from the lexically last section.
    #pragma omp sections lastprivate (LASTPRIV_SEC)
    {
      #pragma omp section
      { LASTPRIV_SEC = 3; }

      #pragma omp section
      { LASTPRIV_SEC = 42; }
    }
  }

  if (LASTPRIV != 9)
    abort ();

  if (LASTPRIV_SEC != 42)
    abort ();

  if (bad)
    abort ();
}

int main()
{
  parallel (1, 2);
  return 0;
}
/libgomp.c++/loop-1.C
0,0 → 1,96
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <omp.h>

#define MAX 1000

// Randomized check that an omp for loop covers exactly the same
// iterations as its sequential counterpart, for all four loop forms:
// counting up with < and <=, counting down with > and >=.
// Bounds/step are randomized each call (N1 < 13, N2 < MAX-1, step 1..7),
// so all indices stay within the arrays.
void main1()
{
  int i, N1, N2, step;
  int a[MAX], b[MAX];

  N1 = rand () % 13;
  N2 = rand () % (MAX - 51) + 50;
  step = rand () % 7 + 1;

  printf ("N1 = %d\nN2 = %d\nstep = %d\n", N1, N2, step);

  // Reference data; covers every index any of the checks below reads.
  for (i = N1; i <= N2; i += step)
    a[i] = 42+ i;

  /* COUNTING UP (<).  Fill in array 'b' in parallel.  */
  memset (b, 0, sizeof b);
  #pragma omp parallel shared(a,b,N1,N2,step) private(i)
  {
    #pragma omp for
    for (i = N1; i < N2; i += step)
      b[i] = a[i];
  }

  /* COUNTING UP (<).  Check that all the cells were filled in properly.  */
  for (i = N1; i < N2; i += step)
    if (a[i] != b[i])
      abort ();

  printf ("for (i = %d; i < %d; i += %d) [OK]\n", N1, N2, step);

  /* COUNTING UP (<=).  Fill in array 'b' in parallel.  */
  memset (b, 0, sizeof b);
  #pragma omp parallel shared(a,b,N1,N2,step) private(i)
  {
    #pragma omp for
    for (i = N1; i <= N2; i += step)
      b[i] = a[i];
  }

  /* COUNTING UP (<=).  Check that all the cells were filled in properly.  */
  for (i = N1; i <= N2; i += step)
    if (a[i] != b[i])
      abort ();

  printf ("for (i = %d; i <= %d; i += %d) [OK]\n", N1, N2, step);

  /* COUNTING DOWN (>).  Fill in array 'b' in parallel.  */
  memset (b, 0, sizeof b);
  #pragma omp parallel shared(a,b,N1,N2,step) private(i)
  {
    #pragma omp for
    for (i = N2; i > N1; i -= step)
      b[i] = a[i];
  }

  /* COUNTING DOWN (>).  Check that all the cells were filled in properly.  */
  for (i = N2; i > N1; i -= step)
    if (a[i] != b[i])
      abort ();

  printf ("for (i = %d; i > %d; i -= %d) [OK]\n", N2, N1, step);

  /* COUNTING DOWN (>=).  Fill in array 'b' in parallel.  */
  memset (b, 0, sizeof b);
  #pragma omp parallel shared(a,b,N1,N2,step) private(i)
  {
    #pragma omp for
    for (i = N2; i >= N1; i -= step)
      b[i] = a[i];
  }

  /* COUNTING DOWN (>=).  Check that all the cells were filled in properly.  */
  for (i = N2; i >= N1; i -= step)
    if (a[i] != b[i])
      abort ();

  printf ("for (i = %d; i >= %d; i -= %d) [OK]\n", N2, N1, step);
}

int
main ()
{
  int i;

  // Fixed seed keeps the test deterministic across runs.
  srand (0);
  for (i = 0; i < 10; ++i)
    main1();
  return 0;
}
/libgomp.c++/loop-2.C
0,0 → 1,32
#include <omp.h>

/* Orphaned work sharing.  */

extern "C" void abort (void);

#define N 10

// The omp for here is "orphaned": it binds to whatever parallel
// region is active in the caller, not one in this function.
void parloop (int *a)
{
  int i;

  #pragma omp for
  for (i = 0; i < N; i++)
    a[i] = i + 3;
}

// Implicit int is not valid C++; declare main's return type explicitly.
int
main ()
{
  int i, a[N];

  #pragma omp parallel shared(a)
  {
    parloop (a);
  }

  for (i = 0; i < N; i++)
    if (a[i] != i + 3)
      abort ();

  return 0;
}
/libgomp.c++/loop-3.C
0,0 → 1,26
extern "C" void abort (void);
int a;

// Orphaned omp for with lastprivate on a global: after the loop,
// 'a' must hold the value from the sequentially last iteration.
void
foo ()
{
  int i;
  a = 30;
  // Make sure every thread sees a == 30 before the loop overwrites it.
  #pragma omp barrier
  #pragma omp for lastprivate (a)
  for (i = 0; i < 1024; i++)
    {
      a = i;
    }
  // Implicit barrier at the end of the for guarantees the
  // lastprivate assignment is visible here.
  if (a != 1023)
    abort ();
}

int
main (void)
{
  #pragma omp parallel num_threads (64)
    foo ();

  return 0;
}
/libgomp.c++/loop-4.C
0,0 → 1,20
extern "C" void abort (void);

// Combined firstprivate + lastprivate on a parallel for with static
// schedule: each thread starts from a == 30, and the final value comes
// from the thread owning the last chunk.
// Implicit-int main() is not valid C++; declare the return type.
int
main ()
{
  int i, a;

  a = 30;

  #pragma omp parallel for firstprivate (a) lastprivate (a) \
      num_threads (2) schedule(static)
  for (i = 0; i < 10; i++)
    a = a + i;

  /* The thread that owns the last iteration will have computed
     30 + 5 + 6 + 7 + 8 + 9 = 65.  */
  if (a != 65)
    abort ();

  return 0;
}
/libgomp.c++/loop-5.C
0,0 → 1,19
extern "C" void abort ();

// Each loop-control expression must be evaluated exactly as in a
// sequential loop; 'check' records that f1/f2/f3 were all invoked.
int check;
int f1() { check |= 1; return 1; }
int f2() { check |= 2; return 11; }
int f3() { check |= 4; return 2; }

int a[12];

int main()
{
  // Loop runs i = 1, 3, 5, 7, 9, 11 — every odd index.
  #pragma omp for
  for (int i = f1(); i <= f2(); i += f3())
    a[i] = 1;

  // Odd indices must be 1, even indices untouched (0).
  for (int i = 0; i < 12; ++i)
    if (a[i] != (i & 1))
      abort ();
}
/libgomp.c++/loop-6.C
0,0 → 1,24
// { dg-do run }

extern "C" void abort (void);

// 'continue' inside an omp for body must skip the rest of the body,
// not terminate the iteration distribution.
volatile int count;
static int test(void)
{
  return ++count > 0;
}

int main()
{
  int i;
  #pragma omp for
  for (i = 0; i < 10; ++i)
    {
      if (test())
	continue;
      // Never reached: test() always returns nonzero.
      abort ();
    }
  // All 10 iterations must have executed test() exactly once,
  // and i must equal the loop's final value.
  if (i != count)
    abort ();
  return 0;
}
/libgomp.c++/loop-7.C
0,0 → 1,22
// PR c++/24502
// { dg-do run }

extern "C" void abort ();

// Regression test: an omp for whose iteration variable has a
// template-dependent type must be accepted and behave normally.
template <typename T> T
foo (T r)
{
  T i;
  #pragma omp for
  for (i = 0; i < 10; i++)
    r += i;
  return r;
}

int
main ()
{
  // Instantiated with both int and long.
  if (foo (0) != 10 * 9 / 2 || foo (2L) != 10L * 9 / 2 + 2)
    abort ();
  return 0;
}
/libgomp.c++/pr30703.C
0,0 → 1,73
// PR c++/30703
// { dg-do run }

// Checks that firstprivate/lastprivate/private clauses on a class-type
// variable invoke the right constructors/destructors, and that every
// constructed object is eventually destructed (ctor + cctor == dtor).

#include <omp.h>

extern "C" void abort ();

// Global counters for each special member invocation.
int ctor, cctor, dtor;

struct A
{
  A();
  A(const A &);
  ~A();
  int i;
};

A::A()
{
  #pragma omp atomic
  ctor++;
}

A::A(const A &r)
{
  i = r.i;
  #pragma omp atomic
  cctor++;
}

A::~A()
{
  #pragma omp atomic
  dtor++;
}

void
foo (A a, A b)
{
  int i, j = 0;
  // a: copy-constructed in (firstprivate), last value copied out
  // (lastprivate).  b: default-constructed per thread (private).
  #pragma omp parallel for firstprivate (a) lastprivate (a) private (b) schedule (static, 1) num_threads (5)
  for (i = 0; i < 5; i++)
    {
      b.i = 5;
      if (a.i != 6)
	#pragma omp atomic
	j += 1;
      a.i = b.i + i + 6;
    }

  // Last iteration (i == 4) stored 5 + 4 + 6 = 15 into its private a.
  if (j || a.i != 15)
    abort ();
}

void
bar ()
{
  A a, b;
  a.i = 6;
  b.i = 7;
  foo (a, b);
}

int
main ()
{
  omp_set_dynamic (false);
  if (ctor || cctor || dtor)
    abort ();
  bar ();
  // Every construction must be matched by a destruction.
  if (ctor + cctor != dtor)
    abort ();
}
/libgomp.c++/single-1.C
0,0 → 1,19
extern "C" void abort (void);

// The single construct must execute its block exactly once per team,
// regardless of how many threads are in the parallel region.
// Implicit-int main() is not valid C++; declare the return type.
int
main ()
{
  int i = 0;

  #pragma omp parallel shared (i)
  {
    #pragma omp single
    {
      i++;
    }
  }

  if (i != 1)
    abort ();

  return 0;
}
/libgomp.c++/single-2.C
0,0 → 1,36
extern "C" void abort (void);

struct X
{
  int a;
  char b;
  int c;
};

// copyprivate must broadcast the executing thread's private values —
// a scalar and a struct — to every other thread's private copies.
// Implicit-int main() is not valid C++; declare the return type.
int
main ()
{
  int i = 0;
  struct X x;
  int bad = 0;

  #pragma omp parallel private (i, x) shared (bad)
  {
    i = 5;

    #pragma omp single copyprivate (i, x)
    {
      i++;
      x.a = 23;
      x.b = 42;
      x.c = 26;
    }

    // After the single, every thread must see the broadcast values.
    if (i != 6 || x.a != 23 || x.b != 42 || x.c != 26)
      bad = 1;
  }

  if (bad)
    abort ();

  return 0;
}
/libgomp.c++/single-3.C
0,0 → 1,21
extern "C" void abort (void);

// Multiple copyprivate clauses on one single construct: both a and b
// must be broadcast, so all threads observe a == b afterwards.
void
single (int a, int b)
{
  #pragma omp single copyprivate(a) copyprivate(b)
  {
    a = b = 5;
  }

  if (a != b)
    abort ();
}

int main()
{
  #pragma omp parallel
    single (1, 2);

  return 0;
}
/libgomp.c++/c++.exp
0,0 → 1,20
set lang_library_path	"../libstdc++-v3/src/.libs"
set lang_test_file	"${lang_library_path}/libstdc++.a"
set lang_link_flags	"-lstdc++"

load_lib libgomp-dg.exp

# Initialize dg.
dg-init

# Only run the C++ tests when a freshly-built libstdc++ is available.
if [file exists "${blddir}/${lang_test_file}"] {

    # Gather a list of all tests.
    set tests [lsort [glob -nocomplain $srcdir/$subdir/*.C]]

    # Main loop.  Use the generic dg-runtest; the previous
    # gfortran-dg-runtest was a copy-paste from the Fortran driver.
    dg-runtest $tests "" ""
}

# All done.
dg-finish
/libgomp.c++/pr27337.C
0,0 → 1,87
// PR middle-end/27337
// { dg-do run }

// firstprivate/lastprivate on a class type returned by value: counts
// how many times each special member runs on non-master threads
// (n[0] default ctor, n[1] dtor, n[2] copy ctor) and verifies the
// final object's value.

#include <omp.h>

extern "C" void abort (void);

struct S
{
  S ();
  ~S ();
  S (const S &);
  int i;
};

int n[3];

S::S () : i(18)
{
  if (omp_get_thread_num () != 0)
    #pragma omp atomic
    n[0]++;
}

S::~S ()
{
  if (omp_get_thread_num () != 0)
    #pragma omp atomic
    n[1]++;
}

S::S (const S &x)
{
  if (x.i != 18)
    abort ();
  i = 118;
  if (omp_get_thread_num () != 0)
    #pragma omp atomic
    n[2]++;
}

S
foo ()
{
  int i;
  S ret;

  // Each of 4 threads copy-constructs ret (i becomes 118) and adds
  // its thread number once; lastprivate copies out thread 3's value.
  #pragma omp parallel for firstprivate (ret) lastprivate (ret) \
		  schedule (static, 1) num_threads (4)
  for (i = 0; i < 4; i++)
    ret.i += omp_get_thread_num ();

  return ret;
}

S
bar ()
{
  int i;
  S ret;

  // ret is shared here; threads accumulate into the one object.
  #pragma omp parallel for num_threads (4)
  for (i = 0; i < 4; i++)
    #pragma omp atomic
    ret.i += omp_get_thread_num () + 1;

  return ret;
}

S x;

int
main (void)
{
  omp_set_dynamic (false);
  x = foo ();
  // 3 non-master threads each ran the copy ctor and dtor exactly once.
  if (n[0] != 0 || n[1] != 3 || n[2] != 3)
    abort ();
  if (x.i != 118 + 3)
    abort ();
  x = bar ();
  if (n[0] != 0 || n[1] != 3 || n[2] != 3)
    abort ();
  if (x.i != 18 + 0 + 1 + 2 + 3 + 4)
    abort ();
  return 0;
}
/libgomp.c++/master-1.C
0,0 → 1,24
// PR c++/24734
// { dg-do run }

extern "C" void abort ();
int i;

// Regression test: an omp master construct inside a function template
// must be instantiated correctly; only the master thread increments i.
template<int> void
foo ()
{
  #pragma omp parallel
  {
    #pragma omp master
    i++;
  }
}

int
main ()
{
  foo<0> ();
  if (i != 1)
    abort ();
  return 0;
}
/libgomp.c++/pr26943.C
0,0 → 1,62
// PR c++/26943
// { dg-do run }

// firstprivate + lastprivate on a sections construct with a class
// type: the copy ctor (firstprivate), the user code, and the copy
// assignment (lastprivate) must run in order; x.x tracks the phases
// -1 -> 0 -> 1 -> 2.

#include <assert.h>
#include <unistd.h>

struct S
{
  public:
    int x;
    S () : x(-1) { }
    S (const S &);
    S& operator= (const S &);
    void test ();
};

static volatile int hold;

S::S (const S &s)
{
  // Delay the master so both threads' copies overlap in time.
  #pragma omp master
  sleep (1);

  assert (s.x == -1);
  x = 0;
}

S&
S::operator= (const S& s)
{
  // lastprivate copy-out: source must already have been test()ed.
  assert (s.x == 1);
  x = 2;
  return *this;
}

void
S::test ()
{
  assert (x == 0);
  x = 1;
}

static S x;

void
foo ()
{
  #pragma omp sections firstprivate(x) lastprivate(x)
  {
    x.test();
  }
}

int
main ()
{
  #pragma omp parallel num_threads(2)
    foo();

  // Final phase: assignment operator ran exactly once.
  assert (x.x == 2);
  return 0;
}
/libgomp.c++/parallel-1.C
0,0 → 1,40
#include <omp.h>

extern "C" void abort (void);

int
foo (void)
{
  return 10;
}

// Tests the if and num_threads clauses with non-constant expressions:
// a false if-clause forces a single-thread region; a function call is
// a valid num_threads argument.
// Implicit-int main() is not valid C++; declare the return type.
int
main ()
{
  int A = 0;

  // foo () > 10 is false, so the region runs with one thread.
  #pragma omp parallel if (foo () > 10) shared (A)
  {
    A = omp_get_num_threads ();
  }

  if (A != 1)
    abort ();

  #pragma omp parallel if (foo () == 10) num_threads (3) shared (A)
  {
    A = omp_get_num_threads ();
  }

  if (A != 3)
    abort ();

  // num_threads may be any integer expression, including a call.
  #pragma omp parallel if (foo () == 10) num_threads (foo ()) shared (A)
  {
    A = omp_get_num_threads ();
  }

  if (A != 10)
    abort ();

  return 0;
}
/libgomp.c++/reduction-1.C
0,0 → 1,36
#include <omp.h>
#include <stdlib.h>

// Reductions on a parallel construct: +, *, and & simultaneously.
// Each thread's private copy must start at the operator's identity
// (0 for +, 1 for *, ~0 for &); j collects failures bitwise.
int
main (void)
{
  int i = 0, j = 0, k = ~0;
  double d = 1.0;
  #pragma omp parallel num_threads(4) reduction(+:i) reduction(*:d) reduction(&:k)
  {
    // Private copies must be identity-initialized.
    if (i != 0 || d != 1.0 || k != ~0)
      #pragma omp atomic
      j |= 1;

    if (omp_get_num_threads () != 4)
      #pragma omp atomic
      j |= 2;

    i = omp_get_thread_num ();
    d = i + 1;
    k = ~(1 << (2 * i));
  }

  if (j & 1)
    abort ();
  // Only check totals if we actually got 4 threads.
  if ((j & 2) == 0)
    {
      if (i != (0 + 1 + 2 + 3))
	abort ();
      if (d != (1.0 * 2.0 * 3.0 * 4.0))
	abort ();
      // ANDing ~(1<<0), ~(1<<2), ~(1<<4), ~(1<<6) clears bits 0x55.
      if (k != (~0 ^ 0x55))
	abort ();
    }
  return 0;
}
/libgomp.c++/reduction-2.C
0,0 → 1,50
#include <omp.h>
#include <stdlib.h>

// Reductions on a worksharing for loop: the single's updates happen
// before the loop (implicit barrier at end of single), and the loop's
// reduction must combine with those prior shared values.
int
main (void)
{
  int i = 0, j = 0, k = ~0, l;
  double d = 1.0;
  #pragma omp parallel num_threads(4)
  {
    // One thread seeds the shared variables; the implicit barrier at
    // the end of single makes this visible before the loop.
    #pragma omp single
    {
      i = 16;
      k ^= (1 << 16);
      d += 32.0;
    }

    #pragma omp for reduction(+:i) reduction(*:d) reduction(&:k)
    for (l = 0; l < 4; l++)
      {
	// Private copies start at the identity values.
	if (omp_get_num_threads () == 4 && (i != 0 || d != 1.0 || k != ~0))
	  #pragma omp atomic
	  j |= 1;

	if (l == omp_get_thread_num ())
	  {
	    i = omp_get_thread_num ();
	    d = i + 1;
	    k = ~(1 << (2 * i));
	  }
      }

    if (omp_get_num_threads () == 4)
      {
	if (i != (16 + 0 + 1 + 2 + 3))
	  #pragma omp atomic
	  j |= 2;
	if (d != (33.0 * 1.0 * 2.0 * 3.0 * 4.0))
	  #pragma omp atomic
	  j |= 4;
	if (k != (~0 ^ 0x55 ^ (1 << 16)))
	  #pragma omp atomic
	  j |= 8;
      }
  }

  if (j)
    abort ();
  return 0;
}
/libgomp.c++/reduction-3.C
0,0 → 1,51
#include <omp.h>
#include <stdlib.h>

// Same as reduction-2.C but with nowait on the loop: an explicit
// barrier is then required before reading the reduction results.
int
main (void)
{
  int i = 0, j = 0, k = ~0, l;
  double d = 1.0;
  #pragma omp parallel num_threads(4)
  {
    #pragma omp single
    {
      i = 16;
      k ^= (1 << 16);
      d += 32.0;
    }

    // nowait removes the implicit barrier at the end of the loop.
    #pragma omp for reduction(+:i) reduction(*:d) reduction(&:k) nowait
    for (l = 0; l < 4; l++)
      {
	if (omp_get_num_threads () == 4 && (i != 0 || d != 1.0 || k != ~0))
	  #pragma omp atomic
	  j |= 1;

	if (l == omp_get_thread_num ())
	  {
	    i = omp_get_thread_num ();
	    d = i + 1;
	    k = ~(1 << (2 * i));
	  }
      }

    if (omp_get_num_threads () == 4)
      {
	// Explicit barrier: the reduction results are only guaranteed
	// after all threads pass the (nowait) loop.
	#pragma omp barrier
	if (i != (16 + 0 + 1 + 2 + 3))
	  #pragma omp atomic
	  j |= 2;
	if (d != (33.0 * 1.0 * 2.0 * 3.0 * 4.0))
	  #pragma omp atomic
	  j |= 4;
	if (k != (~0 ^ 0x55 ^ (1 << 16)))
	  #pragma omp atomic
	  j |= 8;
      }
  }

  if (j)
    abort ();
  return 0;
}
/libgomp.c++/ctor-1.C
0,0 → 1,65
// { dg-do run }

// private(b) on a class type: each thread default-constructs and
// destructs its own copy; counters verify one ctor/dtor per thread
// plus one for the original object in foo().

#include <omp.h>
#include <assert.h>

struct B
{
  static int icount;	// default constructions
  static int dcount;	// destructions
  static int xcount;	// doit() calls

  B();
  B(const B &);
  ~B();
  B& operator=(const B &);
  void doit();
};

int B::icount;
int B::dcount;
int B::xcount;

B::B()
{
  #pragma omp atomic
  icount++;
}

B::~B()
{
  #pragma omp atomic
  dcount++;
}

void B::doit()
{
  #pragma omp atomic
  xcount++;
}

static int nthreads;

void foo()
{
  B b;
  #pragma omp parallel private(b)
  {
    #pragma omp master
    nthreads = omp_get_num_threads ();
    b.doit();
  }
}

int main()
{
  omp_set_dynamic (0);
  omp_set_num_threads (4);
  foo();

  assert (B::xcount == nthreads);
  // One object per thread plus the original in foo().
  assert (B::icount == nthreads+1);
  assert (B::dcount == nthreads+1);

  return 0;
}
/libgomp.c++/ctor-2.C
0,0 → 1,76
// { dg-do run }

// firstprivate on a class type: each thread's copy must be made by
// the copy constructor from the original object ('expected'), and the
// thread copy must be a distinct object.

#include <omp.h>
#include <assert.h>

struct B
{
  static int ccount;	// copy constructions
  static int dcount;	// destructions
  static int xcount;	// doit() calls
  static B *expected;	// address of the original in foo()

  B();
  B(int);
  B(const B &);
  ~B();
  B& operator=(const B &);
  void doit();
};

int B::ccount;
int B::dcount;
int B::xcount;
B * B::expected;

B::B(int)
{
  // Records the original's address for the copy ctor to check.
  expected = this;
}

B::B(const B &b)
{
  #pragma omp atomic
  ccount++;
  // firstprivate must copy from the original object.
  assert (&b == expected);
}

B::~B()
{
  #pragma omp atomic
  dcount++;
}

void B::doit()
{
  #pragma omp atomic
  xcount++;
  // Each thread works on its own copy, not the original.
  assert (this != expected);
}

static int nthreads;

void foo()
{
  B b(0);

  #pragma omp parallel firstprivate(b)
  {
    #pragma omp master
    nthreads = omp_get_num_threads ();
    b.doit();
  }
}

int main()
{
  omp_set_dynamic (0);
  omp_set_num_threads (4);
  foo();

  assert (B::xcount == nthreads);
  assert (B::ccount == nthreads);
  // Thread copies plus the original.
  assert (B::dcount == nthreads+1);

  return 0;
}
/libgomp.c++/ctor-3.C
0,0 → 1,89
// { dg-do run }

// lastprivate on a sections construct with a class type: thread
// copies are default-constructed, and the copy from the last section
// is assigned back to the original exactly once.

#include <omp.h>
#include <assert.h>

struct B
{
  static int icount;	// default constructions (thread copies)
  static int dcount;	// destructions
  static int ccount;	// copy assignments (lastprivate copy-out)
  static B *e_inner;	// thread copy that ran the last section
  static B *e_outer;	// the original object in foo()

  B();
  B(int);
  B(const B &);
  ~B();
  B& operator=(const B &);
  void doit();
};

int B::icount;
int B::dcount;
int B::ccount;
B * B::e_inner;
B * B::e_outer;

B::B()
{
  #pragma omp atomic
  icount++;
}

B::B(int)
{
  e_outer = this;
}

B::~B()
{
  #pragma omp atomic
  dcount++;
}

B& B::operator= (const B &b)
{
  // Copy-out must go from the last section's copy to the original.
  assert (&b == e_inner);
  assert (this == e_outer);
  #pragma omp atomic
  ccount++;
  return *this;
}

void B::doit()
{
  #pragma omp critical
  {
    assert (e_inner == 0);
    e_inner = this;
  }
}

static int nthreads;

void foo()
{
  B b(0);

  #pragma omp parallel sections lastprivate(b)
  {
    #pragma omp section
    nthreads = omp_get_num_threads ();
    #pragma omp section
    b.doit ();
  }
}

int main()
{
  omp_set_dynamic (0);
  omp_set_num_threads (4);
  foo();

  // Exactly one lastprivate assignment.
  assert (B::ccount == 1);
  assert (B::icount == nthreads);
  assert (B::dcount == nthreads+1);

  return 0;
}
/libgomp.c++/ctor-4.C
0,0 → 1,90
// { dg-do run }

// Combined firstprivate + lastprivate on sections: thread copies are
// copy-constructed from the original, and the copy from the last
// section is assigned back to the original once.

#include <omp.h>
#include <assert.h>

struct B
{
  static int ccount;	// copy constructions (firstprivate)
  static int dcount;	// destructions
  static int ecount;	// copy assignments (lastprivate copy-out)
  static B *e_inner;	// thread copy that ran the last section
  static B *e_outer;	// the original object in foo()

  B();
  B(int);
  B(const B &);
  ~B();
  B& operator=(const B &);
  void doit();
};

int B::ccount;
int B::dcount;
int B::ecount;
B * B::e_inner;
B * B::e_outer;

B::B(int)
{
  e_outer = this;
}

B::B(const B &b)
{
  // firstprivate must copy from the original.
  assert (&b == e_outer);
  #pragma omp atomic
  ccount++;
}

B::~B()
{
  #pragma omp atomic
  dcount++;
}

B& B::operator= (const B &b)
{
  // Copy-out from the last section's copy to the original.
  assert (&b == e_inner);
  assert (this == e_outer);
  #pragma omp atomic
  ecount++;
  return *this;
}

void B::doit()
{
  #pragma omp critical
  {
    assert (e_inner == 0);
    e_inner = this;
  }
}

static int nthreads;

void foo()
{
  B b(0);

  #pragma omp parallel sections firstprivate(b) lastprivate(b)
  {
    #pragma omp section
    nthreads = omp_get_num_threads ();
    #pragma omp section
    b.doit ();
  }
}

int main()
{
  omp_set_dynamic (0);
  omp_set_num_threads (4);
  foo();

  assert (B::ecount == 1);
  assert (B::ccount == nthreads);
  assert (B::dcount == nthreads+1);

  return 0;
}
/libgomp.c++/ctor-5.C
0,0 → 1,52
// { dg-do run }
// { dg-require-effective-target tls_runtime }

// copyin on a threadprivate class object must use the copy assignment
// operator, copying from the master's instance into each non-master
// thread's instance.

#include <omp.h>
#include <assert.h>

struct B
{
  static int count;	// copy assignments performed
  static B *expected;	// the master thread's instance

  B& operator=(const B &);
};

int B::count;
B * B::expected;

static B thr;
#pragma omp threadprivate(thr)

B& B::operator= (const B &b)
{
  // Source is the master's copy; destination is a different thread's.
  assert (&b == expected);
  assert (this != expected);
  #pragma omp atomic
  count++;
  return *this;
}

static int nthreads;

void foo()
{
  B::expected = &thr;

  #pragma omp parallel copyin(thr)
  {
    #pragma omp master
    nthreads = omp_get_num_threads ();
  }
}

int main()
{
  omp_set_dynamic (0);
  omp_set_num_threads (4);
  foo();

  // The master does not assign to itself.
  assert (B::count == nthreads-1);

  return 0;
}
/libgomp.c++/ctor-6.C
0,0 → 1,50
// { dg-do run }

// copyprivate on a class object must broadcast via the copy
// assignment operator from the single-executing thread's copy to
// every other thread's copy.

#include <omp.h>
#include <assert.h>

struct B
{
  static int count;	// copy assignments performed
  static B *expected;	// copy owned by the thread that ran the single

  B& operator=(const B &);
};

int B::count;
B * B::expected;

B& B::operator= (const B &b)
{
  // Source is the broadcasting thread's copy; destination differs.
  assert (&b == expected);
  assert (this != expected);
  #pragma omp atomic
  count++;
  return *this;
}

static int nthreads;

void foo()
{
  #pragma omp parallel
  {
    B b;
    #pragma omp single copyprivate(b)
    {
      nthreads = omp_get_num_threads ();
      B::expected = &b;
    }
  }
}

int main()
{
  omp_set_dynamic (0);
  omp_set_num_threads (4);
  foo();

  // The broadcasting thread does not assign to itself.
  assert (B::count == nthreads-1);

  return 0;
}
/libgomp.c++/ctor-7.C
0,0 → 1,67
// { dg-do run }

// private on an ARRAY of class objects: each thread must construct
// and destruct all N elements of its own copy.

#include <omp.h>
#include <assert.h>

#define N 10

struct B
{
  static int icount;	// default constructions
  static int dcount;	// destructions
  static int xcount;	// doit() calls

  B();
  B(const B &);
  ~B();
  B& operator=(const B &);
  void doit();
};

int B::icount;
int B::dcount;
int B::xcount;

B::B()
{
  #pragma omp atomic
  icount++;
}

B::~B()
{
  #pragma omp atomic
  dcount++;
}

void B::doit()
{
  #pragma omp atomic
  xcount++;
}

static int nthreads;

void foo()
{
  B b[N];
  #pragma omp parallel private(b)
  {
    #pragma omp master
    nthreads = omp_get_num_threads ();
    b[0].doit();
  }
}

int main()
{
  omp_set_dynamic (0);
  omp_set_num_threads (4);
  foo();

  assert (B::xcount == nthreads);
  // N elements per thread copy plus N for the original array.
  assert (B::icount == (nthreads+1)*N);
  assert (B::dcount == (nthreads+1)*N);

  return 0;
}
/libgomp.c++/ctor-8.C
0,0 → 1,77
// { dg-do run }
// { dg-require-effective-target tls_runtime }

// firstprivate on an array of class objects: each thread must
// copy-construct (and destruct) every element of its copy.  Per-thread
// bitmasks record which element indices were copied/destroyed.

#include <omp.h>
#include <assert.h>

#define N 10
#define THR 4

struct B
{
  B();
  B(const B &);
  ~B();
  B& operator=(const B &);
  void doit();
};

static B *base;		// original array in foo()
static B *threadbase;	// this thread's private copy (threadprivate)
static unsigned cmask[THR];	// per-thread copy-ctor index mask
static unsigned dmask[THR];	// per-thread dtor index mask

#pragma omp threadprivate(threadbase)

B::B()
{
  // Only the original array (constructed before base is set) may use
  // the default ctor.
  assert (base == 0);
}

B::B(const B &b)
{
  // Source must be an element of the original array.
  unsigned index = &b - base;
  assert (index < N);
  cmask[omp_get_thread_num()] |= 1u << index;
}

B::~B()
{
  // Only count destructions of thread copies (threadbase set).
  if (threadbase)
    {
      unsigned index = this - threadbase;
      assert (index < N);
      dmask[omp_get_thread_num()] |= 1u << index;
    }
}

void foo()
{
  B b[N];

  base = b;

  #pragma omp parallel firstprivate(b)
  {
    assert (omp_get_num_threads () == THR);
    threadbase = b;
  }

  threadbase = 0;
}

int main()
{
  omp_set_dynamic (0);
  omp_set_num_threads (THR);
  foo();

  // Every thread must have copied and destroyed all N elements.
  for (int i = 0; i < THR; ++i)
    {
      unsigned xmask = (1u << N) - 1;
      assert (cmask[i] == xmask);
      assert (dmask[i] == xmask);
    }

  return 0;
}
/libgomp.c++/ctor-9.C
0,0 → 1,60
// { dg-do run }
// { dg-require-effective-target tls_runtime }

// copyprivate on an array of class objects: the broadcast must assign
// element-by-element, from the single-executing thread's array into
// every other thread's array, with matching indices.

#include <omp.h>
#include <assert.h>

#define N 10
#define THR 4

struct B
{
  B& operator=(const B &);
};

static B *base;		// array of the thread that ran the single
static B *threadbase;	// receiving thread's array (threadprivate)
static int singlethread;	// id of the broadcasting thread
#pragma omp threadprivate(threadbase)

static unsigned cmask[THR];	// per-thread mask of assigned indices

B& B::operator= (const B &b)
{
  unsigned sindex = &b - base;
  unsigned tindex = this - threadbase;
  assert(sindex < N);
  // Element i must be assigned from element i.
  assert(sindex == tindex);
  cmask[omp_get_thread_num ()] |= 1u << tindex;
  return *this;
}

void foo()
{
  #pragma omp parallel
  {
    B b[N];
    threadbase = b;
    #pragma omp single copyprivate(b)
    {
      assert(omp_get_num_threads () == THR);
      singlethread = omp_get_thread_num ();
      base = b;
    }
  }
}

int main()
{
  omp_set_dynamic (0);
  omp_set_num_threads (THR);
  foo();

  // The broadcasting thread assigns nothing to itself; every other
  // thread must have received all N elements.
  for (int i = 0; i < THR; ++i)
    if (i == singlethread)
      assert(cmask[singlethread] == 0);
    else
      assert(cmask[i] == (1u << N) - 1);

  return 0;
}
/libgomp.c++/nested-1.C
0,0 → 1,28
// { dg-do run }

extern "C" void abort(void);
#define N 1000

// Nested parallelism: an inner parallel region (even with one thread)
// inside each iteration of an outer parallel for must execute its body
// exactly once per outer iteration.
int foo()
{
  int i = 0, j;

  #pragma omp parallel for num_threads(2) shared (i)
  for (j = 0; j < N; ++j)
    {
      #pragma omp parallel num_threads(1) shared (i)
      {
	#pragma omp atomic
	i++;
      }
    }

  return i;
}

int main()
{
  if (foo() != N)
    abort ();
  return 0;
}
/libgomp.c++/pr24455-1.C
0,0 → 1,6
// { dg-do compile }
// { dg-require-effective-target tls }
// Companion translation unit for pr24455.C: provides the definition
// of the threadprivate variable declared extern there.
extern int i;
#pragma omp threadprivate (i)

int i;
/libgomp.c++/sections-1.C
0,0 → 1,64
/******************************************************************************
* FILE: omp_workshare2.c
* DESCRIPTION:
*   OpenMP Example - Sections Work-sharing - C/C++ Version
*   In this example, the OpenMP SECTION directive is used to assign
*   different array operations to threads that execute a SECTION. Each
*   thread receives its own copy of the result array to work with.
* AUTHOR: Blaise Barney  5/99
* LAST REVISED: 04/06/05
******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define N     50

int main (int argc, char *argv[]) {

int i, nthreads, tid;
float a[N], b[N], c[N];

/* Some initializations */
for (i=0; i<N; i++)
  a[i] = b[i] = i * 1.0;

/* c is private: each thread computes into its own result array.  */
#pragma omp parallel shared(a,b,nthreads) private(c,i,tid)
  {
  tid = omp_get_thread_num();
  if (tid == 0)
    {
    nthreads = omp_get_num_threads();
    printf("Number of threads = %d\n", nthreads);
    }
  printf("Thread %d starting...\n",tid);

  /* Each section goes to some thread; nowait skips the end barrier.  */
  #pragma omp sections nowait
    {
    #pragma omp section
      {
      printf("Thread %d doing section 1\n",tid);
      for (i=0; i<N; i++)
        {
        c[i] = a[i] + b[i];
        printf("Thread %d: c[%d]= %f\n",tid,i,c[i]);
        }
      }

    #pragma omp section
      {
      printf("Thread %d doing section 2\n",tid);
      for (i=0; i<N; i++)
        {
        c[i] = a[i] * b[i];
        printf("Thread %d: c[%d]= %f\n",tid,i,c[i]);
        }
      }

    }  /* end of sections */

  printf("Thread %d done.\n",tid);

  }  /* end of parallel section */

return 0;
}
/libgomp.c++/pr24455.C
0,0 → 1,23
// { dg-do run }
// { dg-additional-sources pr24455-1.C }
// { dg-require-effective-target tls_runtime }

// Threadprivate on an extern variable defined in another translation
// unit (pr24455-1.C): each of 10 threads increments its own copy, so
// every copy must equal exactly 1 after the barrier.

extern "C" void abort (void);

extern int i;
#pragma omp threadprivate(i)

int main()
{
  i = 0;

  // copyin broadcasts the master's 0 to all threads.
  #pragma omp parallel default(none) num_threads(10) copyin(i)
  {
    i++;
    #pragma omp barrier
    if (i != 1)
      abort ();
  }

  return 0;
}
/libgomp.c++/pr26691.C
0,0 → 1,20
// PR c++/26691

// private on a class with a default-argument constructor: each
// thread's copy must be default-constructed, so a.n == 3 everywhere.
// Exit status (err) is the test result.

struct A
{
  int n;
  A (int i = 3) : n (i) {}
};

int
main ()
{
  A a;
  int err = 0;
  #pragma omp parallel private (a) reduction (+:err)
    if (a.n != 3)
      err++;

  return err;
}
|
/Makefile.in
0,0 → 1,393
# Makefile.in generated by automake 1.9.6 from Makefile.am. |
# @configure_input@ |
|
# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, |
# 2003, 2004, 2005 Free Software Foundation, Inc. |
# This Makefile.in is free software; the Free Software Foundation |
# gives unlimited permission to copy and/or distribute it, |
# with or without modifications, as long as this notice is preserved. |
|
# This program is distributed in the hope that it will be useful, |
# but WITHOUT ANY WARRANTY, to the extent permitted by law; without |
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A |
# PARTICULAR PURPOSE. |
|
@SET_MAKE@ |
srcdir = @srcdir@ |
top_srcdir = @top_srcdir@ |
VPATH = @srcdir@ |
pkgdatadir = $(datadir)/@PACKAGE@ |
pkglibdir = $(libdir)/@PACKAGE@ |
pkgincludedir = $(includedir)/@PACKAGE@ |
top_builddir = .. |
am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd |
INSTALL = @INSTALL@ |
install_sh_DATA = $(install_sh) -c -m 644 |
install_sh_PROGRAM = $(install_sh) -c |
install_sh_SCRIPT = $(install_sh) -c |
INSTALL_HEADER = $(INSTALL_DATA) |
transform = $(program_transform_name) |
NORMAL_INSTALL = : |
PRE_INSTALL = : |
POST_INSTALL = : |
NORMAL_UNINSTALL = : |
PRE_UNINSTALL = : |
POST_UNINSTALL = : |
build_triplet = @build@ |
host_triplet = @host@ |
target_triplet = @target@ |
subdir = testsuite |
DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in |
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 |
am__aclocal_m4_deps = $(top_srcdir)/../config/acx.m4 \ |
$(top_srcdir)/../config/depstand.m4 \ |
$(top_srcdir)/../config/enable.m4 \ |
$(top_srcdir)/../config/lead-dot.m4 \ |
$(top_srcdir)/../config/multi.m4 \ |
$(top_srcdir)/../config/stdint.m4 \ |
$(top_srcdir)/../config/tls.m4 $(top_srcdir)/acinclude.m4 \ |
$(top_srcdir)/../libtool.m4 $(top_srcdir)/configure.ac |
am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ |
$(ACLOCAL_M4) |
mkinstalldirs = $(SHELL) $(top_srcdir)/../mkinstalldirs |
CONFIG_HEADER = $(top_builddir)/config.h |
CONFIG_CLEAN_FILES = |
SOURCES = |
DIST_SOURCES = |
DEJATOOL = $(PACKAGE) |
RUNTESTDEFAULTFLAGS = --tool $$tool --srcdir $$srcdir |
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) |
ACLOCAL = @ACLOCAL@ |
AMDEP_FALSE = @AMDEP_FALSE@ |
AMDEP_TRUE = @AMDEP_TRUE@ |
AMTAR = @AMTAR@ |
AR = @AR@ |
AUTOCONF = @AUTOCONF@ |
AUTOHEADER = @AUTOHEADER@ |
AUTOMAKE = @AUTOMAKE@ |
AWK = @AWK@ |
BUILD_INFO_FALSE = @BUILD_INFO_FALSE@ |
BUILD_INFO_TRUE = @BUILD_INFO_TRUE@ |
CC = @CC@ |
CCDEPMODE = @CCDEPMODE@ |
CFLAGS = @CFLAGS@ |
CPP = @CPP@ |
CPPFLAGS = @CPPFLAGS@ |
CYGPATH_W = @CYGPATH_W@ |
DEFS = @DEFS@ |
DEPDIR = @DEPDIR@ |
ECHO_C = @ECHO_C@ |
ECHO_N = @ECHO_N@ |
ECHO_T = @ECHO_T@ |
EGREP = @EGREP@ |
EXEEXT = @EXEEXT@ |
FC = @FC@ |
FCFLAGS = @FCFLAGS@ |
GENINSRC_FALSE = @GENINSRC_FALSE@ |
GENINSRC_TRUE = @GENINSRC_TRUE@ |
INSTALL_DATA = @INSTALL_DATA@ |
INSTALL_PROGRAM = @INSTALL_PROGRAM@ |
INSTALL_SCRIPT = @INSTALL_SCRIPT@ |
INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ |
LDFLAGS = @LDFLAGS@ |
LIBGOMP_BUILD_VERSIONED_SHLIB_FALSE = @LIBGOMP_BUILD_VERSIONED_SHLIB_FALSE@ |
LIBGOMP_BUILD_VERSIONED_SHLIB_TRUE = @LIBGOMP_BUILD_VERSIONED_SHLIB_TRUE@ |
LIBOBJS = @LIBOBJS@ |
LIBS = @LIBS@ |
LIBTOOL = @LIBTOOL@ |
LN_S = @LN_S@ |
LTLIBOBJS = @LTLIBOBJS@ |
MAINT = @MAINT@ |
MAINTAINER_MODE_FALSE = @MAINTAINER_MODE_FALSE@ |
MAINTAINER_MODE_TRUE = @MAINTAINER_MODE_TRUE@ |
MAKEINFO = @MAKEINFO@ |
OBJEXT = @OBJEXT@ |
OMP_LOCK_ALIGN = @OMP_LOCK_ALIGN@ |
OMP_LOCK_KIND = @OMP_LOCK_KIND@ |
OMP_LOCK_SIZE = @OMP_LOCK_SIZE@ |
OMP_NEST_LOCK_ALIGN = @OMP_NEST_LOCK_ALIGN@ |
OMP_NEST_LOCK_KIND = @OMP_NEST_LOCK_KIND@ |
OMP_NEST_LOCK_SIZE = @OMP_NEST_LOCK_SIZE@ |
OPT_LDFLAGS = @OPT_LDFLAGS@ |
PACKAGE = @PACKAGE@ |
PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ |
PACKAGE_NAME = @PACKAGE_NAME@ |
PACKAGE_STRING = @PACKAGE_STRING@ |
PACKAGE_TARNAME = @PACKAGE_TARNAME@ |
PACKAGE_VERSION = @PACKAGE_VERSION@ |
PATH_SEPARATOR = @PATH_SEPARATOR@ |
PERL = @PERL@ |
RANLIB = @RANLIB@ |
SECTION_LDFLAGS = @SECTION_LDFLAGS@ |
SET_MAKE = @SET_MAKE@ |
SHELL = @SHELL@ |
STRIP = @STRIP@ |
USE_FORTRAN_FALSE = @USE_FORTRAN_FALSE@ |
USE_FORTRAN_TRUE = @USE_FORTRAN_TRUE@ |
VERSION = @VERSION@ |
XCFLAGS = @XCFLAGS@ |
XLDFLAGS = @XLDFLAGS@ |
ac_ct_AR = @ac_ct_AR@ |
ac_ct_CC = @ac_ct_CC@ |
ac_ct_FC = @ac_ct_FC@ |
ac_ct_RANLIB = @ac_ct_RANLIB@ |
ac_ct_STRIP = @ac_ct_STRIP@ |
am__fastdepCC_FALSE = @am__fastdepCC_FALSE@ |
am__fastdepCC_TRUE = @am__fastdepCC_TRUE@ |
am__include = @am__include@ |
am__leading_dot = @am__leading_dot@ |
am__quote = @am__quote@ |
am__tar = @am__tar@ |
am__untar = @am__untar@ |
bindir = @bindir@ |
build = @build@ |
build_alias = @build_alias@ |
build_cpu = @build_cpu@ |
build_os = @build_os@ |
build_vendor = @build_vendor@ |
config_path = @config_path@ |
datadir = @datadir@ |
enable_shared = @enable_shared@ |
enable_static = @enable_static@ |
exec_prefix = @exec_prefix@ |
host = @host@ |
host_alias = @host_alias@ |
host_cpu = @host_cpu@ |
host_os = @host_os@ |
host_vendor = @host_vendor@ |
includedir = @includedir@ |
infodir = @infodir@ |
install_sh = @install_sh@ |
libdir = @libdir@ |
libexecdir = @libexecdir@ |
libtool_VERSION = @libtool_VERSION@ |
link_gomp = @link_gomp@ |
localstatedir = @localstatedir@ |
mandir = @mandir@ |
mkdir_p = @mkdir_p@ |
multi_basedir = @multi_basedir@ |
oldincludedir = @oldincludedir@ |
prefix = @prefix@ |
program_transform_name = @program_transform_name@ |
sbindir = @sbindir@ |
sharedstatedir = @sharedstatedir@ |
sysconfdir = @sysconfdir@ |
target = @target@ |
target_alias = @target_alias@ |
target_cpu = @target_cpu@ |
target_os = @target_os@ |
target_vendor = @target_vendor@ |
toolexecdir = @toolexecdir@ |
toolexeclibdir = @toolexeclibdir@ |
AUTOMAKE_OPTIONS = foreign dejagnu |
|
# May be used by various substitution variables. |
gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER) |
EXPECT = $(shell if test -f $(top_builddir)/../expect/expect; then \ |
echo $(top_builddir)/../expect/expect; else echo expect; fi) |
|
_RUNTEST = $(shell if test -f $(top_srcdir)/../dejagnu/runtest; then \ |
echo $(top_srcdir)/../dejagnu/runtest; else echo runtest; fi) |
|
RUNTEST = "$(_RUNTEST) $(AM_RUNTESTFLAGS)" |
all: all-am |
|
.SUFFIXES: |
$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps) |
@for dep in $?; do \ |
case '$(am__configure_deps)' in \ |
*$$dep*) \ |
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \ |
&& exit 0; \ |
exit 1;; \ |
esac; \ |
done; \ |
echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign testsuite/Makefile'; \ |
cd $(top_srcdir) && \ |
$(AUTOMAKE) --foreign testsuite/Makefile |
.PRECIOUS: Makefile |
Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status |
@case '$?' in \ |
*config.status*) \ |
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \ |
*) \ |
echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \ |
cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \ |
esac; |
|
$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) |
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh |
|
$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) |
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh |
$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps) |
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh |
|
mostlyclean-libtool: |
-rm -f *.lo |
|
clean-libtool: |
-rm -rf .libs _libs |
|
distclean-libtool: |
-rm -f libtool |
uninstall-info-am: |
tags: TAGS |
TAGS: |
|
ctags: CTAGS |
CTAGS: |
|
|
check-DEJAGNU: site.exp |
srcdir=`$(am__cd) $(srcdir) && pwd`; export srcdir; \ |
EXPECT=$(EXPECT); export EXPECT; \ |
runtest=$(RUNTEST); \ |
if $(SHELL) -c "$$runtest --version" > /dev/null 2>&1; then \ |
l='$(DEJATOOL)'; for tool in $$l; do \ |
$$runtest $(AM_RUNTESTFLAGS) $(RUNTESTDEFAULTFLAGS) $(RUNTESTFLAGS); \ |
done; \ |
else echo "WARNING: could not find \`runtest'" 1>&2; :;\ |
fi |
site.exp: Makefile |
@echo 'Making a new site.exp file...' |
@echo '## these variables are automatically generated by make ##' >site.tmp |
@echo '# Do not edit here. If you wish to override these values' >>site.tmp |
@echo '# edit the last section' >>site.tmp |
@echo 'set srcdir $(srcdir)' >>site.tmp |
@echo "set objdir `pwd`" >>site.tmp |
@echo 'set build_alias "$(build_alias)"' >>site.tmp |
@echo 'set build_triplet $(build_triplet)' >>site.tmp |
@echo 'set host_alias "$(host_alias)"' >>site.tmp |
@echo 'set host_triplet $(host_triplet)' >>site.tmp |
@echo 'set target_alias "$(target_alias)"' >>site.tmp |
@echo 'set target_triplet $(target_triplet)' >>site.tmp |
@echo '## All variables above are generated by configure. Do Not Edit ##' >>site.tmp |
@test ! -f site.exp || \ |
sed '1,/^## All variables above are.*##/ d' site.exp >> site.tmp |
@-rm -f site.bak |
@test ! -f site.exp || mv site.exp site.bak |
@mv site.tmp site.exp |
|
distclean-DEJAGNU: |
-rm -f site.exp site.bak |
-l='$(DEJATOOL)'; for tool in $$l; do \ |
rm -f $$tool.sum $$tool.log; \ |
done |
|
distdir: $(DISTFILES) |
@srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \ |
topsrcdirstrip=`echo "$(top_srcdir)" | sed 's|.|.|g'`; \ |
list='$(DISTFILES)'; for file in $$list; do \ |
case $$file in \ |
$(srcdir)/*) file=`echo "$$file" | sed "s|^$$srcdirstrip/||"`;; \ |
$(top_srcdir)/*) file=`echo "$$file" | sed "s|^$$topsrcdirstrip/|$(top_builddir)/|"`;; \ |
esac; \ |
if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ |
dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \ |
if test "$$dir" != "$$file" && test "$$dir" != "."; then \ |
dir="/$$dir"; \ |
$(mkdir_p) "$(distdir)$$dir"; \ |
else \ |
dir=''; \ |
fi; \ |
if test -d $$d/$$file; then \ |
if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ |
cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \ |
fi; \ |
cp -pR $$d/$$file $(distdir)$$dir || exit 1; \ |
else \ |
test -f $(distdir)/$$file \ |
|| cp -p $$d/$$file $(distdir)/$$file \ |
|| exit 1; \ |
fi; \ |
done |
check-am: all-am |
$(MAKE) $(AM_MAKEFLAGS) check-DEJAGNU |
check: check-am |
all-am: Makefile |
installdirs: |
install: install-am |
install-exec: install-exec-am |
install-data: install-data-am |
uninstall: uninstall-am |
|
install-am: all-am |
@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am |
|
installcheck: installcheck-am |
install-strip: |
$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ |
install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ |
`test -z '$(STRIP)' || \ |
echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install |
mostlyclean-generic: |
|
clean-generic: |
|
distclean-generic: |
-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) |
|
maintainer-clean-generic: |
@echo "This command is intended for maintainers to use" |
@echo "it deletes files that may require special tools to rebuild." |
clean: clean-am |
|
clean-am: clean-generic clean-libtool mostlyclean-am |
|
distclean: distclean-am |
-rm -f Makefile |
distclean-am: clean-am distclean-DEJAGNU distclean-generic \ |
distclean-libtool |
|
dvi: dvi-am |
|
dvi-am: |
|
html: html-am |
|
info: info-am |
|
info-am: |
|
install-data-am: |
|
install-exec-am: |
|
install-info: install-info-am |
|
install-man: |
|
installcheck-am: |
|
maintainer-clean: maintainer-clean-am |
-rm -f Makefile |
maintainer-clean-am: distclean-am maintainer-clean-generic |
|
mostlyclean: mostlyclean-am |
|
mostlyclean-am: mostlyclean-generic mostlyclean-libtool |
|
pdf: pdf-am |
|
pdf-am: |
|
ps: ps-am |
|
ps-am: |
|
uninstall-am: uninstall-info-am |
|
.PHONY: all all-am check check-DEJAGNU check-am clean clean-generic \ |
clean-libtool distclean distclean-DEJAGNU distclean-generic \ |
distclean-libtool distdir dvi dvi-am html html-am info info-am \ |
install install-am install-data install-data-am install-exec \ |
install-exec-am install-info install-info-am install-man \ |
install-strip installcheck installcheck-am installdirs \ |
maintainer-clean maintainer-clean-generic mostlyclean \ |
mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ |
uninstall uninstall-am uninstall-info-am |
|
# Tell versions [3.59,3.63) of GNU make to not export all variables. |
# Otherwise a system limit (for SysV at least) may be exceeded. |
.NOEXPORT: |
/libgomp.fortran/pr25219.f90
0,0 → 1,15
! PR fortran/25219 |
|
implicit none |
save |
integer :: i, k |
k = 3 |
!$omp parallel |
!$omp do lastprivate (k) |
do i = 1, 100 |
k = i |
end do |
!$omp end do |
!$omp end parallel |
if (k .ne. 100) call abort |
end |
/libgomp.fortran/condinc1.f
0,0 → 1,7
! { dg-options "-fopenmp" } |
program condinc1 |
logical l |
l = .false. |
!$ include 'condinc1.inc' |
stop 2 |
end |
/libgomp.fortran/appendix-a/a.5.1.f90
0,0 → 1,8
! { dg-do run } |
PROGRAM A5 |
INCLUDE "omp_lib.h" ! or USE OMP_LIB |
CALL OMP_SET_DYNAMIC(.TRUE.) |
!$OMP PARALLEL NUM_THREADS(10) |
! do work here |
!$OMP END PARALLEL |
END PROGRAM A5 |
/libgomp.fortran/appendix-a/a.21.1.f90
0,0 → 1,19
! { dg-do compile } |
SUBROUTINE WORK(K) |
INTEGER k |
!$OMP ORDERED |
WRITE(*,*) K |
!$OMP END ORDERED |
END SUBROUTINE WORK |
SUBROUTINE SUBA21(LB, UB, STRIDE) |
INTEGER LB, UB, STRIDE |
INTEGER I |
!$OMP PARALLEL DO ORDERED SCHEDULE(DYNAMIC) |
DO I=LB,UB,STRIDE |
CALL WORK(I) |
END DO |
!$OMP END PARALLEL DO |
END SUBROUTINE SUBA21 |
PROGRAM A21 |
CALL SUBA21(1,100,5) |
END PROGRAM A21 |
/libgomp.fortran/appendix-a/a.40.1.f90
0,0 → 1,52
! { dg-do compile } |
! { dg-options "-ffixed-form" } |
MODULE DATA |
USE OMP_LIB, ONLY: OMP_NEST_LOCK_KIND |
TYPE LOCKED_PAIR |
INTEGER A |
INTEGER B |
INTEGER (OMP_NEST_LOCK_KIND) LCK |
END TYPE |
END MODULE DATA |
SUBROUTINE INCR_A(P, A) |
! called only from INCR_PAIR, no need to lock |
USE DATA |
TYPE(LOCKED_PAIR) :: P |
INTEGER A |
P%A = P%A + A |
END SUBROUTINE INCR_A |
SUBROUTINE INCR_B(P, B) |
! called from both INCR_PAIR and elsewhere, |
! so we need a nestable lock |
USE OMP_LIB ! or INCLUDE "omp_lib.h" |
USE DATA |
TYPE(LOCKED_PAIR) :: P |
INTEGER B |
CALL OMP_SET_NEST_LOCK(P%LCK) |
P%B = P%B + B |
CALL OMP_UNSET_NEST_LOCK(P%LCK) |
END SUBROUTINE INCR_B |
SUBROUTINE INCR_PAIR(P, A, B) |
USE OMP_LIB ! or INCLUDE "omp_lib.h" |
USE DATA |
TYPE(LOCKED_PAIR) :: P |
INTEGER A |
INTEGER B |
CALL OMP_SET_NEST_LOCK(P%LCK) |
CALL INCR_A(P, A) |
CALL INCR_B(P, B) |
CALL OMP_UNSET_NEST_LOCK(P%LCK) |
END SUBROUTINE INCR_PAIR |
SUBROUTINE A40(P) |
USE OMP_LIB ! or INCLUDE "omp_lib.h" |
USE DATA |
TYPE(LOCKED_PAIR) :: P |
INTEGER WORK1, WORK2, WORK3 |
EXTERNAL WORK1, WORK2, WORK3 |
!$OMP PARALLEL SECTIONS |
!$OMP SECTION |
CALL INCR_PAIR(P, WORK1(), WORK2()) |
!$OMP SECTION |
CALL INCR_B(P, WORK3()) |
!$OMP END PARALLEL SECTIONS |
END SUBROUTINE A40 |
/libgomp.fortran/appendix-a/a.15.1.f90
0,0 → 1,31
! { dg-do compile } |
SUBROUTINE WORK(N) |
INTEGER N |
END SUBROUTINE WORK |
SUBROUTINE SUB3(N) |
INTEGER N |
CALL WORK(N) |
!$OMP BARRIER |
CALL WORK(N) |
END SUBROUTINE SUB3 |
SUBROUTINE SUB2(K) |
INTEGER K |
!$OMP PARALLEL SHARED(K) |
CALL SUB3(K) |
!$OMP END PARALLEL |
END SUBROUTINE SUB2 |
SUBROUTINE SUB1(N) |
INTEGER N |
INTEGER I |
!$OMP PARALLEL PRIVATE(I) SHARED(N) |
!$OMP DO |
DO I = 1, N |
CALL SUB2(I) |
END DO |
!$OMP END PARALLEL |
END SUBROUTINE SUB1 |
PROGRAM A15 |
CALL SUB1(2) |
CALL SUB2(2) |
CALL SUB3(2) |
END PROGRAM A15 |
/libgomp.fortran/appendix-a/a.16.1.f90
0,0 → 1,41
! { dg-do run } |
REAL FUNCTION WORK1(I) |
INTEGER I |
WORK1 = 1.0 * I |
RETURN |
END FUNCTION WORK1 |
|
REAL FUNCTION WORK2(I) |
INTEGER I |
WORK2 = 2.0 * I |
RETURN |
END FUNCTION WORK2 |
|
SUBROUTINE SUBA16(X, Y, INDEX, N) |
REAL X(*), Y(*) |
INTEGER INDEX(*), N |
INTEGER I |
!$OMP PARALLEL DO SHARED(X, Y, INDEX, N) |
DO I=1,N |
!$OMP ATOMIC |
X(INDEX(I)) = X(INDEX(I)) + WORK1(I) |
Y(I) = Y(I) + WORK2(I) |
ENDDO |
END SUBROUTINE SUBA16 |
|
PROGRAM A16 |
REAL X(1000), Y(10000) |
INTEGER INDEX(10000) |
INTEGER I |
DO I=1,10000 |
INDEX(I) = MOD(I, 1000) + 1 |
Y(I) = 0.0 |
ENDDO |
DO I = 1,1000 |
X(I) = 0.0 |
ENDDO |
CALL SUBA16(X, Y, INDEX, 10000) |
DO I = 1,10 |
PRINT *, "X(", I, ") = ", X(I), ", Y(", I, ") = ", Y(I) |
ENDDO |
END PROGRAM A16 |
/libgomp.fortran/appendix-a/a.31.4.f90
0,0 → 1,12
! { dg-do run } |
MODULE M |
INTRINSIC MAX |
END MODULE M |
PROGRAM A31_4 |
USE M, REN => MAX |
N=0 |
!$OMP PARALLEL DO REDUCTION(REN: N) ! still does MAX |
DO I = 1, 100 |
N = MAX(N,I) |
END DO |
END PROGRAM A31_4 |
/libgomp.fortran/appendix-a/a.33.3.f90
0,0 → 1,10
! { dg-do compile } |
|
FUNCTION NEW_LOCK() |
USE OMP_LIB ! or INCLUDE "omp_lib.h" |
INTEGER(OMP_LOCK_KIND), POINTER :: NEW_LOCK |
!$OMP SINGLE |
ALLOCATE(NEW_LOCK) |
CALL OMP_INIT_LOCK(NEW_LOCK) |
!$OMP END SINGLE COPYPRIVATE(NEW_LOCK) |
END FUNCTION NEW_LOCK |
/libgomp.fortran/appendix-a/a.31.5.f90
0,0 → 1,14
! { dg-do run } |
MODULE MOD |
INTRINSIC MAX, MIN |
END MODULE MOD |
PROGRAM A31_5 |
USE MOD, MIN=>MAX, MAX=>MIN |
REAL :: R |
R = -HUGE(0.0) |
!$OMP PARALLEL DO REDUCTION(MIN: R) ! still does MAX |
DO I = 1, 1000 |
R = MIN(R, SIN(REAL(I))) |
END DO |
PRINT *, R |
END PROGRAM A31_5 |
/libgomp.fortran/appendix-a/a.26.1.f90
0,0 → 1,11
! { dg-do run } |
PROGRAM A26 |
INTEGER I, J |
I=1 |
J=2 |
!$OMP PARALLEL PRIVATE(I) FIRSTPRIVATE(J) |
I=3 |
J=J+2 |
!$OMP END PARALLEL |
PRINT *, I, J ! I and J are undefined |
END PROGRAM A26 |
/libgomp.fortran/appendix-a/a.18.1.f90
0,0 → 1,59
! { dg-do run } |
! { dg-options "-ffixed-form" } |
REAL FUNCTION FN1(I) |
INTEGER I |
FN1 = I * 2.0 |
RETURN |
END FUNCTION FN1 |
|
REAL FUNCTION FN2(A, B) |
REAL A, B |
FN2 = A + B |
RETURN |
END FUNCTION FN2 |
|
PROGRAM A18 |
INCLUDE "omp_lib.h" ! or USE OMP_LIB |
INTEGER ISYNC(256) |
REAL WORK(256) |
REAL RESULT(256) |
INTEGER IAM, NEIGHBOR |
!$OMP PARALLEL PRIVATE(IAM, NEIGHBOR) SHARED(WORK, ISYNC) NUM_THREADS(4) |
IAM = OMP_GET_THREAD_NUM() + 1 |
ISYNC(IAM) = 0 |
!$OMP BARRIER |
! Do computation into my portion of work array |
WORK(IAM) = FN1(IAM) |
! Announce that I am done with my work. |
! The first flush ensures that my work is made visible before |
! synch. The second flush ensures that synch is made visible. |
!$OMP FLUSH(WORK,ISYNC) |
ISYNC(IAM) = 1 |
!$OMP FLUSH(ISYNC) |
|
! Wait until neighbor is done. The first flush ensures that |
! synch is read from memory, rather than from the temporary |
! view of memory. The second flush ensures that work is read |
! from memory, and is done so after the while loop exits. |
IF (IAM .EQ. 1) THEN |
NEIGHBOR = OMP_GET_NUM_THREADS() |
ELSE |
NEIGHBOR = IAM - 1 |
ENDIF |
DO WHILE (ISYNC(NEIGHBOR) .EQ. 0) |
!$OMP FLUSH(ISYNC) |
END DO |
!$OMP FLUSH(WORK, ISYNC) |
RESULT(IAM) = FN2(WORK(NEIGHBOR), WORK(IAM)) |
!$OMP END PARALLEL |
DO I=1,4 |
IF (I .EQ. 1) THEN |
NEIGHBOR = 4 |
ELSE |
NEIGHBOR = I - 1 |
ENDIF |
IF (RESULT(I) .NE. I * 2 + NEIGHBOR * 2) THEN |
CALL ABORT |
ENDIF |
ENDDO |
END PROGRAM A18 |
/libgomp.fortran/appendix-a/a.19.1.f90
0,0 → 1,60
! { dg-do run } |
SUBROUTINE F1(Q) |
COMMON /DATA/ P, X |
INTEGER, TARGET :: X |
INTEGER, POINTER :: P |
INTEGER Q |
Q=1 |
!$OMP FLUSH |
! X, P and Q are flushed |
! because they are shared and accessible |
END SUBROUTINE F1 |
SUBROUTINE F2(Q) |
COMMON /DATA/ P, X |
INTEGER, TARGET :: X |
INTEGER, POINTER :: P |
INTEGER Q |
!$OMP BARRIER |
Q=2 |
!$OMP BARRIER |
! a barrier implies a flush |
! X, P and Q are flushed |
! because they are shared and accessible |
END SUBROUTINE F2 |
|
INTEGER FUNCTION G(N) |
COMMON /DATA/ P, X |
INTEGER, TARGET :: X |
INTEGER, POINTER :: P |
INTEGER N |
INTEGER I, J, SUM |
I=1 |
SUM = 0 |
P=1 |
!$OMP PARALLEL REDUCTION(+: SUM) NUM_THREADS(2) |
CALL F1(J) |
! I, N and SUM were not flushed |
! because they were not accessible in F1 |
! J was flushed because it was accessible |
SUM = SUM + J |
CALL F2(J) |
! I, N, and SUM were not flushed |
! because they were not accessible in f2 |
! J was flushed because it was accessible |
SUM = SUM + I + J + P + N |
!$OMP END PARALLEL |
G = SUM |
END FUNCTION G |
|
PROGRAM A19 |
COMMON /DATA/ P, X |
INTEGER, TARGET :: X |
INTEGER, POINTER :: P |
INTEGER RESULT, G |
P => X |
RESULT = G(10) |
PRINT *, RESULT |
IF (RESULT .NE. 30) THEN |
CALL ABORT |
ENDIF |
END PROGRAM A19 |
/libgomp.fortran/appendix-a/a.22.7.f90
0,0 → 1,33
! { dg-do run } |
! { dg-require-effective-target tls_runtime } |
|
PROGRAM A22_7_GOOD |
INTEGER, ALLOCATABLE, SAVE :: A(:) |
INTEGER, POINTER, SAVE :: PTR |
INTEGER, SAVE :: I |
INTEGER, TARGET :: TARG |
LOGICAL :: FIRSTIN = .TRUE. |
!$OMP THREADPRIVATE(A, I, PTR) |
ALLOCATE (A(3)) |
A = (/1,2,3/) |
PTR => TARG |
I=5 |
!$OMP PARALLEL COPYIN(I, PTR) |
!$OMP CRITICAL |
IF (FIRSTIN) THEN |
TARG = 4 ! Update target of ptr |
I = I + 10 |
IF (ALLOCATED(A)) A = A + 10 |
FIRSTIN = .FALSE. |
END IF |
IF (ALLOCATED(A)) THEN |
PRINT *, "a = ", A |
ELSE |
PRINT *, "A is not allocated" |
END IF |
PRINT *, "ptr = ", PTR |
PRINT *, "i = ", I |
PRINT * |
!$OMP END CRITICAL |
!$OMP END PARALLEL |
END PROGRAM A22_7_GOOD |
/libgomp.fortran/appendix-a/a.28.1.f90
0,0 → 1,14
! { dg-do run } |
|
SUBROUTINE SUB() |
COMMON /BLOCK/ X |
PRINT *,X ! X is undefined |
END SUBROUTINE SUB |
PROGRAM A28_1 |
COMMON /BLOCK/ X |
X = 1.0 |
!$OMP PARALLEL PRIVATE (X) |
X = 2.0 |
CALL SUB() |
!$OMP END PARALLEL |
END PROGRAM A28_1 |
/libgomp.fortran/appendix-a/a.38.1.f90
0,0 → 1,12
! { dg-do compile } |
|
FUNCTION NEW_LOCKS() |
USE OMP_LIB ! or INCLUDE "omp_lib.h" |
INTEGER(OMP_LOCK_KIND), DIMENSION(1000) :: NEW_LOCKS |
INTEGER I |
!$OMP PARALLEL DO PRIVATE(I) |
DO I=1,1000 |
CALL OMP_INIT_LOCK(NEW_LOCKS(I)) |
END DO |
!$OMP END PARALLEL DO |
END FUNCTION NEW_LOCKS |
/libgomp.fortran/appendix-a/a.22.8.f90
0,0 → 1,25
! { dg-do run } |
! { dg-require-effective-target tls_runtime } |
MODULE A22_MODULE8 |
REAL, POINTER :: WORK(:) |
SAVE WORK |
!$OMP THREADPRIVATE(WORK) |
END MODULE A22_MODULE8 |
SUBROUTINE SUB1(N) |
USE A22_MODULE8 |
!$OMP PARALLEL PRIVATE(THE_SUM) |
ALLOCATE(WORK(N)) |
CALL SUB2(THE_SUM) |
WRITE(*,*)THE_SUM |
!$OMP END PARALLEL |
END SUBROUTINE SUB1 |
SUBROUTINE SUB2(THE_SUM) |
USE A22_MODULE8 |
WORK(:) = 10 |
THE_SUM=SUM(WORK) |
END SUBROUTINE SUB2 |
PROGRAM A22_8_GOOD |
N = 10 |
CALL SUB1(N) |
END PROGRAM A22_8_GOOD |
|
/libgomp.fortran/appendix-a/a.28.2.f90
0,0 → 1,16
! { dg-do run } |
|
PROGRAM A28_2 |
COMMON /BLOCK2/ X |
X = 1.0 |
!$OMP PARALLEL PRIVATE (X) |
X = 2.0 |
CALL SUB() |
!$OMP END PARALLEL |
CONTAINS |
SUBROUTINE SUB() |
COMMON /BLOCK2/ Y |
PRINT *,X ! X is undefined |
PRINT *,Y ! Y is undefined |
END SUBROUTINE SUB |
END PROGRAM A28_2 |
/libgomp.fortran/appendix-a/a.28.3.f90
0,0 → 1,11
! { dg-do run } |
|
PROGRAM A28_3 |
EQUIVALENCE (X,Y) |
X = 1.0 |
!$OMP PARALLEL PRIVATE(X) |
PRINT *,Y ! Y is undefined |
Y = 10 |
PRINT *,X ! X is undefined |
!$OMP END PARALLEL |
END PROGRAM A28_3 |
/libgomp.fortran/appendix-a/a.39.1.f90
0,0 → 1,26
! { dg-do run } |
|
SUBROUTINE SKIP(ID) |
END SUBROUTINE SKIP |
SUBROUTINE WORK(ID) |
END SUBROUTINE WORK |
PROGRAM A39 |
INCLUDE "omp_lib.h" ! or USE OMP_LIB |
INTEGER(OMP_LOCK_KIND) LCK |
INTEGER ID |
CALL OMP_INIT_LOCK(LCK) |
!$OMP PARALLEL SHARED(LCK) PRIVATE(ID) |
ID = OMP_GET_THREAD_NUM() |
CALL OMP_SET_LOCK(LCK) |
PRINT *, "My thread id is ", ID |
CALL OMP_UNSET_LOCK(LCK) |
DO WHILE (.NOT. OMP_TEST_LOCK(LCK)) |
CALL SKIP(ID) ! We do not yet have the lock |
! so we must do something else |
END DO |
CALL WORK(ID) ! We now have the lock |
! and can do the work |
CALL OMP_UNSET_LOCK( LCK ) |
!$OMP END PARALLEL |
CALL OMP_DESTROY_LOCK( LCK ) |
END PROGRAM A39 |
/libgomp.fortran/appendix-a/a.28.4.f90
0,0 → 1,24
! { dg-do run } |
|
PROGRAM A28_4 |
INTEGER I, J |
INTEGER A(100), B(100) |
EQUIVALENCE (A(51), B(1)) |
!$OMP PARALLEL DO DEFAULT(PRIVATE) PRIVATE(I,J) LASTPRIVATE(A) |
DO I=1,100 |
DO J=1,100 |
B(J) = J - 1 |
ENDDO |
DO J=1,100 |
A(J) = J ! B becomes undefined at this point |
ENDDO |
DO J=1,50 |
B(J) = B(J) + 1 ! B is undefined |
! A becomes undefined at this point |
ENDDO |
ENDDO |
!$OMP END PARALLEL DO ! The LASTPRIVATE write for A has |
! undefined results |
PRINT *, B ! B is undefined since the LASTPRIVATE |
! write of A was not defined |
END PROGRAM A28_4 |
/libgomp.fortran/appendix-a/a.28.5.f90
0,0 → 1,29
! { dg-do compile } |
|
SUBROUTINE SUB1(X) |
DIMENSION X(10) |
! This use of X does not conform to the |
! specification. It would be legal Fortran 90, |
! but the OpenMP private directive allows the |
! compiler to break the sequence association that |
! A had with the rest of the common block. |
FORALL (I = 1:10) X(I) = I |
END SUBROUTINE SUB1 |
PROGRAM A28_5 |
COMMON /BLOCK5/ A |
DIMENSION B(10) |
EQUIVALENCE (A,B(1)) |
! the common block has to be at least 10 words |
A=0 |
!$OMP PARALLEL PRIVATE(/BLOCK5/) |
! Without the private clause, |
! we would be passing a member of a sequence |
! that is at least ten elements long. |
! With the private clause, A may no longer be |
! sequence-associated. |
CALL SUB1(A) |
!$OMP MASTER |
PRINT *, A |
!$OMP END MASTER |
!$OMP END PARALLEL |
END PROGRAM A28_5 |
/libgomp.fortran/appendix-a/a.2.1.f90
0,0 → 1,22
! { dg-do run } |
PROGRAM A2 |
INCLUDE "omp_lib.h" ! or USE OMP_LIB |
INTEGER X |
X=2 |
!$OMP PARALLEL NUM_THREADS(2) SHARED(X) |
IF (OMP_GET_THREAD_NUM() .EQ. 0) THEN |
X=5 |
ELSE |
! PRINT 1: The following read of x has a race |
PRINT *,"1: THREAD# ", OMP_GET_THREAD_NUM(), "X = ", X |
ENDIF |
!$OMP BARRIER |
IF (OMP_GET_THREAD_NUM() .EQ. 0) THEN |
! PRINT 2 |
PRINT *,"2: THREAD# ", OMP_GET_THREAD_NUM(), "X = ", X |
ELSE |
! PRINT 3 |
PRINT *,"3: THREAD# ", OMP_GET_THREAD_NUM(), "X = ", X |
ENDIF |
!$OMP END PARALLEL |
END PROGRAM A2 |
/libgomp.fortran/appendix-a/a.3.1.f90
0,0 → 1,6
! { dg-do run } |
! { dg-options "-ffixed-form" } |
PROGRAM A3 |
!234567890 |
!$ PRINT *, "Compiled by an OpenMP-compliant implementation." |
END PROGRAM A3 |
/libgomp.fortran/appendix-a/a10.1.f90
0,0 → 1,20
! { dg-do run } |
SUBROUTINE WORK1() |
END SUBROUTINE WORK1 |
SUBROUTINE WORK2() |
END SUBROUTINE WORK2 |
PROGRAM A10 |
!$OMP PARALLEL |
!$OMP SINGLE |
print *, "Beginning work1." |
!$OMP END SINGLE |
CALL WORK1() |
!$OMP SINGLE |
print *, "Finishing work1." |
!$OMP END SINGLE |
!$OMP SINGLE |
print *, "Finished work1 and beginning work2." |
!$OMP END SINGLE NOWAIT |
CALL WORK2() |
!$OMP END PARALLEL |
END PROGRAM A10 |
/libgomp.fortran/appendix-a/a.4.1.f90
0,0 → 1,29
! { dg-do run } |
SUBROUTINE SUBDOMAIN(X, ISTART, IPOINTS) |
INTEGER ISTART, IPOINTS |
REAL X(*) |
INTEGER I |
DO 100 I=1,IPOINTS |
X(ISTART+I) = 123.456 |
100 CONTINUE |
END SUBROUTINE SUBDOMAIN |
SUBROUTINE SUB(X, NPOINTS) |
INCLUDE "omp_lib.h" ! or USE OMP_LIB |
REAL X(*) |
INTEGER NPOINTS |
INTEGER IAM, NT, IPOINTS, ISTART |
!$OMP PARALLEL DEFAULT(PRIVATE) SHARED(X,NPOINTS) |
IAM = OMP_GET_THREAD_NUM() |
NT = OMP_GET_NUM_THREADS() |
IPOINTS = NPOINTS/NT |
ISTART = IAM * IPOINTS |
IF (IAM .EQ. NT-1) THEN |
IPOINTS = NPOINTS - ISTART |
ENDIF |
CALL SUBDOMAIN(X,ISTART,IPOINTS) |
!$OMP END PARALLEL |
END SUBROUTINE SUB |
PROGRAM A4 |
REAL ARRAY(10000) |
CALL SUB(ARRAY, 10000) |
END PROGRAM A4 |
/libgomp.fortran/pr27395-1.f90
0,0 → 1,31
! PR fortran/27395 |
! { dg-do run } |
|
program pr27395_1 |
implicit none |
integer, parameter :: n=10,m=1001 |
integer :: i |
integer, dimension(n) :: sumarray |
call foo(n,m,sumarray) |
do i=1,n |
if (sumarray(i).ne.m*i) call abort |
end do |
end program pr27395_1 |
|
subroutine foo(n,m,sumarray) |
use omp_lib, only : omp_get_thread_num |
implicit none |
integer, intent(in) :: n,m |
integer, dimension(n), intent(out) :: sumarray |
integer :: i,j |
sumarray(:)=0 |
!$OMP PARALLEL DEFAULT(shared) NUM_THREADS(4) |
!$OMP DO PRIVATE(j,i), REDUCTION(+:sumarray) |
do j=1,m |
do i=1,n |
sumarray(i)=sumarray(i)+i |
end do |
end do |
!$OMP END DO |
!$OMP END PARALLEL |
end subroutine foo |
/libgomp.fortran/pr27395-2.f90
0,0 → 1,30
! PR fortran/27395 |
! { dg-do run } |
|
program pr27395_2 |
implicit none |
integer, parameter :: n=10,m=1001 |
integer :: i |
call foo(n,m) |
end program pr27395_2 |
|
subroutine foo(n,m) |
use omp_lib, only : omp_get_thread_num |
implicit none |
integer, intent(in) :: n,m |
integer :: i,j |
integer, dimension(n) :: sumarray |
sumarray(:)=0 |
!$OMP PARALLEL DEFAULT(shared) NUM_THREADS(4) |
!$OMP DO PRIVATE(j,i), REDUCTION(+:sumarray) |
do j=1,m |
do i=1,n |
sumarray(i)=sumarray(i)+i |
end do |
end do |
!$OMP END DO |
!$OMP END PARALLEL |
do i=1,n |
if (sumarray(i).ne.m*i) call abort |
end do |
end subroutine foo |
/libgomp.fortran/crayptr1.f90
0,0 → 1,46
! { dg-do run } |
! { dg-options "-fopenmp -fcray-pointer" } |
|
use omp_lib |
integer :: a, b, c, p |
logical :: l |
pointer (ip, p) |
a = 1 |
b = 2 |
c = 3 |
l = .false. |
ip = loc (a) |
|
!$omp parallel num_threads (2) reduction (.or.:l) |
l = p .ne. 1 |
!$omp barrier |
!$omp master |
ip = loc (b) |
!$omp end master |
!$omp barrier |
l = l .or. p .ne. 2 |
!$omp barrier |
if (omp_get_thread_num () .eq. 1 .or. omp_get_num_threads () .lt. 2) & |
ip = loc (c) |
!$omp barrier |
l = l .or. p .ne. 3 |
!$omp end parallel |
|
if (l) call abort |
|
l = .false. |
!$omp parallel num_threads (2) reduction (.or.:l) default (private) |
ip = loc (a) |
a = 3 * omp_get_thread_num () + 4 |
b = a + 1 |
c = a + 2 |
l = p .ne. 3 * omp_get_thread_num () + 4 |
ip = loc (c) |
l = l .or. p .ne. 3 * omp_get_thread_num () + 6 |
ip = loc (b) |
l = l .or. p .ne. 3 * omp_get_thread_num () + 5 |
!$omp end parallel |
|
if (l) call abort |
|
end |
/libgomp.fortran/omp_parse1.f90
0,0 → 1,185
! { dg-do run } |
use omp_lib |
call test_parallel |
call test_do |
call test_sections |
call test_single |
|
contains |
subroutine test_parallel |
integer :: a, b, c, e, f, g, i, j |
integer, dimension (20) :: d |
logical :: h |
a = 6 |
b = 8 |
c = 11 |
d(:) = -1 |
e = 13 |
f = 24 |
g = 27 |
h = .false. |
i = 1 |
j = 16 |
!$omp para& |
!$omp&llel & |
!$omp if (a .eq. 6) private (b, c) shared (d) private (e) & |
!$omp firstprivate(f) num_threads (a - 1) first& |
!$ompprivate(g)default (shared) reduction (.or. : h) & |
!$omp reduction(*:i) |
if (i .ne. 1) h = .true. |
i = 2 |
if (f .ne. 24) h = .true. |
if (g .ne. 27) h = .true. |
e = 7 |
b = omp_get_thread_num () |
if (b .eq. 0) j = 24 |
f = b |
g = f |
c = omp_get_num_threads () |
if (c .gt. a - 1 .or. c .le. 0) h = .true. |
if (b .ge. c) h = .true. |
d(b + 1) = c |
if (f .ne. g .or. f .ne. b) h = .true. |
!$omp endparallel |
if (h) call abort |
if (a .ne. 6) call abort |
if (j .ne. 24) call abort |
if (d(1) .eq. -1) call abort |
e = 1 |
do g = 1, d(1) |
if (d(g) .ne. d(1)) call abort |
e = e * 2 |
end do |
if (e .ne. i) call abort |
end subroutine test_parallel |
|
subroutine test_do_orphan |
integer :: k, l |
!$omp parallel do private (l) |
do 600 k = 1, 16, 2 |
600 l = k |
end subroutine test_do_orphan |
|
subroutine test_do |
integer :: i, j, k, l, n |
integer, dimension (64) :: d |
logical :: m |
|
j = 16 |
d(:) = -1 |
m = .true. |
n = 24 |
!$omp parallel num_threads (4) shared (i, k, d) private (l) & |
!$omp&reduction (.and. : m) |
if (omp_get_thread_num () .eq. 0) then |
k = omp_get_num_threads () |
end if |
call test_do_orphan |
!$omp do schedule (static) firstprivate (n) |
do 200 i = 1, j |
if (i .eq. 1 .and. n .ne. 24) call abort |
n = i |
200 d(n) = omp_get_thread_num () |
!$omp enddo nowait |
|
!$omp do lastprivate (i) schedule (static, 5) |
do 201 i = j + 1, 2 * j |
201 d(i) = omp_get_thread_num () + 1024 |
! Implied omp end do here |
|
if (i .ne. 33) m = .false. |
|
!$omp do private (j) schedule (dynamic) |
do i = 33, 48 |
d(i) = omp_get_thread_num () + 2048 |
end do |
!$omp end do nowait |
|
!$omp do schedule (runtime) |
do i = 49, 4 * j |
d(i) = omp_get_thread_num () + 4096 |
end do |
! Implied omp end do here |
!$omp end parallel |
if (.not. m) call abort |
|
j = 0 |
do i = 1, 64 |
if (d(i) .lt. j .or. d(i) .ge. j + k) call abort |
if (i .eq. 16) j = 1024 |
if (i .eq. 32) j = 2048 |
if (i .eq. 48) j = 4096 |
end do |
end subroutine test_do |
|
subroutine test_sections |
integer :: i, j, k, l, m, n |
i = 9 |
j = 10 |
k = 11 |
l = 0 |
m = 0 |
n = 30 |
call omp_set_dynamic (.false.) |
call omp_set_num_threads (4) |
!$omp parallel num_threads (4) |
!$omp sections private (i) firstprivate (j, k) lastprivate (j) & |
!$omp& reduction (+ : l, m) |
!$omp section |
i = 24 |
if (j .ne. 10 .or. k .ne. 11 .or. m .ne. 0) l = 1 |
m = m + 4 |
!$omp section |
i = 25 |
if (j .ne. 10 .or. k .ne. 11) l = 1 |
m = m + 6 |
!$omp section |
i = 26 |
if (j .ne. 10 .or. k .ne. 11) l = 1 |
m = m + 8 |
!$omp section |
i = 27 |
if (j .ne. 10 .or. k .ne. 11) l = 1 |
m = m + 10 |
j = 271 |
!$omp end sections nowait |
!$omp sections lastprivate (n) |
!$omp section |
n = 6 |
!$omp section |
n = 7 |
!$omp endsections |
!$omp end parallel |
if (j .ne. 271 .or. l .ne. 0) call abort |
if (m .ne. 4 + 6 + 8 + 10) call abort |
if (n .ne. 7) call abort |
end subroutine test_sections |
|
subroutine test_single |
integer :: i, j, k, l |
logical :: m |
i = 200 |
j = 300 |
k = 400 |
l = 500 |
m = .false. |
!$omp parallel num_threads (4), private (i, j), reduction (.or. : m) |
i = omp_get_thread_num () |
j = omp_get_thread_num () |
!$omp single private (k) |
k = 64 |
!$omp end single nowait |
!$omp single private (k) firstprivate (l) |
if (i .ne. omp_get_thread_num () .or. i .ne. j) then |
j = -1 |
else |
j = -2 |
end if |
if (l .ne. 500) j = -1 |
l = 265 |
!$omp end single copyprivate (j) |
if (i .ne. omp_get_thread_num () .or. j .ne. -2) m = .true. |
!$omp endparallel |
if (m) call abort |
end subroutine test_single |
end |
/libgomp.fortran/omp_parse2.f90
0,0 → 1,102
! { dg-do run } |
use omp_lib |
call test_master |
call test_critical |
call test_barrier |
call test_atomic |
|
contains |
subroutine test_master |
logical :: i, j |
i = .false. |
j = .false. |
!$omp parallel num_threads (4) |
!$omp master |
i = .true. |
j = omp_get_thread_num () .eq. 0 |
!$omp endmaster |
!$omp end parallel |
if (.not. (i .or. j)) call abort |
end subroutine test_master |
|
subroutine test_critical_1 (i, j) |
integer :: i, j |
!$omp critical(critical_foo) |
i = i + 1 |
!$omp end critical (critical_foo) |
!$omp critical |
j = j + 1 |
!$omp end critical |
end subroutine test_critical_1 |
|
subroutine test_critical |
integer :: i, j, n |
n = -1 |
i = 0 |
j = 0 |
!$omp parallel num_threads (4) |
if (omp_get_thread_num () .eq. 0) n = omp_get_num_threads () |
call test_critical_1 (i, j) |
call test_critical_1 (i, j) |
!$omp critical |
j = j + 1 |
!$omp end critical |
!$omp critical (critical_foo) |
i = i + 1 |
!$omp endcritical (critical_foo) |
!$omp end parallel |
if (n .lt. 1 .or. i .ne. n * 3 .or. j .ne. n * 3) call abort |
end subroutine test_critical |
|
subroutine test_barrier |
integer :: i |
logical :: j |
i = 23 |
j = .false. |
!$omp parallel num_threads (4) |
if (omp_get_thread_num () .eq. 0) i = 5 |
!$omp flush (i) |
!$omp barrier |
if (i .ne. 5) then |
!$omp atomic |
j = j .or. .true. |
end if |
!$omp end parallel |
if (i .ne. 5 .or. j) call abort |
end subroutine test_barrier |
|
subroutine test_atomic |
integer :: a, b, c, d, e, f, g |
a = 0 |
b = 1 |
c = 0 |
d = 1024 |
e = 1024 |
f = -1 |
g = -1 |
!$omp parallel num_threads (8) |
!$omp atomic |
a = a + 2 + 4 |
!$omp atomic |
b = 3 * b |
!$omp atomic |
c = 8 - c |
!$omp atomic |
d = d / 2 |
!$omp atomic |
e = min (e, omp_get_thread_num ()) |
!$omp atomic |
f = max (omp_get_thread_num (), f) |
if (omp_get_thread_num () .eq. 0) g = omp_get_num_threads () |
!$omp end parallel |
if (g .le. 0 .or. g .gt. 8) call abort |
if (a .ne. 6 * g .or. b .ne. 3 ** g) call abort |
if (iand (g, 1) .eq. 1) then |
if (c .ne. 8) call abort |
else if (c .ne. 0) then |
call abort |
end if |
if (d .ne. 1024 / (2 ** g)) call abort |
if (e .ne. 0 .or. f .ne. g - 1) call abort |
end subroutine test_atomic |
end |
/libgomp.fortran/omp_parse3.f90
0,0 → 1,95
! { dg-do run } |
! { dg-require-effective-target tls_runtime } |
use omp_lib |
common /tlsblock/ x, y |
integer :: x, y, z |
save z |
!$omp threadprivate (/tlsblock/, z) |
|
call test_flush |
call test_ordered |
call test_threadprivate |
|
contains |
subroutine test_flush |
integer :: i, j |
i = 0 |
j = 0 |
!$omp parallel num_threads (4) |
if (omp_get_thread_num () .eq. 0) i = omp_get_num_threads () |
if (omp_get_thread_num () .eq. 0) j = j + 1 |
!$omp flush (i, j) |
!$omp barrier |
if (omp_get_thread_num () .eq. 1) j = j + 2 |
!$omp flush |
!$omp barrier |
if (omp_get_thread_num () .eq. 2) j = j + 3 |
!$omp flush (i) |
!$omp flush (j) |
!$omp barrier |
if (omp_get_thread_num () .eq. 3) j = j + 4 |
!$omp end parallel |
end subroutine test_flush |
|
subroutine test_ordered |
integer :: i, j |
integer, dimension (100) :: d |
d(:) = -1 |
!$omp parallel do ordered schedule (dynamic) num_threads (4) |
do i = 1, 100, 5 |
!$omp ordered |
d(i) = i |
!$omp end ordered |
end do |
j = 1 |
do 100 i = 1, 100 |
if (i .eq. j) then |
if (d(i) .ne. i) call abort |
j = i + 5 |
else |
if (d(i) .ne. -1) call abort |
end if |
100 d(i) = -1 |
end subroutine test_ordered |
|
subroutine test_threadprivate |
common /tlsblock/ x, y |
!$omp threadprivate (/tlsblock/) |
integer :: i, j |
logical :: m, n |
call omp_set_num_threads (4) |
call omp_set_dynamic (.false.) |
i = -1 |
x = 6 |
y = 7 |
z = 8 |
n = .false. |
m = .false. |
!$omp parallel copyin (/tlsblock/, z) reduction (.or.:m) & |
!$omp& num_threads (4) |
if (omp_get_thread_num () .eq. 0) i = omp_get_num_threads () |
if (x .ne. 6 .or. y .ne. 7 .or. z .ne. 8) call abort |
x = omp_get_thread_num () |
y = omp_get_thread_num () + 1024 |
z = omp_get_thread_num () + 4096 |
!$omp end parallel |
if (x .ne. 0 .or. y .ne. 1024 .or. z .ne. 4096) call abort |
!$omp parallel num_threads (4), private (j) reduction (.or.:n) |
if (omp_get_num_threads () .eq. i) then |
j = omp_get_thread_num () |
if (x .ne. j .or. y .ne. j + 1024 .or. z .ne. j + 4096) & |
& call abort |
end if |
!$omp end parallel |
m = m .or. n |
n = .false. |
!$omp parallel num_threads (4), copyin (z) reduction (.or. : n) |
if (z .ne. 4096) n = .true. |
if (omp_get_num_threads () .eq. i) then |
j = omp_get_thread_num () |
if (x .ne. j .or. y .ne. j + 1024) call abort |
end if |
!$omp end parallel |
if (m .or. n) call abort |
end subroutine test_threadprivate |
end |
/libgomp.fortran/omp_parse4.f90
0,0 → 1,72
! { dg-do run } |
!$ use omp_lib |
! Parsing/execution test for the WORKSHARE construct: array assignment, |
! WHERE/ELSEWHERE, FORALL, ATOMIC, CRITICAL (named and unnamed), and a |
! nested PARALLEL inside WORKSHARE.  Also checks that the squashed |
! directive spellings "endworkshare" / "endcritical" are accepted. |
call test_workshare |
 |
contains |
subroutine test_workshare |
integer :: i, j, k, l, m |
double precision, dimension (64) :: d, e |
integer, dimension (10) :: f, g |
integer, dimension (16, 16) :: a, b, c |
integer, dimension (16) :: n |
d(:) = 1 |
e = 7 |
f = 10 |
l = 256 |
m = 512 |
g(1:3) = -1 |
g(4:6) = 0 |
g(7:8) = 5 |
g(9:10) = 10 |
forall (i = 1:16, j = 1:16) a (i, j) = i * 16 + j |
forall (j = 1:16) n (j) = j |
!$omp parallel num_threads (4) private (j, k) |
!$omp barrier |
!$omp workshare |
i = 6 |
e(:) = d(:) |
where (g .lt. 0) |
f = 100 |
elsewhere (g .eq. 0) |
f = 200 + f |
elsewhere |
where (g .gt. 6) f = f + sum (g) |
f = 300 + f |
end where |
where (f .gt. 210) g = 0 |
!$omp end workshare nowait |
!$omp workshare |
forall (j = 1:16, k = 1:16) b (k, j) = a (j, k) |
forall (k = 1:16) c (k, 1:16) = a (1:16, k) |
forall (j = 2:16, n (17 - j) / 4 * 4 .ne. n (17 - j)) |
n (j) = n (j - 1) * n (j) |
end forall |
!$omp endworkshare |
!$omp workshare |
!$omp atomic |
i = i + 8 + 6 |
!$omp critical |
!$omp critical (critical_foox) |
l = 128 |
!$omp end critical (critical_foox) |
!$omp endcritical |
!$omp parallel num_threads (2) |
!$ if (omp_get_thread_num () .eq. 0) m = omp_get_num_threads () |
!$omp atomic |
l = 1 + l |
!$omp end parallel |
!$omp end workshare |
!$omp end parallel |
 |
! Validate combined effects of the worksharing constructs above. |
if (any (f .ne. (/100, 100, 100, 210, 210, 210, 310, 310, 337, 337/))) & |
& call abort |
if (any (g .ne. (/-1, -1, -1, 0, 0, 0, 0, 0, 0, 0/))) call abort |
if (i .ne. 20) call abort |
!$ if (l .ne. 128 + m) call abort |
if (any (d .ne. 1 .or. e .ne. 1)) call abort |
if (any (b .ne. transpose (a))) call abort |
if (any (c .ne. b)) call abort |
if (any (n .ne. (/1, 2, 6, 12, 5, 30, 42, 56, 9, 90, & |
& 110, 132, 13, 182, 210, 240/))) call abort |
end subroutine test_workshare |
end |
/libgomp.fortran/condinc1.inc
0,0 → 1,2
! Shared INCLUDE body for the condinc* tests: stops with code 3 when |
! the host's logical L is true, otherwise just returns. |
if (l) stop 3 |
return |
/libgomp.fortran/character1.f90
0,0 → 1,72
! { dg-do run } |
!$ use omp_lib |
! Privatization of CHARACTER variables: FIRSTPRIVATE must copy in the |
! initial contents, PRIVATE gives uninitialized per-thread copies, and |
! shared characters/host-associated characters stay shared. |
 |
character (len = 8) :: h, i |
character (len = 4) :: j, k |
h = '01234567' |
i = 'ABCDEFGH' |
j = 'IJKL' |
k = 'MN' |
call test (h, j) |
contains |
subroutine test (p, q) |
character (len = 8) :: p |
character (len = 4) :: q, r |
character (len = 16) :: f |
character (len = 32) :: g |
integer, dimension (18) :: s |
logical :: l |
integer :: m |
f = 'test16' |
g = 'abcdefghijklmnopqrstuvwxyz' |
r = '' |
l = .false. |
s = -6 |
!$omp parallel firstprivate (f, p, s) private (r, m) reduction (.or.:l) & |
!$omp & num_threads (4) |
m = omp_get_thread_num () |
! Firstprivate copies must hold their pre-region values on entry. |
if (any (s .ne. -6)) l = .true. |
l = l .or. f .ne. 'test16' .or. p .ne. '01234567' |
l = l .or. g .ne. 'abcdefghijklmnopqrstuvwxyz' |
l = l .or. i .ne. 'ABCDEFGH' .or. q .ne. 'IJKL' |
l = l .or. k .ne. 'MN' |
!$omp barrier |
! Each thread writes distinct values; shared vars written by one only. |
if (m .eq. 0) then |
f = 'ffffffff0' |
g = 'xyz' |
i = '123' |
k = '9876' |
p = '_abc' |
q = '_def' |
r = '1_23' |
else if (m .eq. 1) then |
f = '__' |
p = 'xxx' |
r = '7575' |
else if (m .eq. 2) then |
f = 'ZZ' |
p = 'm2' |
r = 'M2' |
else if (m .eq. 3) then |
f = 'YY' |
p = 'm3' |
r = 'M3' |
end if |
s = m |
!$omp barrier |
! After the barrier: shared stores visible to all, private per-thread. |
l = l .or. g .ne. 'xyz' .or. i .ne. '123' .or. k .ne. '9876' |
l = l .or. q .ne. '_def' |
if (any (s .ne. m)) l = .true. |
if (m .eq. 0) then |
l = l .or. f .ne. 'ffffffff0' .or. p .ne. '_abc' .or. r .ne. '1_23' |
else if (m .eq. 1) then |
l = l .or. f .ne. '__' .or. p .ne. 'xxx' .or. r .ne. '7575' |
else if (m .eq. 2) then |
l = l .or. f .ne. 'ZZ' .or. p .ne. 'm2' .or. r .ne. 'M2' |
else if (m .eq. 3) then |
l = l .or. f .ne. 'YY' .or. p .ne. 'm3' .or. r .ne. 'M3' |
end if |
!$omp end parallel |
if (l) call abort |
end subroutine test |
end |
/libgomp.fortran/reduction1.f90
0,0 → 1,181
! { dg-do run } |
!$ use omp_lib |
! Arithmetic REDUCTION (+, -, *) on scalars and arrays of integer, |
! real, double precision and complex.  Each of the three regions checks |
! (under !$ conditional compilation) that the private copies start at |
! the operator's identity value, then verifies the combined result when |
! exactly 3 threads ran.  Note the '-' region checks the same expected |
! values as the '+' region: OpenMP combines '-' reduction contributions |
! by addition. |
 |
integer :: i, ia (6), n, cnt |
real :: r, ra (4) |
double precision :: d, da (5) |
complex :: c, ca (3) |
logical :: v |
 |
i = 1 |
ia = 2 |
r = 3 |
ra = 4 |
d = 5.5 |
da = 6.5 |
c = cmplx (7.5, 1.5) |
ca = cmplx (8.5, -3.0) |
v = .false. |
cnt = -1 |
 |
!$omp parallel num_threads (3) private (n) reduction (.or.:v) & |
!$omp & reduction (+:i, ia, r, ra, d, da, c, ca) |
!$ if (i .ne. 0 .or. any (ia .ne. 0)) v = .true. |
!$ if (r .ne. 0 .or. any (ra .ne. 0)) v = .true. |
!$ if (d .ne. 0 .or. any (da .ne. 0)) v = .true. |
!$ if (c .ne. cmplx (0) .or. any (ca .ne. cmplx (0))) v = .true. |
n = omp_get_thread_num () |
if (n .eq. 0) then |
cnt = omp_get_num_threads () |
i = 4 |
ia(3:5) = -2 |
r = 5 |
ra(1:2) = 6.5 |
d = -2.5 |
da(2:4) = 8.5 |
c = cmplx (2.5, -3.5) |
ca(1) = cmplx (4.5, 5) |
else if (n .eq. 1) then |
i = 2 |
ia(4:6) = 5 |
r = 1 |
ra(2:4) = -1.5 |
d = 8.5 |
da(1:3) = 2.5 |
c = cmplx (0.5, -3) |
ca(2:3) = cmplx (-1, 6) |
else |
i = 1 |
ia = 1 |
r = -1 |
ra = -1 |
d = 1 |
da = -1 |
c = 1 |
ca = cmplx (-1, 0) |
end if |
!$omp end parallel |
if (v) call abort |
if (cnt .eq. 3) then |
if (i .ne. 8 .or. any (ia .ne. (/3, 3, 1, 6, 6, 8/))) call abort |
if (r .ne. 8 .or. any (ra .ne. (/9.5, 8.0, 1.5, 1.5/))) call abort |
if (d .ne. 12.5 .or. any (da .ne. (/8.0, 16.5, 16.5, 14.0, 5.5/))) call abort |
if (c .ne. cmplx (11.5, -5)) call abort |
if (ca(1) .ne. cmplx (12, 2)) call abort |
if (ca(2) .ne. cmplx (6.5, 3) .or. ca(2) .ne. ca(3)) call abort |
end if |
 |
i = 1 |
ia = 2 |
r = 3 |
ra = 4 |
d = 5.5 |
da = 6.5 |
c = cmplx (7.5, 1.5) |
ca = cmplx (8.5, -3.0) |
v = .false. |
cnt = -1 |
 |
!$omp parallel num_threads (3) private (n) reduction (.or.:v) & |
!$omp & reduction (-:i, ia, r, ra, d, da, c, ca) |
!$ if (i .ne. 0 .or. any (ia .ne. 0)) v = .true. |
!$ if (r .ne. 0 .or. any (ra .ne. 0)) v = .true. |
!$ if (d .ne. 0 .or. any (da .ne. 0)) v = .true. |
!$ if (c .ne. cmplx (0) .or. any (ca .ne. cmplx (0))) v = .true. |
n = omp_get_thread_num () |
if (n .eq. 0) then |
cnt = omp_get_num_threads () |
i = 4 |
ia(3:5) = -2 |
r = 5 |
ra(1:2) = 6.5 |
d = -2.5 |
da(2:4) = 8.5 |
c = cmplx (2.5, -3.5) |
ca(1) = cmplx (4.5, 5) |
else if (n .eq. 1) then |
i = 2 |
ia(4:6) = 5 |
r = 1 |
ra(2:4) = -1.5 |
d = 8.5 |
da(1:3) = 2.5 |
c = cmplx (0.5, -3) |
ca(2:3) = cmplx (-1, 6) |
else |
i = 1 |
ia = 1 |
r = -1 |
ra = -1 |
d = 1 |
da = -1 |
c = 1 |
ca = cmplx (-1, 0) |
end if |
!$omp end parallel |
if (v) call abort |
if (cnt .eq. 3) then |
if (i .ne. 8 .or. any (ia .ne. (/3, 3, 1, 6, 6, 8/))) call abort |
if (r .ne. 8 .or. any (ra .ne. (/9.5, 8.0, 1.5, 1.5/))) call abort |
if (d .ne. 12.5 .or. any (da .ne. (/8.0, 16.5, 16.5, 14.0, 5.5/))) call abort |
if (c .ne. cmplx (11.5, -5)) call abort |
if (ca(1) .ne. cmplx (12, 2)) call abort |
if (ca(2) .ne. cmplx (6.5, 3) .or. ca(2) .ne. ca(3)) call abort |
end if |
 |
i = 1 |
ia = 2 |
r = 4 |
ra = 8 |
d = 16 |
da = 32 |
c = 2 |
ca = cmplx (0, 2) |
v = .false. |
cnt = -1 |
 |
!$omp parallel num_threads (3) private (n) reduction (.or.:v) & |
!$omp & reduction (*:i, ia, r, ra, d, da, c, ca) |
!$ if (i .ne. 1 .or. any (ia .ne. 1)) v = .true. |
!$ if (r .ne. 1 .or. any (ra .ne. 1)) v = .true. |
!$ if (d .ne. 1 .or. any (da .ne. 1)) v = .true. |
!$ if (c .ne. cmplx (1) .or. any (ca .ne. cmplx (1))) v = .true. |
n = omp_get_thread_num () |
if (n .eq. 0) then |
cnt = omp_get_num_threads () |
i = 3 |
ia(3:5) = 2 |
r = 0.5 |
ra(1:2) = 2 |
d = -1 |
da(2:4) = -2 |
c = 2.5 |
ca(1) = cmplx (-5, 0) |
else if (n .eq. 1) then |
i = 2 |
ia(4:6) = -2 |
r = 8 |
ra(2:4) = -0.5 |
da(1:3) = -1 |
c = -3 |
ca(2:3) = cmplx (0, -1) |
else |
ia = 2 |
r = 0.5 |
ra = 0.25 |
d = 2.5 |
da = -1 |
c = cmplx (0, -1) |
ca = cmplx (-1, 0) |
end if |
!$omp end parallel |
if (v) call abort |
if (cnt .eq. 3) then |
if (i .ne. 6 .or. any (ia .ne. (/4, 4, 8, -16, -16, -8/))) call abort |
if (r .ne. 8 .or. any (ra .ne. (/4., -2., -1., -1./))) call abort |
if (d .ne. -40 .or. any (da .ne. (/32., -64., -64., 64., -32./))) call abort |
if (c .ne. cmplx (0, 15)) call abort |
if (ca(1) .ne. cmplx (0, 10)) call abort |
if (ca(2) .ne. cmplx (-2, 0) .or. ca(2) .ne. ca(3)) call abort |
end if |
end |
/libgomp.fortran/character2.f90
0,0 → 1,61
! { dg-do run } |
!$ use omp_lib |
! Privatization of assumed-length (len = *) and adjustable-length |
! (len = n) CHARACTER dummies and locals, plus an adjustable-size |
! integer array, under FIRSTPRIVATE / PRIVATE. |
 |
character (len = 8) :: h |
character (len = 9) :: i |
h = '01234567' |
i = 'ABCDEFGHI' |
call test (h, i, 9) |
contains |
subroutine test (p, q, n) |
character (len = *) :: p |
character (len = n) :: q |
character (len = n) :: r |
character (len = n) :: t |
character (len = n) :: u |
integer, dimension (n + 4) :: s |
logical :: l |
integer :: m |
r = '' |
if (n .gt. 8) r = 'jklmnopqr' |
do m = 1, n + 4 |
s(m) = m |
end do |
u = 'abc' |
l = .false. |
!$omp parallel firstprivate (p, q, r) private (t, m) reduction (.or.:l) & |
!$omp & num_threads (2) |
! s and u are shared; firstprivate copies must carry initial values. |
do m = 1, 13 |
if (s(m) .ne. m) l = .true. |
end do |
m = omp_get_thread_num () |
l = l .or. p .ne. '01234567' .or. q .ne. 'ABCDEFGHI' |
l = l .or. r .ne. 'jklmnopqr' .or. u .ne. 'abc' |
!$omp barrier |
if (m .eq. 0) then |
p = 'A' |
q = 'B' |
r = 'C' |
t = '123' |
u = '987654321' |
else if (m .eq. 1) then |
p = 'D' |
q = 'E' |
r = 'F' |
t = '456' |
s = m |
end if |
!$omp barrier |
! Shared u/s reflect the stores above; private p/q/r/t are per-thread. |
l = l .or. u .ne. '987654321' |
if (any (s .ne. 1)) l = .true. |
if (m .eq. 0) then |
l = l .or. p .ne. 'A' .or. q .ne. 'B' .or. r .ne. 'C' |
l = l .or. t .ne. '123' |
else |
l = l .or. p .ne. 'D' .or. q .ne. 'E' .or. r .ne. 'F' |
l = l .or. t .ne. '456' |
end if |
!$omp end parallel |
if (l) call abort |
end subroutine test |
end |
/libgomp.fortran/jacobi.f
0,0 → 1,261
* { dg-do run } |
|
program main |
************************************************************ |
* program to solve a finite difference |
* discretization of Helmholtz equation : |
* (d2/dx2)u + (d2/dy2)u - alpha u = f |
* using Jacobi iterative method. |
* |
* Modified: Sanjiv Shah, Kuck and Associates, Inc. (KAI), 1998 |
* Author: Joseph Robicheaux, Kuck and Associates, Inc. (KAI), 1998 |
* |
* Directives are used in this code to achieve paralleism. |
* All do loops are parallized with default 'static' scheduling. |
* |
* Input : n - grid dimension in x direction |
* m - grid dimension in y direction |
* alpha - Helmholtz constant (always greater than 0.0) |
* tol - error tolerance for iterative solver |
* relax - Successice over relaxation parameter |
* mits - Maximum iterations for iterative solver |
* |
* On output |
* : u(n,m) - Dependent variable (solutions) |
* : f(n,m) - Right hand side function |
************************************************************* |
implicit none |
 |
integer n,m,mits,mtemp |
include "omp_lib.h" |
double precision tol,relax,alpha |
 |
common /idat/ n,m,mits,mtemp |
common /fdat/tol,alpha,relax |
* |
* Read info |
* |
* For the testsuite the interactive reads are replaced by fixed |
* parameters (the original read(5,*) lines are kept commented out). |
write(*,*) "Input n,m - grid dimension in x,y direction " |
n = 64 |
m = 64 |
* read(5,*) n,m |
write(*,*) n, m |
write(*,*) "Input alpha - Helmholts constant " |
alpha = 0.5 |
* read(5,*) alpha |
write(*,*) alpha |
write(*,*) "Input relax - Successive over-relaxation parameter" |
relax = 0.9 |
* read(5,*) relax |
write(*,*) relax |
write(*,*) "Input tol - error tolerance for iterative solver" |
tol = 1.0E-12 |
* read(5,*) tol |
write(*,*) tol |
write(*,*) "Input mits - Maximum iterations for solver" |
mits = 100 |
* read(5,*) mits |
write(*,*) mits |
 |
call omp_set_num_threads (2) |
 |
* |
* Calls a driver routine |
* |
call driver () |
 |
stop |
end |
|
subroutine driver ( ) |
************************************************************* |
* Subroutine driver () |
* This is where the arrays are allocated and initialzed |
* (u and f are automatic arrays sized from the /idat/ common). |
* |
* Working varaibles/arrays |
* dx - grid spacing in x direction |
* dy - grid spacing in y direction |
************************************************************* |
implicit none |
 |
integer n,m,mits,mtemp |
double precision tol,relax,alpha |
 |
common /idat/ n,m,mits,mtemp |
common /fdat/tol,alpha,relax |
 |
double precision u(n,m),f(n,m),dx,dy |
 |
* Initialize data |
 |
call initialize (n,m,alpha,dx,dy,u,f) |
 |
* Solve Helmholtz equation |
 |
call jacobi (n,m,dx,dy,alpha,relax,u,f,tol,mits) |
 |
* Check error between exact solution |
 |
call error_check (n,m,alpha,dx,dy,u,f) |
 |
return |
end |
|
subroutine initialize (n,m,alpha,dx,dy,u,f) |
****************************************************** |
* Initializes data: computes the grid spacings dx,dy, |
* zeroes the solution u and fills the right hand side f. |
* Assumes exact solution is u(x,y) = (1-x^2)*(1-y^2) |
* |
****************************************************** |
implicit none |
 |
integer n,m |
double precision u(n,m),f(n,m),dx,dy,alpha |
 |
* BUG FIX: xx and yy hold real grid coordinates in (-1,1); they were |
* previously declared INTEGER, truncating every coordinate to 0 or -1 |
* and corrupting the computed right hand side. |
integer i,j |
double precision xx,yy |
double precision PI |
parameter (PI=3.1415926) |
 |
dx = 2.0 / (n-1) |
dy = 2.0 / (m-1) |
 |
* Initialize initial condition and RHS |
 |
!$omp parallel do private(xx,yy) |
do j = 1,m |
do i = 1,n |
xx = -1.0 + dx * dble(i-1) ! -1 < x < 1 |
yy = -1.0 + dy * dble(j-1) ! -1 < y < 1 |
u(i,j) = 0.0 |
f(i,j) = -alpha *(1.0-xx*xx)*(1.0-yy*yy) |
& - 2.0*(1.0-xx*xx)-2.0*(1.0-yy*yy) |
enddo |
enddo |
!$omp end parallel do |
 |
return |
end |
|
subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,maxit) |
****************************************************************** |
* Subroutine HelmholtzJ |
* Solves poisson equation on rectangular grid assuming : |
* (1) Uniform discretization in each direction, and |
* (2) Dirichlect boundary conditions |
* |
* Jacobi method is used in this routine |
* |
* Input : n,m Number of grid points in the X/Y directions |
* dx,dy Grid spacing in the X/Y directions |
* alpha Helmholtz eqn. coefficient |
* omega Relaxation factor |
* f(n,m) Right hand side function |
* u(n,m) Dependent variable/Solution |
* tol Tolerance for iterative solver |
* maxit Maximum number of iterations |
* |
* Output : u(n,m) - Solution |
***************************************************************** |
implicit none |
integer n,m,maxit |
double precision dx,dy,f(n,m),u(n,m),alpha, tol,omega |
* |
* Local variables |
* |
integer i,j,k,k_local |
double precision error,resid,rsum,ax,ay,b |
double precision error_local, uold(n,m) |
 |
real ta,tb,tc,td,te,ta1,ta2,tb1,tb2,tc1,tc2,td1,td2 |
real te1,te2 |
real second |
external second |
* |
* Initialize coefficients |
ax = 1.0/(dx*dx) ! X-direction coef |
ay = 1.0/(dy*dy) ! Y-direction coef |
b = -2.0/(dx*dx)-2.0/(dy*dy) - alpha ! Central coeff |
 |
error = 10.0 * tol |
k = 1 |
 |
* Iterate until converged (error <= tol) or maxit sweeps done. |
do while (k.le.maxit .and. error.gt. tol) |
 |
error = 0.0 |
 |
* Copy new solution into old |
!$omp parallel |
 |
!$omp do |
do j=1,m |
do i=1,n |
uold(i,j) = u(i,j) |
enddo |
enddo |
 |
* Compute stencil, residual, & update |
 |
* error is a sum reduction over all interior points; the implicit |
* barrier at the end of the parallel region orders it before reuse. |
!$omp do private(resid) reduction(+:error) |
do j = 2,m-1 |
do i = 2,n-1 |
* Evaluate residual |
resid = (ax*(uold(i-1,j) + uold(i+1,j)) |
& + ay*(uold(i,j-1) + uold(i,j+1)) |
& + b * uold(i,j) - f(i,j))/b |
* Update solution |
u(i,j) = uold(i,j) - omega * resid |
* Accumulate residual error |
error = error + resid*resid |
end do |
enddo |
!$omp enddo nowait |
 |
!$omp end parallel |
 |
* Error check |
 |
k = k + 1 |
 |
error = sqrt(error)/dble(n*m) |
* |
enddo ! End iteration loop |
* |
print *, 'Total Number of Iterations ', k |
print *, 'Residual ', error |
 |
return |
end |
|
subroutine error_check (n,m,alpha,dx,dy,u,f) |
implicit none |
************************************************************ |
* Checks error between numerical and exact solution |
* u(x,y) = (1-x^2)*(1-y^2) and prints the RMS error. |
* |
************************************************************ |
 |
integer n,m |
double precision u(n,m),f(n,m),dx,dy,alpha |
 |
integer i,j |
double precision xx,yy,temp,error |
 |
dx = 2.0 / (n-1) |
dy = 2.0 / (m-1) |
error = 0.0 |
 |
!$omp parallel do private(xx,yy,temp) reduction(+:error) |
do j = 1,m |
do i = 1,n |
xx = -1.0d0 + dx * dble(i-1) |
yy = -1.0d0 + dy * dble(j-1) |
temp = u(i,j) - (1.0-xx*xx)*(1.0-yy*yy) |
error = error + temp*temp |
enddo |
enddo |
 |
error = sqrt(error)/dble(n*m) |
 |
print *, 'Solution Error : ',error |
 |
return |
end |
/libgomp.fortran/reduction2.f90
0,0 → 1,73
! { dg-do run } |
!$ use omp_lib |
! Logical REDUCTION operators: .and./.or. in the first region, |
! .eqv./.neqv. in the second.  The !$ lines verify the identity value |
! of each private copy; the final checks only fire when 3 threads ran. |
 |
logical :: l, la (4), m, ma (4), v |
integer :: n, cnt |
 |
l = .true. |
la = (/.true., .false., .true., .true./) |
m = .false. |
ma = (/.false., .false., .false., .true./) |
v = .false. |
cnt = -1 |
 |
!$omp parallel num_threads (3) private (n) reduction (.or.:v) & |
!$omp & reduction (.and.:l, la) reduction (.or.:m, ma) |
!$ if (.not. l .or. any (.not. la)) v = .true. |
!$ if (m .or. any (ma)) v = .true. |
n = omp_get_thread_num () |
if (n .eq. 0) then |
cnt = omp_get_num_threads () |
l = .false. |
la(3) = .false. |
ma(2) = .true. |
else if (n .eq. 1) then |
l = .false. |
la(4) = .false. |
ma(1) = .true. |
else |
la(3) = .false. |
m = .true. |
ma(1) = .true. |
end if |
!$omp end parallel |
if (v) call abort |
if (cnt .eq. 3) then |
if (l .or. any (la .neqv. (/.true., .false., .false., .false./))) call abort |
if (.not. m .or. any (ma .neqv. (/.true., .true., .false., .true./))) call abort |
end if |
 |
l = .true. |
la = (/.true., .false., .true., .true./) |
m = .false. |
ma = (/.false., .false., .false., .true./) |
v = .false. |
cnt = -1 |
 |
!$omp parallel num_threads (3) private (n) reduction (.or.:v) & |
!$omp & reduction (.eqv.:l, la) reduction (.neqv.:m, ma) |
!$ if (.not. l .or. any (.not. la)) v = .true. |
!$ if (m .or. any (ma)) v = .true. |
n = omp_get_thread_num () |
if (n .eq. 0) then |
cnt = omp_get_num_threads () |
l = .false. |
la(3) = .false. |
ma(2) = .true. |
else if (n .eq. 1) then |
l = .false. |
la(4) = .false. |
ma(1) = .true. |
else |
la(3) = .false. |
m = .true. |
ma(1) = .true. |
end if |
!$omp end parallel |
if (v) call abort |
if (cnt .eq. 3) then |
if (.not. l .or. any (la .neqv. (/.true., .false., .true., .false./))) call abort |
if (.not. m .or. any (ma .neqv. (/.false., .true., .false., .true./))) call abort |
end if |
 |
end |
/libgomp.fortran/reduction3.f90
0,0 → 1,103
! { dg-do run } |
!$ use omp_lib |
! MAX / MIN REDUCTION on integer, real and double precision scalars |
! and arrays.  The !$ lines check that private copies start at the |
! most-negative (for max) / most-positive (for min) representable |
! value; final combined results are only checked when 3 threads ran. |
 |
integer (kind = 4) :: i, ia (6), n, cnt |
real :: r, ra (4) |
double precision :: d, da (5) |
logical :: v |
 |
i = 1 |
ia = 2 |
r = 3 |
ra = 4 |
d = 5.5 |
da = 6.5 |
v = .false. |
cnt = -1 |
 |
!$omp parallel num_threads (3) private (n) reduction (.or.:v) & |
!$omp & reduction (max:i, ia, r, ra, d, da) |
!$ if (i .ne. -huge(i)-1 .or. any (ia .ne. -huge(ia)-1)) v = .true. |
!$ if (r .ge. -1.0d38 .or. any (ra .ge. -1.0d38)) v = .true. |
!$ if (d .ge. -1.0d300 .or. any (da .ge. -1.0d300)) v = .true. |
n = omp_get_thread_num () |
if (n .eq. 0) then |
cnt = omp_get_num_threads () |
i = 4 |
ia(3:5) = -2 |
ia(1) = 7 |
r = 5 |
ra(1:2) = 6.5 |
d = -2.5 |
da(2:4) = 8.5 |
else if (n .eq. 1) then |
i = 2 |
ia(4:6) = 5 |
r = 1 |
ra(2:4) = -1.5 |
d = 8.5 |
da(1:3) = 2.5 |
else |
i = 1 |
ia = 1 |
r = -1 |
ra = -1 |
d = 1 |
da = -1 |
end if |
!$omp end parallel |
if (v) call abort |
if (cnt .eq. 3) then |
if (i .ne. 4 .or. any (ia .ne. (/7, 2, 2, 5, 5, 5/))) call abort |
if (r .ne. 5 .or. any (ra .ne. (/6.5, 6.5, 4., 4./))) call abort |
if (d .ne. 8.5 .or. any (da .ne. (/6.5, 8.5, 8.5, 8.5, 6.5/))) call abort |
end if |
 |
i = 1 |
ia = 2 |
r = 3 |
ra = 4 |
d = 5.5 |
da = 6.5 |
v = .false. |
cnt = -1 |
 |
!$omp parallel num_threads (3) private (n) reduction (.or.:v) & |
!$omp & reduction (min:i, ia, r, ra, d, da) |
!$ if (i .ne. 2147483647 .or. any (ia .ne. 2147483647)) v = .true. |
!$ if (r .le. 1.0d38 .or. any (ra .le. 1.0d38)) v = .true. |
!$ if (d .le. 1.0d300 .or. any (da .le. 1.0d300)) v = .true. |
n = omp_get_thread_num () |
if (n .eq. 0) then |
cnt = omp_get_num_threads () |
i = 4 |
ia(3:5) = -2 |
ia(1) = 7 |
r = 5 |
ra(1:2) = 6.5 |
d = -2.5 |
da(2:4) = 8.5 |
else if (n .eq. 1) then |
i = 2 |
ia(4:6) = 5 |
r = 1 |
ra(2:4) = -1.5 |
d = 8.5 |
da(1:3) = 2.5 |
else |
i = 1 |
ia = 1 |
r = -1 |
ra = 7 |
ra(3) = -8.5 |
d = 1 |
da(1:4) = 6 |
end if |
!$omp end parallel |
if (v) call abort |
if (cnt .eq. 3) then |
if (i .ne. 1 .or. any (ia .ne. (/1, 1, -2, -2, -2, 1/))) call abort |
if (r .ne. -1 .or. any (ra .ne. (/4., -1.5, -8.5, -1.5/))) call abort |
if (d .ne. -2.5 .or. any (da .ne. (/2.5, 2.5, 2.5, 6., 6.5/))) call abort |
end if |
end |
/libgomp.fortran/reduction4.f90
0,0 → 1,56
! { dg-do run } |
!$ use omp_lib |
! Bitwise REDUCTION intrinsics: IAND, IOR and IEOR on scalars and |
! arrays.  Uses BOZ (Z'...') constants (GNU extension in this context); |
! the !$ lines check the identity values (all-ones for iand, 0 for |
! ior/ieor).  Results are only checked when exactly 3 threads ran. |
 |
integer (kind = 4) :: i, ia (6), j, ja (6), k, ka (6), ta (6), n, cnt, x |
logical :: v |
 |
i = Z'ffff0f' |
ia = Z'f0ff0f' |
j = Z'0f0000' |
ja = Z'0f5a00' |
k = Z'055aa0' |
ka = Z'05a5a5' |
v = .false. |
cnt = -1 |
x = not(0) |
 |
!$omp parallel num_threads (3) private (n) reduction (.or.:v) & |
!$omp & reduction (iand:i, ia) reduction (ior:j, ja) reduction (ieor:k, ka) |
!$ if (i .ne. x .or. any (ia .ne. x)) v = .true. |
!$ if (j .ne. 0 .or. any (ja .ne. 0)) v = .true. |
!$ if (k .ne. 0 .or. any (ka .ne. 0)) v = .true. |
n = omp_get_thread_num () |
if (n .eq. 0) then |
cnt = omp_get_num_threads () |
i = Z'ff7fff' |
ia(3:5) = Z'fffff1' |
j = Z'078000' |
ja(1:3) = 1 |
k = Z'78' |
ka(3:6) = Z'f0f' |
else if (n .eq. 1) then |
i = Z'ffff77' |
ia(2:5) = Z'ffafff' |
j = Z'007800' |
ja(2:5) = 8 |
k = Z'57' |
ka(3:4) = Z'f0108' |
else |
i = Z'777fff' |
ia(1:2) = Z'fffff3' |
j = Z'000780' |
ja(5:6) = Z'f00' |
k = Z'1000' |
ka(6:6) = Z'777' |
end if |
!$omp end parallel |
if (v) call abort |
if (cnt .eq. 3) then |
ta = (/Z'f0ff03', Z'f0af03', Z'f0af01', Z'f0af01', Z'f0af01', Z'f0ff0f'/) |
if (i .ne. Z'777f07' .or. any (ia .ne. ta)) call abort |
ta = (/Z'f5a01', Z'f5a09', Z'f5a09', Z'f5a08', Z'f5f08', Z'f5f00'/) |
if (j .ne. Z'fff80' .or. any (ja .ne. ta)) call abort |
ta = (/Z'5a5a5', Z'5a5a5', Z'aaba2', Z'aaba2', Z'5aaaa', Z'5addd'/) |
if (k .ne. Z'54a8f' .or. any (ka .ne. ta)) call abort |
end if |
end |
/libgomp.fortran/reduction5.f90
0,0 → 1,41
! { dg-do run } |
 |
! Checks that REDUCTION resolves the intrinsic operation by name at the |
! point of the directive, even when USE-renaming has given the intrinsic |
! a different local name (test1) or swapped min and max (test2). |
module reduction5 |
intrinsic ior, min, max |
end module reduction5 |
 |
call test1 |
call test2 |
contains |
subroutine test1 |
! 'bitwise_or' is a rename of intrinsic IOR; reduction(bitwise_or:n) |
! must still perform an inclusive-or reduction. |
use reduction5, bitwise_or => ior |
integer :: n |
n = Z'f' |
!$omp parallel sections num_threads (3) reduction (bitwise_or: n) |
n = ior (n, Z'20') |
!$omp section |
n = bitwise_or (Z'410', n) |
!$omp section |
n = bitwise_or (n, Z'2000') |
!$omp end parallel sections |
if (n .ne. Z'243f') call abort |
end subroutine |
subroutine test2 |
! min and max are deliberately swapped by renaming: reduction(min:) |
! must behave as MAX and vice versa under these local names. |
use reduction5, min => max, max => min |
integer :: m, n |
m = 8 |
n = 4 |
!$omp parallel sections num_threads (3) reduction (min: n) & |
!$omp & reduction (max: m) |
if (m .gt. 13) m = 13 |
if (n .lt. 11) n = 11 |
!$omp section |
if (m .gt. 5) m = 5 |
if (n .lt. 15) n = 15 |
!$omp section |
if (m .gt. 3) m = 3 |
if (n .lt. -1) n = -1 |
!$omp end parallel sections |
if (m .ne. 3 .or. n .ne. 15) call abort |
end subroutine test2 |
end |
/libgomp.fortran/omp_cond3.F90
0,0 → 1,24
! Test conditional compilation in free form if -fopenmp |
! { dg-options "-fopenmp" } |
! With -fopenmp the !$ sentinel lines (including their continuations) |
! are compiled, so the expected values below include their effect. |
! Do not reformat: the split tokens and continuations are the test. |
10 foo = 2& |
&56 |
if (foo.ne.256) call abort |
bar = 26 |
!$ 20 ba& |
!$ &r = 4& |
!$2 |
!$bar = 62 |
!$ bar = bar + 2 |
#ifdef _OPENMP |
bar = bar - 1 |
#endif |
if (bar.ne.43) call abort |
baz = bar |
!$ 30 baz = 5& ! Comment |
!$12 & |
!$ + 2 |
!$X baz = 0 ! Not valid OpenMP conditional compilation lines |
! $ baz = 1 |
baz = baz + 1 !$ baz = 2 |
if (baz.ne.515) call abort |
end |
/libgomp.fortran/reduction6.f90
0,0 → 1,32
! { dg-do run } |
 |
! REDUCTION (+) on an assumed-shape array dummy that is an interior |
! section of a larger array: only the 3x3 section passed to foo may be |
! modified (to 2 per thread, summed to 6 with 3 threads). |
integer, dimension (6, 6) :: a |
character (36) :: c |
integer nthreads |
a = 9 |
nthreads = -1 |
call foo (a (2:4, 3:5), nthreads) |
if (nthreads .eq. 3) then |
write (c, '(36i1)') a |
if (c .ne. '999999999999966699966699966699999999') call abort |
end if |
contains |
subroutine foo (b, nthreads) |
use omp_lib |
integer, dimension (3:, 5:) :: b |
integer :: err, nthreads |
b = 0 |
err = 0 |
!$omp parallel num_threads (3) reduction (+:b) |
! Private copies of b must start at the + identity (all zero). |
if (any (b .ne. 0)) then |
!$omp atomic |
err = err + 1 |
end if |
!$omp master |
nthreads = omp_get_num_threads () |
!$omp end master |
b = 2 |
!$omp end parallel |
if (err .gt. 0) call abort |
end subroutine foo |
end |
/libgomp.fortran/omp_cond4.F90
0,0 → 1,24
! Test conditional compilation in free form if -fno-openmp |
! { dg-options "-fno-openmp" } |
! Without -fopenmp every !$ sentinel line is an ordinary comment, so |
! the expected values below exclude their effect (compare omp_cond3). |
! Do not reformat: the split tokens and continuations are the test. |
10 foo = 2& |
&56 |
if (foo.ne.256) call abort |
bar = 26 |
!$ 20 ba& |
!$ &r = 4& |
!$2 |
!$bar = 62 |
!$ bar = bar + 2 |
#ifdef _OPENMP |
bar = bar - 1 |
#endif |
if (bar.ne.26) call abort |
baz = bar |
!$ 30 baz = 5& ! Comment |
!$12 & |
!$ + 2 |
!$X baz = 0 ! Not valid OpenMP conditional compilation lines |
! $ baz = 1 |
baz = baz + 1 !$ baz = 2 |
if (baz.ne.27) call abort |
end |
/libgomp.fortran/workshare1.f90
0,0 → 1,30
! Side-effecting helper: records (via common) that it was evaluated in |
! the PARALLEL WORKSHARE clause expressions, then returns 3. |
function foo () |
integer :: foo |
logical :: foo_seen |
common /foo_seen/ foo_seen |
foo_seen = .true. |
foo = 3 |
end |
! Same as foo, used to make sure num_threads () expressions are run. |
function bar () |
integer :: bar |
logical :: bar_seen |
common /bar_seen/ bar_seen |
bar_seen = .true. |
bar = 3 |
end |
integer :: a (10), b (10), foo, bar |
logical :: foo_seen, bar_seen |
common /foo_seen/ foo_seen |
common /bar_seen/ bar_seen |
 |
foo_seen = .false. |
bar_seen = .false. |
! IF and NUM_THREADS clause expressions on PARALLEL WORKSHARE must be |
! evaluated exactly as written (both helpers must be called). |
!$omp parallel workshare if (foo () .gt. 2) num_threads (bar () + 1) |
a = 10 |
b = 20 |
a(1:5) = max (a(1:5), b(1:5)) |
!$omp end parallel workshare |
if (any (a(1:5) .ne. 20)) call abort |
if (any (a(6:10) .ne. 10)) call abort |
if (.not. foo_seen .or. .not. bar_seen) call abort |
end |
/libgomp.fortran/omp_workshare2.f
0,0 → 1,56
C****************************************************************************** |
C FILE: omp_workshare2.f |
C DESCRIPTION: |
C OpenMP Example - Sections Work-sharing - Fortran Version |
C In this example, the OpenMP SECTION directive is used to assign |
C different array operations to threads that execute a SECTION. Each |
C thread receives its own copy of the result array to work with. |
C AUTHOR: Blaise Barney 5/99 |
C LAST REVISED: 01/09/04 |
C****************************************************************************** |
 |
PROGRAM WORKSHARE2 |
 |
INTEGER N, I, NTHREADS, TID, OMP_GET_NUM_THREADS, |
+ OMP_GET_THREAD_NUM |
PARAMETER (N=50) |
REAL A(N), B(N), C(N) |
 |
! Some initializations |
DO I = 1, N |
A(I) = I * 1.0 |
B(I) = A(I) |
ENDDO |
 |
C C is PRIVATE: each thread works on its own copy of the result array. |
!$OMP PARALLEL SHARED(A,B,NTHREADS), PRIVATE(C,I,TID) |
TID = OMP_GET_THREAD_NUM() |
IF (TID .EQ. 0) THEN |
NTHREADS = OMP_GET_NUM_THREADS() |
PRINT *, 'Number of threads =', NTHREADS |
END IF |
PRINT *, 'Thread',TID,' starting...' |
 |
!$OMP SECTIONS |
 |
!$OMP SECTION |
PRINT *, 'Thread',TID,' doing section 1' |
DO I = 1, N |
C(I) = A(I) + B(I) |
WRITE(*,100) TID,I,C(I) |
100 FORMAT(' Thread',I2,': C(',I2,')=',F8.2) |
ENDDO |
 |
!$OMP SECTION |
PRINT *, 'Thread',TID,' doing section 2' |
DO I = 1+N/2, N |
C(I) = A(I) * B(I) |
WRITE(*,100) TID,I,C(I) |
ENDDO |
 |
!$OMP END SECTIONS NOWAIT |
 |
PRINT *, 'Thread',TID,' done.' |
 |
!$OMP END PARALLEL |
 |
END |
/libgomp.fortran/pr29629.f90
0,0 → 1,20
! PR fortran/29629 |
! { dg-do run } |
 |
! Regression test: REDUCTION (+) on a function result array used to |
! ICE / miscompile; with 4 threads each incrementing func once per |
! iteration, every element must end up equal to n. |
program pr29629 |
integer :: n |
n = 10000 |
if (any (func(n).ne.10000)) call abort |
contains |
function func(n) |
integer, intent(in) :: n |
integer, dimension(n) :: func |
integer :: k |
func = 0 |
!$omp parallel do private(k), reduction(+:func), num_threads(4) |
do k = 1, n |
func = func + 1 |
end do |
!$omp end parallel do |
end function |
end program |
/libgomp.fortran/omp_reduction.f
0,0 → 1,33
C****************************************************************************** |
C FILE: omp_reduction.f |
C DESCRIPTION: |
C OpenMP Example - Combined Parallel Loop Reduction - Fortran Version |
C This example demonstrates a sum reduction within a combined parallel loop |
C construct. Notice that default data element scoping is assumed - there |
C are no clauses specifying shared or private variables. OpenMP will |
C automatically make loop index variables private within team threads, and |
C global variables shared. |
C AUTHOR: Blaise Barney 5/99 |
C LAST REVISED: |
C****************************************************************************** |
 |
PROGRAM REDUCTION |
 |
INTEGER I, N |
REAL A(100), B(100), SUM |
 |
! Some initializations |
N = 100 |
DO I = 1, N |
A(I) = I *1.0 |
B(I) = A(I) |
ENDDO |
SUM = 0.0 |
 |
C SUM accumulates the dot product of A and B across all threads. |
!$OMP PARALLEL DO REDUCTION(+:SUM) |
DO I = 1, N |
SUM = SUM + (A(I) * B(I)) |
ENDDO |
 |
PRINT *, ' Sum = ', SUM |
END |
/libgomp.fortran/vla1.f90
0,0 → 1,185
! { dg-do run } |
 |
! FIRSTPRIVATE of variable-length entities: assumed-length and |
! adjustable-length characters, adjustable and assumed-shape arrays. |
! foo checks the copied-in values, overwrites everything with |
! thread-dependent data, and re-checks after a barrier; the size/bound |
! checks verify the private copies kept the originals' shapes. |
call test |
contains |
! Accumulates x /= y into the error flag l. |
subroutine check (x, y, l) |
integer :: x, y |
logical :: l |
l = l .or. x .ne. y |
end subroutine check |
 |
subroutine foo (c, d, e, f, g, h, i, j, k, n) |
use omp_lib |
integer :: n |
character (len = *) :: c |
character (len = n) :: d |
integer, dimension (2, 3:5, n) :: e |
integer, dimension (2, 3:n, n) :: f |
character (len = *), dimension (5, 3:n) :: g |
character (len = n), dimension (5, 3:n) :: h |
real, dimension (:, :, :) :: i |
double precision, dimension (3:, 5:, 7:) :: j |
integer, dimension (:, :, :) :: k |
logical :: l |
integer :: p, q, r |
character (len = n) :: s |
integer, dimension (2, 3:5, n) :: t |
integer, dimension (2, 3:n, n) :: u |
character (len = n), dimension (5, 3:n) :: v |
character (len = 2 * n + 24) :: w |
integer :: x |
character (len = 1) :: y |
s = 'PQRSTUV' |
forall (p = 1:2, q = 3:5, r = 1:7) t(p, q, r) = -10 + p - q + 2 * r |
forall (p = 1:2, q = 3:7, r = 1:7) u(p, q, r) = 30 - p + q - 2 * r |
forall (p = 1:5, q = 3:7, p + q .le. 8) v(p, q) = '_+|/Oo_' |
forall (p = 1:5, q = 3:7, p + q .gt. 8) v(p, q) = '///|||!' |
l = .false. |
!$omp parallel default (none) firstprivate (c, d, e, f, g, h, i, j, k) & |
!$omp & firstprivate (s, t, u, v) reduction (.or.:l) num_threads (6) & |
!$omp private (p, q, r, w, x, y) |
! Phase 1: every firstprivate copy must equal the caller's data. |
l = l .or. c .ne. 'abcdefghijkl' |
l = l .or. d .ne. 'ABCDEFG' |
l = l .or. s .ne. 'PQRSTUV' |
do 100, p = 1, 2 |
do 100, q = 3, 7 |
do 100, r = 1, 7 |
if (q .lt. 6) l = l .or. e(p, q, r) .ne. 5 + p + q + 2 * r |
l = l .or. f(p, q, r) .ne. 25 + p + q + 2 * r |
if (r .lt. 6 .and. q + r .le. 8) l = l .or. g(r, q) .ne. '0123456789AB' |
if (r .lt. 6 .and. q + r .gt. 8) l = l .or. g(r, q) .ne. '9876543210ZY' |
if (r .lt. 6 .and. q + r .le. 8) l = l .or. h(r, q) .ne. '0123456' |
if (r .lt. 6 .and. q + r .gt. 8) l = l .or. h(r, q) .ne. '9876543' |
if (q .lt. 6) l = l .or. t(p, q, r) .ne. -10 + p - q + 2 * r |
l = l .or. u(p, q, r) .ne. 30 - p + q - 2 * r |
if (r .lt. 6 .and. q + r .le. 8) l = l .or. v(r, q) .ne. '_+|/Oo_' |
if (r .lt. 6 .and. q + r .gt. 8) l = l .or. v(r, q) .ne. '///|||!' |
100 continue |
do 101, p = 3, 5 |
do 101, q = 2, 6 |
do 101, r = 1, 7 |
l = l .or. i(p - 2, q - 1, r) .ne. 7.5 * p * q * r |
l = l .or. j(p, q + 3, r + 6) .ne. 9.5 * p * q * r |
101 continue |
do 102, p = 1, 5 |
do 102, q = 4, 6 |
l = l .or. k(p, 1, q - 3) .ne. 19 + p + 7 + 3 * q |
102 continue |
! Phase 2: overwrite everything with thread-number-dependent data. |
x = omp_get_thread_num () |
w = '' |
if (x .eq. 0) w = 'thread0thr_number_0THREAD0THR_NUMBER_0' |
if (x .eq. 1) w = 'thread1thr_number_1THREAD1THR_NUMBER_1' |
if (x .eq. 2) w = 'thread2thr_number_2THREAD2THR_NUMBER_2' |
if (x .eq. 3) w = 'thread3thr_number_3THREAD3THR_NUMBER_3' |
if (x .eq. 4) w = 'thread4thr_number_4THREAD4THR_NUMBER_4' |
if (x .eq. 5) w = 'thread5thr_number_5THREAD5THR_NUMBER_5' |
c = w(8:19) |
d = w(1:7) |
forall (p = 1:2, q = 3:5, r = 1:7) e(p, q, r) = 5 * x + p + q + 2 * r |
forall (p = 1:2, q = 3:7, r = 1:7) f(p, q, r) = 25 * x + p + q + 2 * r |
forall (p = 1:5, q = 3:7, p + q .le. 8) g(p, q) = w(8:19) |
forall (p = 1:5, q = 3:7, p + q .gt. 8) g(p, q) = w(27:38) |
forall (p = 1:5, q = 3:7, p + q .le. 8) h(p, q) = w(1:7) |
forall (p = 1:5, q = 3:7, p + q .gt. 8) h(p, q) = w(20:26) |
forall (p = 3:5, q = 2:6, r = 1:7) i(p - 2, q - 1, r) = (7.5 + x) * p * q * r |
forall (p = 3:5, q = 2:6, r = 1:7) j(p, q + 3, r + 6) = (9.5 + x) * p * q * r |
forall (p = 1:5, q = 7:7, r = 4:6) k(p, q - 6, r - 3) = 19 + x + p + q + 3 * r |
s = w(20:26) |
forall (p = 1:2, q = 3:5, r = 1:7) t(p, q, r) = -10 + x + p - q + 2 * r |
forall (p = 1:2, q = 3:7, r = 1:7) u(p, q, r) = 30 - x - p + q - 2 * r |
forall (p = 1:5, q = 3:7, p + q .le. 8) v(p, q) = w(1:7) |
forall (p = 1:5, q = 3:7, p + q .gt. 8) v(p, q) = w(20:26) |
!$omp barrier |
! Phase 3: after the barrier, each thread must still see only its own |
! values in the private copies (no cross-thread interference). |
y = '' |
if (x .eq. 0) y = '0' |
if (x .eq. 1) y = '1' |
if (x .eq. 2) y = '2' |
if (x .eq. 3) y = '3' |
if (x .eq. 4) y = '4' |
if (x .eq. 5) y = '5' |
l = l .or. w(7:7) .ne. y |
l = l .or. w(19:19) .ne. y |
l = l .or. w(26:26) .ne. y |
l = l .or. w(38:38) .ne. y |
l = l .or. c .ne. w(8:19) |
l = l .or. d .ne. w(1:7) |
l = l .or. s .ne. w(20:26) |
do 103, p = 1, 2 |
do 103, q = 3, 7 |
do 103, r = 1, 7 |
if (q .lt. 6) l = l .or. e(p, q, r) .ne. 5 * x + p + q + 2 * r |
l = l .or. f(p, q, r) .ne. 25 * x + p + q + 2 * r |
if (r .lt. 6 .and. q + r .le. 8) l = l .or. g(r, q) .ne. w(8:19) |
if (r .lt. 6 .and. q + r .gt. 8) l = l .or. g(r, q) .ne. w(27:38) |
if (r .lt. 6 .and. q + r .le. 8) l = l .or. h(r, q) .ne. w(1:7) |
if (r .lt. 6 .and. q + r .gt. 8) l = l .or. h(r, q) .ne. w(20:26) |
if (q .lt. 6) l = l .or. t(p, q, r) .ne. -10 + x + p - q + 2 * r |
l = l .or. u(p, q, r) .ne. 30 - x - p + q - 2 * r |
if (r .lt. 6 .and. q + r .le. 8) l = l .or. v(r, q) .ne. w(1:7) |
if (r .lt. 6 .and. q + r .gt. 8) l = l .or. v(r, q) .ne. w(20:26) |
103 continue |
do 104, p = 3, 5 |
do 104, q = 2, 6 |
do 104, r = 1, 7 |
l = l .or. i(p - 2, q - 1, r) .ne. (7.5 + x) * p * q * r |
l = l .or. j(p, q + 3, r + 6) .ne. (9.5 + x) * p * q * r |
104 continue |
do 105, p = 1, 5 |
do 105, q = 4, 6 |
l = l .or. k(p, 1, q - 3) .ne. 19 + x + p + 7 + 3 * q |
105 continue |
! Phase 4: private copies must have the same extents as the originals. |
call check (size (e, 1), 2, l) |
call check (size (e, 2), 3, l) |
call check (size (e, 3), 7, l) |
call check (size (e), 42, l) |
call check (size (f, 1), 2, l) |
call check (size (f, 2), 5, l) |
call check (size (f, 3), 7, l) |
call check (size (f), 70, l) |
call check (size (g, 1), 5, l) |
call check (size (g, 2), 5, l) |
call check (size (g), 25, l) |
call check (size (h, 1), 5, l) |
call check (size (h, 2), 5, l) |
call check (size (h), 25, l) |
call check (size (i, 1), 3, l) |
call check (size (i, 2), 5, l) |
call check (size (i, 3), 7, l) |
call check (size (i), 105, l) |
call check (size (j, 1), 4, l) |
call check (size (j, 2), 5, l) |
call check (size (j, 3), 7, l) |
call check (size (j), 140, l) |
call check (size (k, 1), 5, l) |
call check (size (k, 2), 1, l) |
call check (size (k, 3), 3, l) |
call check (size (k), 15, l) |
!$omp end parallel |
if (l) call abort |
end subroutine foo |
 |
! Builds the reference data and calls foo with n = 7. |
subroutine test |
character (len = 12) :: c |
character (len = 7) :: d |
integer, dimension (2, 3:5, 7) :: e |
integer, dimension (2, 3:7, 7) :: f |
character (len = 12), dimension (5, 3:7) :: g |
character (len = 7), dimension (5, 3:7) :: h |
real, dimension (3:5, 2:6, 1:7) :: i |
double precision, dimension (3:6, 2:6, 1:7) :: j |
integer, dimension (1:5, 7:7, 4:6) :: k |
integer :: p, q, r |
c = 'abcdefghijkl' |
d = 'ABCDEFG' |
forall (p = 1:2, q = 3:5, r = 1:7) e(p, q, r) = 5 + p + q + 2 * r |
forall (p = 1:2, q = 3:7, r = 1:7) f(p, q, r) = 25 + p + q + 2 * r |
forall (p = 1:5, q = 3:7, p + q .le. 8) g(p, q) = '0123456789AB' |
forall (p = 1:5, q = 3:7, p + q .gt. 8) g(p, q) = '9876543210ZY' |
forall (p = 1:5, q = 3:7, p + q .le. 8) h(p, q) = '0123456' |
forall (p = 1:5, q = 3:7, p + q .gt. 8) h(p, q) = '9876543' |
forall (p = 3:5, q = 2:6, r = 1:7) i(p, q, r) = 7.5 * p * q * r |
forall (p = 3:6, q = 2:6, r = 1:7) j(p, q, r) = 9.5 * p * q * r |
forall (p = 1:5, q = 7:7, r = 4:6) k(p, q, r) = 19 + p + q + 3 * r |
call foo (c, d, e, f, g, h, i, j, k, 7) |
end subroutine test |
end |
/libgomp.fortran/vla2.f90
0,0 → 1,142
! { dg-do run } |
|
! libgomp testcase (vla2.f90): OMP "private" on character/adjustable/
! assumed-shape dummy arguments.  Every dummy and helper array is made
! private inside the parallel region; each thread fills its private copy
! with values derived from its thread number x, then (after a barrier)
! verifies its own copy was not disturbed by other threads.  The size()
! checks at the end confirm the privatized copies kept the original shapes.
call test |
contains |
! Accumulate x /= y into the error flag l (no early abort, so all
! mismatches are folded into one final check).
subroutine check (x, y, l) |
integer :: x, y |
logical :: l |
l = l .or. x .ne. y |
end subroutine check |
 |
subroutine foo (c, d, e, f, g, h, i, j, k, n) |
use omp_lib |
integer :: n |
character (len = *) :: c |
character (len = n) :: d |
integer, dimension (2, 3:5, n) :: e |
integer, dimension (2, 3:n, n) :: f |
character (len = *), dimension (5, 3:n) :: g |
character (len = n), dimension (5, 3:n) :: h |
real, dimension (:, :, :) :: i |
double precision, dimension (3:, 5:, 7:) :: j |
integer, dimension (:, :, :) :: k |
logical :: l |
integer :: p, q, r |
character (len = n) :: s |
integer, dimension (2, 3:5, n) :: t |
integer, dimension (2, 3:n, n) :: u |
character (len = n), dimension (5, 3:n) :: v |
character (len = 2 * n + 24) :: w |
integer :: x |
character (len = 1) :: y |
l = .false. |
! All dummies and locals are private; l is the .or.-reduced error flag.
!$omp parallel default (none) private (c, d, e, f, g, h, i, j, k) & |
!$omp & private (s, t, u, v) reduction (.or.:l) num_threads (6) & |
!$omp private (p, q, r, w, x, y) |
! Each thread writes thread-number-dependent data into its private copies.
x = omp_get_thread_num () |
w = '' |
if (x .eq. 0) w = 'thread0thr_number_0THREAD0THR_NUMBER_0' |
if (x .eq. 1) w = 'thread1thr_number_1THREAD1THR_NUMBER_1' |
if (x .eq. 2) w = 'thread2thr_number_2THREAD2THR_NUMBER_2' |
if (x .eq. 3) w = 'thread3thr_number_3THREAD3THR_NUMBER_3' |
if (x .eq. 4) w = 'thread4thr_number_4THREAD4THR_NUMBER_4' |
if (x .eq. 5) w = 'thread5thr_number_5THREAD5THR_NUMBER_5' |
c = w(8:19) |
d = w(1:7) |
forall (p = 1:2, q = 3:5, r = 1:7) e(p, q, r) = 5 * x + p + q + 2 * r |
forall (p = 1:2, q = 3:7, r = 1:7) f(p, q, r) = 25 * x + p + q + 2 * r |
forall (p = 1:5, q = 3:7, p + q .le. 8) g(p, q) = w(8:19) |
forall (p = 1:5, q = 3:7, p + q .gt. 8) g(p, q) = w(27:38) |
forall (p = 1:5, q = 3:7, p + q .le. 8) h(p, q) = w(1:7) |
forall (p = 1:5, q = 3:7, p + q .gt. 8) h(p, q) = w(20:26) |
forall (p = 3:5, q = 2:6, r = 1:7) i(p - 2, q - 1, r) = (7.5 + x) * p * q * r |
forall (p = 3:5, q = 2:6, r = 1:7) j(p, q + 3, r + 6) = (9.5 + x) * p * q * r |
forall (p = 1:5, q = 7:7, r = 4:6) k(p, q - 6, r - 3) = 19 + x + p + q + 3 * r |
s = w(20:26) |
forall (p = 1:2, q = 3:5, r = 1:7) t(p, q, r) = -10 + x + p - q + 2 * r |
forall (p = 1:2, q = 3:7, r = 1:7) u(p, q, r) = 30 - x - p + q - 2 * r |
forall (p = 1:5, q = 3:7, p + q .le. 8) v(p, q) = w(1:7) |
forall (p = 1:5, q = 3:7, p + q .gt. 8) v(p, q) = w(20:26) |
! Barrier: all threads have written before anyone verifies, so any
! cross-thread sharing of the "private" copies would be detected below.
!$omp barrier |
y = '' |
if (x .eq. 0) y = '0' |
if (x .eq. 1) y = '1' |
if (x .eq. 2) y = '2' |
if (x .eq. 3) y = '3' |
if (x .eq. 4) y = '4' |
if (x .eq. 5) y = '5' |
l = l .or. w(7:7) .ne. y |
l = l .or. w(19:19) .ne. y |
l = l .or. w(26:26) .ne. y |
l = l .or. w(38:38) .ne. y |
l = l .or. c .ne. w(8:19) |
l = l .or. d .ne. w(1:7) |
l = l .or. s .ne. w(20:26) |
! Re-verify every element against the same closed-form expressions.
do 103, p = 1, 2 |
do 103, q = 3, 7 |
do 103, r = 1, 7 |
if (q .lt. 6) l = l .or. e(p, q, r) .ne. 5 * x + p + q + 2 * r |
l = l .or. f(p, q, r) .ne. 25 * x + p + q + 2 * r |
if (r .lt. 6 .and. q + r .le. 8) l = l .or. g(r, q) .ne. w(8:19) |
if (r .lt. 6 .and. q + r .gt. 8) l = l .or. g(r, q) .ne. w(27:38) |
if (r .lt. 6 .and. q + r .le. 8) l = l .or. h(r, q) .ne. w(1:7) |
if (r .lt. 6 .and. q + r .gt. 8) l = l .or. h(r, q) .ne. w(20:26) |
if (q .lt. 6) l = l .or. t(p, q, r) .ne. -10 + x + p - q + 2 * r |
l = l .or. u(p, q, r) .ne. 30 - x - p + q - 2 * r |
if (r .lt. 6 .and. q + r .le. 8) l = l .or. v(r, q) .ne. w(1:7) |
if (r .lt. 6 .and. q + r .gt. 8) l = l .or. v(r, q) .ne. w(20:26) |
103 continue |
do 104, p = 3, 5 |
do 104, q = 2, 6 |
do 104, r = 1, 7 |
l = l .or. i(p - 2, q - 1, r) .ne. (7.5 + x) * p * q * r |
l = l .or. j(p, q + 3, r + 6) .ne. (9.5 + x) * p * q * r |
104 continue |
do 105, p = 1, 5 |
do 105, q = 4, 6 |
l = l .or. k(p, 1, q - 3) .ne. 19 + x + p + 7 + 3 * q |
105 continue |
! Privatized copies must keep the shapes of the original dummies.
call check (size (e, 1), 2, l) |
call check (size (e, 2), 3, l) |
call check (size (e, 3), 7, l) |
call check (size (e), 42, l) |
call check (size (f, 1), 2, l) |
call check (size (f, 2), 5, l) |
call check (size (f, 3), 7, l) |
call check (size (f), 70, l) |
call check (size (g, 1), 5, l) |
call check (size (g, 2), 5, l) |
call check (size (g), 25, l) |
call check (size (h, 1), 5, l) |
call check (size (h, 2), 5, l) |
call check (size (h), 25, l) |
call check (size (i, 1), 3, l) |
call check (size (i, 2), 5, l) |
call check (size (i, 3), 7, l) |
call check (size (i), 105, l) |
call check (size (j, 1), 4, l) |
call check (size (j, 2), 5, l) |
call check (size (j, 3), 7, l) |
call check (size (j), 140, l) |
call check (size (k, 1), 5, l) |
call check (size (k, 2), 1, l) |
call check (size (k, 3), 3, l) |
call check (size (k), 15, l) |
!$omp end parallel |
if (l) call abort |
end subroutine foo |
 |
! Driver with fixed-shape actuals matching n = 7 (contents unchecked here:
! foo privatizes everything, so only shapes/lengths of the actuals matter).
subroutine test |
character (len = 12) :: c |
character (len = 7) :: d |
integer, dimension (2, 3:5, 7) :: e |
integer, dimension (2, 3:7, 7) :: f |
character (len = 12), dimension (5, 3:7) :: g |
character (len = 7), dimension (5, 3:7) :: h |
real, dimension (3:5, 2:6, 1:7) :: i |
double precision, dimension (3:6, 2:6, 1:7) :: j |
integer, dimension (1:5, 7:7, 4:6) :: k |
integer :: p, q, r |
call foo (c, d, e, f, g, h, i, j, k, 7) |
end subroutine test |
end |
/libgomp.fortran/vla3.f90
0,0 → 1,191
! { dg-do run } |
|
! libgomp testcase (vla3.f90): OMP "shared" on the same kinds of dummy
! arguments as vla2.  All arrays are shared; every thread first verifies
! the initial contents, then threads take turns (the do 110 z loop with a
! barrier before and after each turn) rewriting the shared data with
! z-dependent values, and all threads verify each rewrite.
call test |
contains |
! Accumulate x /= y into the error flag l.
subroutine check (x, y, l) |
integer :: x, y |
logical :: l |
l = l .or. x .ne. y |
end subroutine check |
 |
subroutine foo (c, d, e, f, g, h, i, j, k, n) |
use omp_lib |
integer :: n |
character (len = *) :: c |
character (len = n) :: d |
integer, dimension (2, 3:5, n) :: e |
integer, dimension (2, 3:n, n) :: f |
character (len = *), dimension (5, 3:n) :: g |
character (len = n), dimension (5, 3:n) :: h |
real, dimension (:, :, :) :: i |
double precision, dimension (3:, 5:, 7:) :: j |
integer, dimension (:, :, :) :: k |
logical :: l |
integer :: p, q, r |
character (len = n) :: s |
integer, dimension (2, 3:5, n) :: t |
integer, dimension (2, 3:n, n) :: u |
character (len = n), dimension (5, 3:n) :: v |
character (len = 2 * n + 24) :: w |
integer :: x, z |
character (len = 1) :: y |
! Locals s/t/u/v get known initial contents before the region starts.
s = 'PQRSTUV' |
forall (p = 1:2, q = 3:5, r = 1:7) t(p, q, r) = -10 + p - q + 2 * r |
forall (p = 1:2, q = 3:7, r = 1:7) u(p, q, r) = 30 - p + q - 2 * r |
forall (p = 1:5, q = 3:7, p + q .le. 8) v(p, q) = '_+|/Oo_' |
forall (p = 1:5, q = 3:7, p + q .gt. 8) v(p, q) = '///|||!' |
l = .false. |
! Arrays shared this time; only scalars/workspace are private.
!$omp parallel default (none) shared (c, d, e, f, g, h, i, j, k) & |
!$omp & shared (s, t, u, v) reduction (.or.:l) num_threads (6) & |
!$omp private (p, q, r, w, x, y) |
! Phase 1: every thread checks the initial (pre-region) contents.
l = l .or. c .ne. 'abcdefghijkl' |
l = l .or. d .ne. 'ABCDEFG' |
l = l .or. s .ne. 'PQRSTUV' |
do 100, p = 1, 2 |
do 100, q = 3, 7 |
do 100, r = 1, 7 |
if (q .lt. 6) l = l .or. e(p, q, r) .ne. 5 + p + q + 2 * r |
l = l .or. f(p, q, r) .ne. 25 + p + q + 2 * r |
if (r .lt. 6 .and. q + r .le. 8) l = l .or. g(r, q) .ne. '0123456789AB' |
if (r .lt. 6 .and. q + r .gt. 8) l = l .or. g(r, q) .ne. '9876543210ZY' |
if (r .lt. 6 .and. q + r .le. 8) l = l .or. h(r, q) .ne. '0123456' |
if (r .lt. 6 .and. q + r .gt. 8) l = l .or. h(r, q) .ne. '9876543' |
if (q .lt. 6) l = l .or. t(p, q, r) .ne. -10 + p - q + 2 * r |
l = l .or. u(p, q, r) .ne. 30 - p + q - 2 * r |
if (r .lt. 6 .and. q + r .le. 8) l = l .or. v(r, q) .ne. '_+|/Oo_' |
if (r .lt. 6 .and. q + r .gt. 8) l = l .or. v(r, q) .ne. '///|||!' |
100 continue |
do 101, p = 3, 5 |
do 101, q = 2, 6 |
do 101, r = 1, 7 |
l = l .or. i(p - 2, q - 1, r) .ne. 7.5 * p * q * r |
l = l .or. j(p, q + 3, r + 6) .ne. 9.5 * p * q * r |
101 continue |
do 102, p = 1, 5 |
do 102, q = 4, 6 |
l = l .or. k(p, 1, q - 3) .ne. 19 + p + 7 + 3 * q |
102 continue |
! Phase 2: threads take turns; in round z only thread z writes the shared
! arrays (between barriers), then all threads verify the round-z values.
do 110 z = 0, omp_get_num_threads () - 1 |
!$omp barrier |
x = omp_get_thread_num () |
w = '' |
if (z .eq. 0) w = 'thread0thr_number_0THREAD0THR_NUMBER_0' |
if (z .eq. 1) w = 'thread1thr_number_1THREAD1THR_NUMBER_1' |
if (z .eq. 2) w = 'thread2thr_number_2THREAD2THR_NUMBER_2' |
if (z .eq. 3) w = 'thread3thr_number_3THREAD3THR_NUMBER_3' |
if (z .eq. 4) w = 'thread4thr_number_4THREAD4THR_NUMBER_4' |
if (z .eq. 5) w = 'thread5thr_number_5THREAD5THR_NUMBER_5' |
if (x .eq. z) then |
c = w(8:19) |
d = w(1:7) |
forall (p = 1:2, q = 3:5, r = 1:7) e(p, q, r) = 5 * x + p + q + 2 * r |
forall (p = 1:2, q = 3:7, r = 1:7) f(p, q, r) = 25 * x + p + q + 2 * r |
forall (p = 1:5, q = 3:7, p + q .le. 8) g(p, q) = w(8:19) |
forall (p = 1:5, q = 3:7, p + q .gt. 8) g(p, q) = w(27:38) |
forall (p = 1:5, q = 3:7, p + q .le. 8) h(p, q) = w(1:7) |
forall (p = 1:5, q = 3:7, p + q .gt. 8) h(p, q) = w(20:26) |
forall (p = 3:5, q = 2:6, r = 1:7) i(p - 2, q - 1, r) = (7.5 + x) * p * q * r |
forall (p = 3:5, q = 2:6, r = 1:7) j(p, q + 3, r + 6) = (9.5 + x) * p * q * r |
forall (p = 1:5, q = 7:7, r = 4:6) k(p, q - 6, r - 3) = 19 + x + p + q + 3 * r |
s = w(20:26) |
forall (p = 1:2, q = 3:5, r = 1:7) t(p, q, r) = -10 + x + p - q + 2 * r |
forall (p = 1:2, q = 3:7, r = 1:7) u(p, q, r) = 30 - x - p + q - 2 * r |
forall (p = 1:5, q = 3:7, p + q .le. 8) v(p, q) = w(1:7) |
forall (p = 1:5, q = 3:7, p + q .gt. 8) v(p, q) = w(20:26) |
end if |
!$omp barrier |
! Every thread now checks against round writer z's expected values.
x = z |
y = '' |
if (x .eq. 0) y = '0' |
if (x .eq. 1) y = '1' |
if (x .eq. 2) y = '2' |
if (x .eq. 3) y = '3' |
if (x .eq. 4) y = '4' |
if (x .eq. 5) y = '5' |
l = l .or. w(7:7) .ne. y |
l = l .or. w(19:19) .ne. y |
l = l .or. w(26:26) .ne. y |
l = l .or. w(38:38) .ne. y |
l = l .or. c .ne. w(8:19) |
l = l .or. d .ne. w(1:7) |
l = l .or. s .ne. w(20:26) |
do 103, p = 1, 2 |
do 103, q = 3, 7 |
do 103, r = 1, 7 |
if (q .lt. 6) l = l .or. e(p, q, r) .ne. 5 * x + p + q + 2 * r |
l = l .or. f(p, q, r) .ne. 25 * x + p + q + 2 * r |
if (r .lt. 6 .and. q + r .le. 8) l = l .or. g(r, q) .ne. w(8:19) |
if (r .lt. 6 .and. q + r .gt. 8) l = l .or. g(r, q) .ne. w(27:38) |
if (r .lt. 6 .and. q + r .le. 8) l = l .or. h(r, q) .ne. w(1:7) |
if (r .lt. 6 .and. q + r .gt. 8) l = l .or. h(r, q) .ne. w(20:26) |
if (q .lt. 6) l = l .or. t(p, q, r) .ne. -10 + x + p - q + 2 * r |
l = l .or. u(p, q, r) .ne. 30 - x - p + q - 2 * r |
if (r .lt. 6 .and. q + r .le. 8) l = l .or. v(r, q) .ne. w(1:7) |
if (r .lt. 6 .and. q + r .gt. 8) l = l .or. v(r, q) .ne. w(20:26) |
103 continue |
do 104, p = 3, 5 |
do 104, q = 2, 6 |
do 104, r = 1, 7 |
l = l .or. i(p - 2, q - 1, r) .ne. (7.5 + x) * p * q * r |
l = l .or. j(p, q + 3, r + 6) .ne. (9.5 + x) * p * q * r |
104 continue |
do 105, p = 1, 5 |
do 105, q = 4, 6 |
l = l .or. k(p, 1, q - 3) .ne. 19 + x + p + 7 + 3 * q |
105 continue |
110 continue |
! Shared dummies must keep their declared shapes.
call check (size (e, 1), 2, l) |
call check (size (e, 2), 3, l) |
call check (size (e, 3), 7, l) |
call check (size (e), 42, l) |
call check (size (f, 1), 2, l) |
call check (size (f, 2), 5, l) |
call check (size (f, 3), 7, l) |
call check (size (f), 70, l) |
call check (size (g, 1), 5, l) |
call check (size (g, 2), 5, l) |
call check (size (g), 25, l) |
call check (size (h, 1), 5, l) |
call check (size (h, 2), 5, l) |
call check (size (h), 25, l) |
call check (size (i, 1), 3, l) |
call check (size (i, 2), 5, l) |
call check (size (i, 3), 7, l) |
call check (size (i), 105, l) |
call check (size (j, 1), 4, l) |
call check (size (j, 2), 5, l) |
call check (size (j, 3), 7, l) |
call check (size (j), 140, l) |
call check (size (k, 1), 5, l) |
call check (size (k, 2), 1, l) |
call check (size (k, 3), 3, l) |
call check (size (k), 15, l) |
!$omp end parallel |
if (l) call abort |
end subroutine foo |
 |
! Driver: initializes the shared actuals to the values phase 1 expects.
subroutine test |
character (len = 12) :: c |
character (len = 7) :: d |
integer, dimension (2, 3:5, 7) :: e |
integer, dimension (2, 3:7, 7) :: f |
character (len = 12), dimension (5, 3:7) :: g |
character (len = 7), dimension (5, 3:7) :: h |
real, dimension (3:5, 2:6, 1:7) :: i |
double precision, dimension (3:6, 2:6, 1:7) :: j |
integer, dimension (1:5, 7:7, 4:6) :: k |
integer :: p, q, r |
c = 'abcdefghijkl' |
d = 'ABCDEFG' |
forall (p = 1:2, q = 3:5, r = 1:7) e(p, q, r) = 5 + p + q + 2 * r |
forall (p = 1:2, q = 3:7, r = 1:7) f(p, q, r) = 25 + p + q + 2 * r |
forall (p = 1:5, q = 3:7, p + q .le. 8) g(p, q) = '0123456789AB' |
forall (p = 1:5, q = 3:7, p + q .gt. 8) g(p, q) = '9876543210ZY' |
forall (p = 1:5, q = 3:7, p + q .le. 8) h(p, q) = '0123456' |
forall (p = 1:5, q = 3:7, p + q .gt. 8) h(p, q) = '9876543' |
forall (p = 3:5, q = 2:6, r = 1:7) i(p, q, r) = 7.5 * p * q * r |
forall (p = 3:6, q = 2:6, r = 1:7) j(p, q, r) = 9.5 * p * q * r |
forall (p = 1:5, q = 7:7, r = 4:6) k(p, q, r) = 19 + p + q + 3 * r |
call foo (c, d, e, f, g, h, i, j, k, 7) |
end subroutine test |
end |
/libgomp.fortran/vla4.f90
0,0 → 1,228
! { dg-do run } |
|
! libgomp testcase (vla4.f90): "firstprivate" + "lastprivate" on the same
! dummy-argument kinds, on an !$omp parallel do.  Each thread first checks
! that its firstprivate copies carry the pre-region values, then overwrites
! them with thread-dependent data; after the loop, if 6 threads really ran
! (z2 == 6), the lastprivate copies must hold thread 5's values (the
! thread that executed the sequentially last iteration under static
! scheduling of 6 iterations on 6 threads).
call test |
contains |
! Accumulate x /= y into the error flag l.
subroutine check (x, y, l) |
integer :: x, y |
logical :: l |
l = l .or. x .ne. y |
end subroutine check |
 |
subroutine foo (c, d, e, f, g, h, i, j, k, n) |
use omp_lib |
integer :: n |
character (len = *) :: c |
character (len = n) :: d |
integer, dimension (2, 3:5, n) :: e |
integer, dimension (2, 3:n, n) :: f |
character (len = *), dimension (5, 3:n) :: g |
character (len = n), dimension (5, 3:n) :: h |
real, dimension (:, :, :) :: i |
double precision, dimension (3:, 5:, 7:) :: j |
integer, dimension (:, :, :) :: k |
logical :: l |
integer :: p, q, r |
character (len = n) :: s |
integer, dimension (2, 3:5, n) :: t |
integer, dimension (2, 3:n, n) :: u |
character (len = n), dimension (5, 3:n) :: v |
character (len = 2 * n + 24) :: w |
integer :: x, z, z2 |
character (len = 1) :: y |
! Known pre-region contents for the firstprivate locals.
s = 'PQRSTUV' |
forall (p = 1:2, q = 3:5, r = 1:7) t(p, q, r) = -10 + p - q + 2 * r |
forall (p = 1:2, q = 3:7, r = 1:7) u(p, q, r) = 30 - p + q - 2 * r |
forall (p = 1:5, q = 3:7, p + q .le. 8) v(p, q) = '_+|/Oo_' |
forall (p = 1:5, q = 3:7, p + q .gt. 8) v(p, q) = '///|||!' |
l = .false. |
! Pin the team size so one iteration maps to one thread.
call omp_set_dynamic (.false.) |
call omp_set_num_threads (6) |
!$omp parallel do default (none) firstprivate (c, d, e, f, g, h, i, j, k) & |
!$omp & firstprivate (s, t, u, v) reduction (.or.:l) num_threads (6) & |
!$omp private (p, q, r, w, x, y) schedule (static) shared (z2) & |
!$omp lastprivate (c, d, e, f, g, h, i, j, k, s, t, u, v) |
do 110 z = 0, omp_get_num_threads () - 1 |
if (omp_get_thread_num () .eq. 0) z2 = omp_get_num_threads () |
! Firstprivate copies must still hold the pre-region values.
l = l .or. c .ne. 'abcdefghijkl' |
l = l .or. d .ne. 'ABCDEFG' |
l = l .or. s .ne. 'PQRSTUV' |
do 100, p = 1, 2 |
do 100, q = 3, 7 |
do 100, r = 1, 7 |
if (q .lt. 6) l = l .or. e(p, q, r) .ne. 5 + p + q + 2 * r |
l = l .or. f(p, q, r) .ne. 25 + p + q + 2 * r |
if (r .lt. 6 .and. q + r .le. 8) l = l .or. g(r, q) .ne. '0123456789AB' |
if (r .lt. 6 .and. q + r .gt. 8) l = l .or. g(r, q) .ne. '9876543210ZY' |
if (r .lt. 6 .and. q + r .le. 8) l = l .or. h(r, q) .ne. '0123456' |
if (r .lt. 6 .and. q + r .gt. 8) l = l .or. h(r, q) .ne. '9876543' |
if (q .lt. 6) l = l .or. t(p, q, r) .ne. -10 + p - q + 2 * r |
l = l .or. u(p, q, r) .ne. 30 - p + q - 2 * r |
if (r .lt. 6 .and. q + r .le. 8) l = l .or. v(r, q) .ne. '_+|/Oo_' |
if (r .lt. 6 .and. q + r .gt. 8) l = l .or. v(r, q) .ne. '///|||!' |
100 continue |
do 101, p = 3, 5 |
do 101, q = 2, 6 |
do 101, r = 1, 7 |
l = l .or. i(p - 2, q - 1, r) .ne. 7.5 * p * q * r |
l = l .or. j(p, q + 3, r + 6) .ne. 9.5 * p * q * r |
101 continue |
do 102, p = 1, 5 |
do 102, q = 4, 6 |
l = l .or. k(p, 1, q - 3) .ne. 19 + p + 7 + 3 * q |
102 continue |
! Now overwrite the private copies with thread-dependent data.
x = omp_get_thread_num () |
w = '' |
if (x .eq. 0) w = 'thread0thr_number_0THREAD0THR_NUMBER_0' |
if (x .eq. 1) w = 'thread1thr_number_1THREAD1THR_NUMBER_1' |
if (x .eq. 2) w = 'thread2thr_number_2THREAD2THR_NUMBER_2' |
if (x .eq. 3) w = 'thread3thr_number_3THREAD3THR_NUMBER_3' |
if (x .eq. 4) w = 'thread4thr_number_4THREAD4THR_NUMBER_4' |
if (x .eq. 5) w = 'thread5thr_number_5THREAD5THR_NUMBER_5' |
c = w(8:19) |
d = w(1:7) |
forall (p = 1:2, q = 3:5, r = 1:7) e(p, q, r) = 5 * x + p + q + 2 * r |
forall (p = 1:2, q = 3:7, r = 1:7) f(p, q, r) = 25 * x + p + q + 2 * r |
forall (p = 1:5, q = 3:7, p + q .le. 8) g(p, q) = w(8:19) |
forall (p = 1:5, q = 3:7, p + q .gt. 8) g(p, q) = w(27:38) |
forall (p = 1:5, q = 3:7, p + q .le. 8) h(p, q) = w(1:7) |
forall (p = 1:5, q = 3:7, p + q .gt. 8) h(p, q) = w(20:26) |
forall (p = 3:5, q = 2:6, r = 1:7) i(p - 2, q - 1, r) = (7.5 + x) * p * q * r |
forall (p = 3:5, q = 2:6, r = 1:7) j(p, q + 3, r + 6) = (9.5 + x) * p * q * r |
forall (p = 1:5, q = 7:7, r = 4:6) k(p, q - 6, r - 3) = 19 + x + p + q + 3 * r |
s = w(20:26) |
forall (p = 1:2, q = 3:5, r = 1:7) t(p, q, r) = -10 + x + p - q + 2 * r |
forall (p = 1:2, q = 3:7, r = 1:7) u(p, q, r) = 30 - x - p + q - 2 * r |
forall (p = 1:5, q = 3:7, p + q .le. 8) v(p, q) = w(1:7) |
forall (p = 1:5, q = 3:7, p + q .gt. 8) v(p, q) = w(20:26) |
!$omp barrier |
! Verify this thread's private copies survived the other threads' writes.
y = '' |
if (x .eq. 0) y = '0' |
if (x .eq. 1) y = '1' |
if (x .eq. 2) y = '2' |
if (x .eq. 3) y = '3' |
if (x .eq. 4) y = '4' |
if (x .eq. 5) y = '5' |
l = l .or. w(7:7) .ne. y |
l = l .or. w(19:19) .ne. y |
l = l .or. w(26:26) .ne. y |
l = l .or. w(38:38) .ne. y |
l = l .or. c .ne. w(8:19) |
l = l .or. d .ne. w(1:7) |
l = l .or. s .ne. w(20:26) |
do 103, p = 1, 2 |
do 103, q = 3, 7 |
do 103, r = 1, 7 |
if (q .lt. 6) l = l .or. e(p, q, r) .ne. 5 * x + p + q + 2 * r |
l = l .or. f(p, q, r) .ne. 25 * x + p + q + 2 * r |
if (r .lt. 6 .and. q + r .le. 8) l = l .or. g(r, q) .ne. w(8:19) |
if (r .lt. 6 .and. q + r .gt. 8) l = l .or. g(r, q) .ne. w(27:38) |
if (r .lt. 6 .and. q + r .le. 8) l = l .or. h(r, q) .ne. w(1:7) |
if (r .lt. 6 .and. q + r .gt. 8) l = l .or. h(r, q) .ne. w(20:26) |
if (q .lt. 6) l = l .or. t(p, q, r) .ne. -10 + x + p - q + 2 * r |
l = l .or. u(p, q, r) .ne. 30 - x - p + q - 2 * r |
if (r .lt. 6 .and. q + r .le. 8) l = l .or. v(r, q) .ne. w(1:7) |
if (r .lt. 6 .and. q + r .gt. 8) l = l .or. v(r, q) .ne. w(20:26) |
103 continue |
do 104, p = 3, 5 |
do 104, q = 2, 6 |
do 104, r = 1, 7 |
l = l .or. i(p - 2, q - 1, r) .ne. (7.5 + x) * p * q * r |
l = l .or. j(p, q + 3, r + 6) .ne. (9.5 + x) * p * q * r |
104 continue |
do 105, p = 1, 5 |
do 105, q = 4, 6 |
l = l .or. k(p, 1, q - 3) .ne. 19 + x + p + 7 + 3 * q |
105 continue |
call check (size (e, 1), 2, l) |
call check (size (e, 2), 3, l) |
call check (size (e, 3), 7, l) |
call check (size (e), 42, l) |
call check (size (f, 1), 2, l) |
call check (size (f, 2), 5, l) |
call check (size (f, 3), 7, l) |
call check (size (f), 70, l) |
call check (size (g, 1), 5, l) |
call check (size (g, 2), 5, l) |
call check (size (g), 25, l) |
call check (size (h, 1), 5, l) |
call check (size (h, 2), 5, l) |
call check (size (h), 25, l) |
call check (size (i, 1), 3, l) |
call check (size (i, 2), 5, l) |
call check (size (i, 3), 7, l) |
call check (size (i), 105, l) |
call check (size (j, 1), 4, l) |
call check (size (j, 2), 5, l) |
call check (size (j, 3), 7, l) |
call check (size (j), 140, l) |
call check (size (k, 1), 5, l) |
call check (size (k, 2), 1, l) |
call check (size (k, 3), 3, l) |
call check (size (k), 15, l) |
110 continue |
!$omp end parallel do |
if (l) call abort |
! Lastprivate check: only meaningful if the team really had 6 threads.
if (z2 == 6) then |
x = 5 |
w = 'thread5thr_number_5THREAD5THR_NUMBER_5' |
y = '5' |
l = l .or. w(7:7) .ne. y |
l = l .or. w(19:19) .ne. y |
l = l .or. w(26:26) .ne. y |
l = l .or. w(38:38) .ne. y |
l = l .or. c .ne. w(8:19) |
l = l .or. d .ne. w(1:7) |
l = l .or. s .ne. w(20:26) |
do 113, p = 1, 2 |
do 113, q = 3, 7 |
do 113, r = 1, 7 |
if (q .lt. 6) l = l .or. e(p, q, r) .ne. 5 * x + p + q + 2 * r |
l = l .or. f(p, q, r) .ne. 25 * x + p + q + 2 * r |
if (r .lt. 6 .and. q + r .le. 8) l = l .or. g(r, q) .ne. w(8:19) |
if (r .lt. 6 .and. q + r .gt. 8) l = l .or. g(r, q) .ne. w(27:38) |
if (r .lt. 6 .and. q + r .le. 8) l = l .or. h(r, q) .ne. w(1:7) |
if (r .lt. 6 .and. q + r .gt. 8) l = l .or. h(r, q) .ne. w(20:26) |
if (q .lt. 6) l = l .or. t(p, q, r) .ne. -10 + x + p - q + 2 * r |
l = l .or. u(p, q, r) .ne. 30 - x - p + q - 2 * r |
if (r .lt. 6 .and. q + r .le. 8) l = l .or. v(r, q) .ne. w(1:7) |
if (r .lt. 6 .and. q + r .gt. 8) l = l .or. v(r, q) .ne. w(20:26) |
113 continue |
do 114, p = 3, 5 |
do 114, q = 2, 6 |
do 114, r = 1, 7 |
l = l .or. i(p - 2, q - 1, r) .ne. (7.5 + x) * p * q * r |
l = l .or. j(p, q + 3, r + 6) .ne. (9.5 + x) * p * q * r |
114 continue |
do 115, p = 1, 5 |
do 115, q = 4, 6 |
l = l .or. k(p, 1, q - 3) .ne. 19 + x + p + 7 + 3 * q |
115 continue |
if (l) call abort |
end if |
end subroutine foo |
 |
! Driver: initializes the actuals to the values the firstprivate copies
! are checked against inside the loop.
subroutine test |
character (len = 12) :: c |
character (len = 7) :: d |
integer, dimension (2, 3:5, 7) :: e |
integer, dimension (2, 3:7, 7) :: f |
character (len = 12), dimension (5, 3:7) :: g |
character (len = 7), dimension (5, 3:7) :: h |
real, dimension (3:5, 2:6, 1:7) :: i |
double precision, dimension (3:6, 2:6, 1:7) :: j |
integer, dimension (1:5, 7:7, 4:6) :: k |
integer :: p, q, r |
c = 'abcdefghijkl' |
d = 'ABCDEFG' |
forall (p = 1:2, q = 3:5, r = 1:7) e(p, q, r) = 5 + p + q + 2 * r |
forall (p = 1:2, q = 3:7, r = 1:7) f(p, q, r) = 25 + p + q + 2 * r |
forall (p = 1:5, q = 3:7, p + q .le. 8) g(p, q) = '0123456789AB' |
forall (p = 1:5, q = 3:7, p + q .gt. 8) g(p, q) = '9876543210ZY' |
forall (p = 1:5, q = 3:7, p + q .le. 8) h(p, q) = '0123456' |
forall (p = 1:5, q = 3:7, p + q .gt. 8) h(p, q) = '9876543' |
forall (p = 3:5, q = 2:6, r = 1:7) i(p, q, r) = 7.5 * p * q * r |
forall (p = 3:6, q = 2:6, r = 1:7) j(p, q, r) = 9.5 * p * q * r |
forall (p = 1:5, q = 7:7, r = 4:6) k(p, q, r) = 19 + p + q + 3 * r |
call foo (c, d, e, f, g, h, i, j, k, 7) |
end subroutine test |
end |
/libgomp.fortran/omp_cond1.f
0,0 → 1,22
C Test conditional compilation in fixed form if -fopenmp |
! { dg-options "-fopenmp" } |
! Deliberately obfuscated fixed-form conditional-compilation test: the
! !$ / c$ / C$ / *$ sentinels (and their continuation lines) must be
! honored only when compiling with -fopenmp, so bar must end up 43 and
! baz 51242; lines like '!$ X', '! $' and 'c$ 10&' must NOT be treated
! as valid conditional-compilation lines.
! NOTE(review): fixed-form column alignment (labels in cols 1-5,
! continuation marker in col 6) appears lost in this listing -- verify
! the exact column layout against the upstream testsuite file.
10 foo = 2 |
&56 |
if (foo.ne.256) call abort |
bar = 26 |
!$2 0 ba |
c$ +r = 42 |
!$ bar = 62 |
!$ bar = bar + 1 |
if (bar.ne.43) call abort |
baz = bar |
*$ 0baz = 5 |
C$ +12! Comment |
c$ !4 |
!$ +!Another comment |
*$ &2 |
!$ X baz = 0 ! Not valid OpenMP conditional compilation lines |
! $ baz = 1 |
c$ 10&baz = 2 |
if (baz.ne.51242) call abort |
end |
/libgomp.fortran/vla5.f90
0,0 → 1,200
! { dg-do run } |
|
! libgomp testcase (vla5.f90): "lastprivate" alone (no firstprivate) on an
! !$omp parallel do over the same dummy-argument kinds.  Unlike vla4, the
! private copies start undefined, so each thread writes before reading;
! after the loop the lastprivate copies must hold thread 5's values when
! 6 threads ran (z2 == 6, static schedule, 6 iterations).
call test |
contains |
! Accumulate x /= y into the error flag l.
subroutine check (x, y, l) |
integer :: x, y |
logical :: l |
l = l .or. x .ne. y |
end subroutine check |
 |
subroutine foo (c, d, e, f, g, h, i, j, k, n) |
use omp_lib |
integer :: n |
character (len = *) :: c |
character (len = n) :: d |
integer, dimension (2, 3:5, n) :: e |
integer, dimension (2, 3:n, n) :: f |
character (len = *), dimension (5, 3:n) :: g |
character (len = n), dimension (5, 3:n) :: h |
real, dimension (:, :, :) :: i |
double precision, dimension (3:, 5:, 7:) :: j |
integer, dimension (:, :, :) :: k |
logical :: l |
integer :: p, q, r |
character (len = n) :: s |
integer, dimension (2, 3:5, n) :: t |
integer, dimension (2, 3:n, n) :: u |
character (len = n), dimension (5, 3:n) :: v |
character (len = 2 * n + 24) :: w |
integer :: x, z, z2 |
character (len = 1) :: y |
! Pre-region values (lastprivate overwrites them at region exit).
s = 'PQRSTUV' |
forall (p = 1:2, q = 3:5, r = 1:7) t(p, q, r) = -10 + p - q + 2 * r |
forall (p = 1:2, q = 3:7, r = 1:7) u(p, q, r) = 30 - p + q - 2 * r |
forall (p = 1:5, q = 3:7, p + q .le. 8) v(p, q) = '_+|/Oo_' |
forall (p = 1:5, q = 3:7, p + q .gt. 8) v(p, q) = '///|||!' |
l = .false. |
! Pin the team size so one iteration maps to one thread.
call omp_set_dynamic (.false.) |
call omp_set_num_threads (6) |
!$omp parallel do default (none) lastprivate (c, d, e, f, g, h, i, j, k) & |
!$omp & lastprivate (s, t, u, v) reduction (.or.:l) num_threads (6) & |
!$omp private (p, q, r, w, x, y) schedule (static) shared (z2) |
do 110 z = 0, omp_get_num_threads () - 1 |
if (omp_get_thread_num () .eq. 0) z2 = omp_get_num_threads () |
! Fill the (initially undefined) private copies with thread-tagged data.
x = omp_get_thread_num () |
w = '' |
if (x .eq. 0) w = 'thread0thr_number_0THREAD0THR_NUMBER_0' |
if (x .eq. 1) w = 'thread1thr_number_1THREAD1THR_NUMBER_1' |
if (x .eq. 2) w = 'thread2thr_number_2THREAD2THR_NUMBER_2' |
if (x .eq. 3) w = 'thread3thr_number_3THREAD3THR_NUMBER_3' |
if (x .eq. 4) w = 'thread4thr_number_4THREAD4THR_NUMBER_4' |
if (x .eq. 5) w = 'thread5thr_number_5THREAD5THR_NUMBER_5' |
c = w(8:19) |
d = w(1:7) |
forall (p = 1:2, q = 3:5, r = 1:7) e(p, q, r) = 5 * x + p + q + 2 * r |
forall (p = 1:2, q = 3:7, r = 1:7) f(p, q, r) = 25 * x + p + q + 2 * r |
forall (p = 1:5, q = 3:7, p + q .le. 8) g(p, q) = w(8:19) |
forall (p = 1:5, q = 3:7, p + q .gt. 8) g(p, q) = w(27:38) |
forall (p = 1:5, q = 3:7, p + q .le. 8) h(p, q) = w(1:7) |
forall (p = 1:5, q = 3:7, p + q .gt. 8) h(p, q) = w(20:26) |
forall (p = 3:5, q = 2:6, r = 1:7) i(p - 2, q - 1, r) = (7.5 + x) * p * q * r |
forall (p = 3:5, q = 2:6, r = 1:7) j(p, q + 3, r + 6) = (9.5 + x) * p * q * r |
forall (p = 1:5, q = 7:7, r = 4:6) k(p, q - 6, r - 3) = 19 + x + p + q + 3 * r |
s = w(20:26) |
forall (p = 1:2, q = 3:5, r = 1:7) t(p, q, r) = -10 + x + p - q + 2 * r |
forall (p = 1:2, q = 3:7, r = 1:7) u(p, q, r) = 30 - x - p + q - 2 * r |
forall (p = 1:5, q = 3:7, p + q .le. 8) v(p, q) = w(1:7) |
forall (p = 1:5, q = 3:7, p + q .gt. 8) v(p, q) = w(20:26) |
!$omp barrier |
! Verify this thread's private copies survived the other threads' writes.
y = '' |
if (x .eq. 0) y = '0' |
if (x .eq. 1) y = '1' |
if (x .eq. 2) y = '2' |
if (x .eq. 3) y = '3' |
if (x .eq. 4) y = '4' |
if (x .eq. 5) y = '5' |
l = l .or. w(7:7) .ne. y |
l = l .or. w(19:19) .ne. y |
l = l .or. w(26:26) .ne. y |
l = l .or. w(38:38) .ne. y |
l = l .or. c .ne. w(8:19) |
l = l .or. d .ne. w(1:7) |
l = l .or. s .ne. w(20:26) |
do 103, p = 1, 2 |
do 103, q = 3, 7 |
do 103, r = 1, 7 |
if (q .lt. 6) l = l .or. e(p, q, r) .ne. 5 * x + p + q + 2 * r |
l = l .or. f(p, q, r) .ne. 25 * x + p + q + 2 * r |
if (r .lt. 6 .and. q + r .le. 8) l = l .or. g(r, q) .ne. w(8:19) |
if (r .lt. 6 .and. q + r .gt. 8) l = l .or. g(r, q) .ne. w(27:38) |
if (r .lt. 6 .and. q + r .le. 8) l = l .or. h(r, q) .ne. w(1:7) |
if (r .lt. 6 .and. q + r .gt. 8) l = l .or. h(r, q) .ne. w(20:26) |
if (q .lt. 6) l = l .or. t(p, q, r) .ne. -10 + x + p - q + 2 * r |
l = l .or. u(p, q, r) .ne. 30 - x - p + q - 2 * r |
if (r .lt. 6 .and. q + r .le. 8) l = l .or. v(r, q) .ne. w(1:7) |
if (r .lt. 6 .and. q + r .gt. 8) l = l .or. v(r, q) .ne. w(20:26) |
103 continue |
do 104, p = 3, 5 |
do 104, q = 2, 6 |
do 104, r = 1, 7 |
l = l .or. i(p - 2, q - 1, r) .ne. (7.5 + x) * p * q * r |
l = l .or. j(p, q + 3, r + 6) .ne. (9.5 + x) * p * q * r |
104 continue |
do 105, p = 1, 5 |
do 105, q = 4, 6 |
l = l .or. k(p, 1, q - 3) .ne. 19 + x + p + 7 + 3 * q |
105 continue |
call check (size (e, 1), 2, l) |
call check (size (e, 2), 3, l) |
call check (size (e, 3), 7, l) |
call check (size (e), 42, l) |
call check (size (f, 1), 2, l) |
call check (size (f, 2), 5, l) |
call check (size (f, 3), 7, l) |
call check (size (f), 70, l) |
call check (size (g, 1), 5, l) |
call check (size (g, 2), 5, l) |
call check (size (g), 25, l) |
call check (size (h, 1), 5, l) |
call check (size (h, 2), 5, l) |
call check (size (h), 25, l) |
call check (size (i, 1), 3, l) |
call check (size (i, 2), 5, l) |
call check (size (i, 3), 7, l) |
call check (size (i), 105, l) |
call check (size (j, 1), 4, l) |
call check (size (j, 2), 5, l) |
call check (size (j, 3), 7, l) |
call check (size (j), 140, l) |
call check (size (k, 1), 5, l) |
call check (size (k, 2), 1, l) |
call check (size (k, 3), 3, l) |
call check (size (k), 15, l) |
110 continue |
!$omp end parallel do |
if (l) call abort |
! Lastprivate check: only meaningful if the team really had 6 threads.
if (z2 == 6) then |
x = 5 |
w = 'thread5thr_number_5THREAD5THR_NUMBER_5' |
y = '5' |
l = l .or. w(7:7) .ne. y |
l = l .or. w(19:19) .ne. y |
l = l .or. w(26:26) .ne. y |
l = l .or. w(38:38) .ne. y |
l = l .or. c .ne. w(8:19) |
l = l .or. d .ne. w(1:7) |
l = l .or. s .ne. w(20:26) |
do 113, p = 1, 2 |
do 113, q = 3, 7 |
do 113, r = 1, 7 |
if (q .lt. 6) l = l .or. e(p, q, r) .ne. 5 * x + p + q + 2 * r |
l = l .or. f(p, q, r) .ne. 25 * x + p + q + 2 * r |
if (r .lt. 6 .and. q + r .le. 8) l = l .or. g(r, q) .ne. w(8:19) |
if (r .lt. 6 .and. q + r .gt. 8) l = l .or. g(r, q) .ne. w(27:38) |
if (r .lt. 6 .and. q + r .le. 8) l = l .or. h(r, q) .ne. w(1:7) |
if (r .lt. 6 .and. q + r .gt. 8) l = l .or. h(r, q) .ne. w(20:26) |
if (q .lt. 6) l = l .or. t(p, q, r) .ne. -10 + x + p - q + 2 * r |
l = l .or. u(p, q, r) .ne. 30 - x - p + q - 2 * r |
if (r .lt. 6 .and. q + r .le. 8) l = l .or. v(r, q) .ne. w(1:7) |
if (r .lt. 6 .and. q + r .gt. 8) l = l .or. v(r, q) .ne. w(20:26) |
113 continue |
do 114, p = 3, 5 |
do 114, q = 2, 6 |
do 114, r = 1, 7 |
l = l .or. i(p - 2, q - 1, r) .ne. (7.5 + x) * p * q * r |
l = l .or. j(p, q + 3, r + 6) .ne. (9.5 + x) * p * q * r |
114 continue |
do 115, p = 1, 5 |
do 115, q = 4, 6 |
l = l .or. k(p, 1, q - 3) .ne. 19 + x + p + 7 + 3 * q |
115 continue |
if (l) call abort |
end if |
end subroutine foo |
 |
! Driver: initial contents are irrelevant to the lastprivate semantics
! but kept identical to the sibling vla tests.
subroutine test |
character (len = 12) :: c |
character (len = 7) :: d |
integer, dimension (2, 3:5, 7) :: e |
integer, dimension (2, 3:7, 7) :: f |
character (len = 12), dimension (5, 3:7) :: g |
character (len = 7), dimension (5, 3:7) :: h |
real, dimension (3:5, 2:6, 1:7) :: i |
double precision, dimension (3:6, 2:6, 1:7) :: j |
integer, dimension (1:5, 7:7, 4:6) :: k |
integer :: p, q, r |
c = 'abcdefghijkl' |
d = 'ABCDEFG' |
forall (p = 1:2, q = 3:5, r = 1:7) e(p, q, r) = 5 + p + q + 2 * r |
forall (p = 1:2, q = 3:7, r = 1:7) f(p, q, r) = 25 + p + q + 2 * r |
forall (p = 1:5, q = 3:7, p + q .le. 8) g(p, q) = '0123456789AB' |
forall (p = 1:5, q = 3:7, p + q .gt. 8) g(p, q) = '9876543210ZY' |
forall (p = 1:5, q = 3:7, p + q .le. 8) h(p, q) = '0123456' |
forall (p = 1:5, q = 3:7, p + q .gt. 8) h(p, q) = '9876543' |
forall (p = 3:5, q = 2:6, r = 1:7) i(p, q, r) = 7.5 * p * q * r |
forall (p = 3:6, q = 2:6, r = 1:7) j(p, q, r) = 9.5 * p * q * r |
forall (p = 1:5, q = 7:7, r = 4:6) k(p, q, r) = 19 + p + q + 3 * r |
call foo (c, d, e, f, g, h, i, j, k, 7) |
end subroutine test |
end |
/libgomp.fortran/vla6.f90
0,0 → 1,191
! { dg-do run } |
 |
! vla6.f90: verifies that variable-length character strings and
! assumed-shape / explicit-shape arrays whose bounds depend on a dummy
! argument (n) are correctly handled as PRIVATE inside an OpenMP parallel
! region, and that "!$omp end single copyprivate" broadcasts the single
! thread's values of all such variables to every other thread.
call test |
contains |
  ! Accumulates into l whether x differs from y; used to check that
  ! array descriptors (extents) survive privatization intact. |
  subroutine check (x, y, l) |
    integer :: x, y |
    logical :: l |
    l = l .or. x .ne. y |
  end subroutine check |
 |
  ! Each of 6 threads fills its private copies with thread-dependent
  ! patterns (derived from w, which encodes the thread number), checks
  ! them, then one thread's values are copyprivate-broadcast and all
  ! threads re-check against the broadcasting thread's pattern. |
  subroutine foo (c, d, e, f, g, h, i, j, k, n) |
    use omp_lib |
    integer :: n |
    character (len = *) :: c |
    character (len = n) :: d |
    integer, dimension (2, 3:5, n) :: e |
    integer, dimension (2, 3:n, n) :: f |
    character (len = *), dimension (5, 3:n) :: g |
    character (len = n), dimension (5, 3:n) :: h |
    real, dimension (:, :, :) :: i |
    double precision, dimension (3:, 5:, 7:) :: j |
    integer, dimension (:, :, :) :: k |
    logical :: l |
    integer :: p, q, r |
    character (len = n) :: s |
    integer, dimension (2, 3:5, n) :: t |
    integer, dimension (2, 3:n, n) :: u |
    character (len = n), dimension (5, 3:n) :: v |
    character (len = 2 * n + 24) :: w |
    integer :: x, z |
    character (len = 1) :: y |
    l = .false. |
!$omp parallel default (none) private (c, d, e, f, g, h, i, j, k) & |
!$omp & private (s, t, u, v) reduction (.or.:l) num_threads (6) & |
!$omp private (p, q, r, w, x, y) shared (z) |
    x = omp_get_thread_num () |
    w = '' |
    if (x .eq. 0) w = 'thread0thr_number_0THREAD0THR_NUMBER_0' |
    if (x .eq. 1) w = 'thread1thr_number_1THREAD1THR_NUMBER_1' |
    if (x .eq. 2) w = 'thread2thr_number_2THREAD2THR_NUMBER_2' |
    if (x .eq. 3) w = 'thread3thr_number_3THREAD3THR_NUMBER_3' |
    if (x .eq. 4) w = 'thread4thr_number_4THREAD4THR_NUMBER_4' |
    if (x .eq. 5) w = 'thread5thr_number_5THREAD5THR_NUMBER_5' |
    c = w(8:19) |
    d = w(1:7) |
    forall (p = 1:2, q = 3:5, r = 1:7) e(p, q, r) = 5 * x + p + q + 2 * r |
    forall (p = 1:2, q = 3:7, r = 1:7) f(p, q, r) = 25 * x + p + q + 2 * r |
    forall (p = 1:5, q = 3:7, p + q .le. 8) g(p, q) = w(8:19) |
    forall (p = 1:5, q = 3:7, p + q .gt. 8) g(p, q) = w(27:38) |
    forall (p = 1:5, q = 3:7, p + q .le. 8) h(p, q) = w(1:7) |
    forall (p = 1:5, q = 3:7, p + q .gt. 8) h(p, q) = w(20:26) |
    forall (p = 3:5, q = 2:6, r = 1:7) i(p - 2, q - 1, r) = (7.5 + x) * p * q * r |
    forall (p = 3:5, q = 2:6, r = 1:7) j(p, q + 3, r + 6) = (9.5 + x) * p * q * r |
    forall (p = 1:5, q = 7:7, r = 4:6) k(p, q - 6, r - 3) = 19 + x + p + q + 3 * r |
    s = w(20:26) |
    forall (p = 1:2, q = 3:5, r = 1:7) t(p, q, r) = -10 + x + p - q + 2 * r |
    forall (p = 1:2, q = 3:7, r = 1:7) u(p, q, r) = 30 - x - p + q - 2 * r |
    forall (p = 1:5, q = 3:7, p + q .le. 8) v(p, q) = w(1:7) |
    forall (p = 1:5, q = 3:7, p + q .gt. 8) v(p, q) = w(20:26) |
!$omp barrier |
    y = '' |
    if (x .eq. 0) y = '0' |
    if (x .eq. 1) y = '1' |
    if (x .eq. 2) y = '2' |
    if (x .eq. 3) y = '3' |
    if (x .eq. 4) y = '4' |
    if (x .eq. 5) y = '5' |
    l = l .or. w(7:7) .ne. y |
    l = l .or. w(19:19) .ne. y |
    l = l .or. w(26:26) .ne. y |
    l = l .or. w(38:38) .ne. y |
    l = l .or. c .ne. w(8:19) |
    l = l .or. d .ne. w(1:7) |
    l = l .or. s .ne. w(20:26) |
    do 103, p = 1, 2 |
      do 103, q = 3, 7 |
	do 103, r = 1, 7 |
	  if (q .lt. 6) l = l .or. e(p, q, r) .ne. 5 * x + p + q + 2 * r |
	  l = l .or. f(p, q, r) .ne. 25 * x + p + q + 2 * r |
	  if (r .lt. 6 .and. q + r .le. 8) l = l .or. g(r, q) .ne. w(8:19) |
	  if (r .lt. 6 .and. q + r .gt. 8) l = l .or. g(r, q) .ne. w(27:38) |
	  if (r .lt. 6 .and. q + r .le. 8) l = l .or. h(r, q) .ne. w(1:7) |
	  if (r .lt. 6 .and. q + r .gt. 8) l = l .or. h(r, q) .ne. w(20:26) |
	  if (q .lt. 6) l = l .or. t(p, q, r) .ne. -10 + x + p - q + 2 * r |
	  l = l .or. u(p, q, r) .ne. 30 - x - p + q - 2 * r |
	  if (r .lt. 6 .and. q + r .le. 8) l = l .or. v(r, q) .ne. w(1:7) |
	  if (r .lt. 6 .and. q + r .gt. 8) l = l .or. v(r, q) .ne. w(20:26) |
103	continue |
    do 104, p = 3, 5 |
      do 104, q = 2, 6 |
	do 104, r = 1, 7 |
	  l = l .or. i(p - 2, q - 1, r) .ne. (7.5 + x) * p * q * r |
	  l = l .or. j(p, q + 3, r + 6) .ne. (9.5 + x) * p * q * r |
104	continue |
    do 105, p = 1, 5 |
      do 105, q = 4, 6 |
	l = l .or. k(p, 1, q - 3) .ne. 19 + x + p + 7 + 3 * q |
105	continue |
    ! Extents of the privatized copies must match the actual arguments. |
    call check (size (e, 1), 2, l) |
    call check (size (e, 2), 3, l) |
    call check (size (e, 3), 7, l) |
    call check (size (e), 42, l) |
    call check (size (f, 1), 2, l) |
    call check (size (f, 2), 5, l) |
    call check (size (f, 3), 7, l) |
    call check (size (f), 70, l) |
    call check (size (g, 1), 5, l) |
    call check (size (g, 2), 5, l) |
    call check (size (g), 25, l) |
    call check (size (h, 1), 5, l) |
    call check (size (h, 2), 5, l) |
    call check (size (h), 25, l) |
    call check (size (i, 1), 3, l) |
    call check (size (i, 2), 5, l) |
    call check (size (i, 3), 7, l) |
    call check (size (i), 105, l) |
    call check (size (j, 1), 4, l) |
    call check (size (j, 2), 5, l) |
    call check (size (j, 3), 7, l) |
    call check (size (j), 140, l) |
    call check (size (k, 1), 5, l) |
    call check (size (k, 2), 1, l) |
    call check (size (k, 3), 3, l) |
    call check (size (k), 15, l) |
    ! One thread records its number in z and broadcasts all its private
    ! copies; every thread then re-validates against that thread's pattern. |
!$omp single |
    z = omp_get_thread_num () |
!$omp end single copyprivate (c, d, e, f, g, h, i, j, k, s, t, u, v) |
    w = '' |
    x = z |
    if (x .eq. 0) w = 'thread0thr_number_0THREAD0THR_NUMBER_0' |
    if (x .eq. 1) w = 'thread1thr_number_1THREAD1THR_NUMBER_1' |
    if (x .eq. 2) w = 'thread2thr_number_2THREAD2THR_NUMBER_2' |
    if (x .eq. 3) w = 'thread3thr_number_3THREAD3THR_NUMBER_3' |
    if (x .eq. 4) w = 'thread4thr_number_4THREAD4THR_NUMBER_4' |
    if (x .eq. 5) w = 'thread5thr_number_5THREAD5THR_NUMBER_5' |
    y = '' |
    if (x .eq. 0) y = '0' |
    if (x .eq. 1) y = '1' |
    if (x .eq. 2) y = '2' |
    if (x .eq. 3) y = '3' |
    if (x .eq. 4) y = '4' |
    if (x .eq. 5) y = '5' |
    l = l .or. w(7:7) .ne. y |
    l = l .or. w(19:19) .ne. y |
    l = l .or. w(26:26) .ne. y |
    l = l .or. w(38:38) .ne. y |
    l = l .or. c .ne. w(8:19) |
    l = l .or. d .ne. w(1:7) |
    l = l .or. s .ne. w(20:26) |
    do 113, p = 1, 2 |
      do 113, q = 3, 7 |
	do 113, r = 1, 7 |
	  if (q .lt. 6) l = l .or. e(p, q, r) .ne. 5 * x + p + q + 2 * r |
	  l = l .or. f(p, q, r) .ne. 25 * x + p + q + 2 * r |
	  if (r .lt. 6 .and. q + r .le. 8) l = l .or. g(r, q) .ne. w(8:19) |
	  if (r .lt. 6 .and. q + r .gt. 8) l = l .or. g(r, q) .ne. w(27:38) |
	  if (r .lt. 6 .and. q + r .le. 8) l = l .or. h(r, q) .ne. w(1:7) |
	  if (r .lt. 6 .and. q + r .gt. 8) l = l .or. h(r, q) .ne. w(20:26) |
	  if (q .lt. 6) l = l .or. t(p, q, r) .ne. -10 + x + p - q + 2 * r |
	  l = l .or. u(p, q, r) .ne. 30 - x - p + q - 2 * r |
	  if (r .lt. 6 .and. q + r .le. 8) l = l .or. v(r, q) .ne. w(1:7) |
	  if (r .lt. 6 .and. q + r .gt. 8) l = l .or. v(r, q) .ne. w(20:26) |
113	continue |
    do 114, p = 3, 5 |
      do 114, q = 2, 6 |
	do 114, r = 1, 7 |
	  l = l .or. i(p - 2, q - 1, r) .ne. (7.5 + x) * p * q * r |
	  l = l .or. j(p, q + 3, r + 6) .ne. (9.5 + x) * p * q * r |
114	continue |
    do 115, p = 1, 5 |
      do 115, q = 4, 6 |
	l = l .or. k(p, 1, q - 3) .ne. 19 + x + p + 7 + 3 * q |
115	continue |
!$omp end parallel |
    if (l) call abort |
  end subroutine foo |
 |
  ! Same reference-data driver as in the sibling VLA tests. |
  subroutine test |
    character (len = 12) :: c |
    character (len = 7) :: d |
    integer, dimension (2, 3:5, 7) :: e |
    integer, dimension (2, 3:7, 7) :: f |
    character (len = 12), dimension (5, 3:7) :: g |
    character (len = 7), dimension (5, 3:7) :: h |
    real, dimension (3:5, 2:6, 1:7) :: i |
    double precision, dimension (3:6, 2:6, 1:7) :: j |
    integer, dimension (1:5, 7:7, 4:6) :: k |
    integer :: p, q, r |
    call foo (c, d, e, f, g, h, i, j, k, 7) |
  end subroutine test |
end |
/libgomp.fortran/vla7.f90
0,0 → 1,143
! { dg-do run } |
! { dg-options "-w" } |
 |
! vla7.f90: checks FIRSTPRIVATE and SHARED handling of character-valued
! function results whose length depends on a dummy argument, both for
! scalar results (f1) and array results (f3, f4), with 2 threads.
character (6) :: c, f2 |
character (6) :: d(2) |
c = f1 (6) |
if (c .ne. 'opqrst') call abort |
c = f2 (6) |
if (c .ne. '_/!!/_') call abort |
d = f3 (6) |
if (d(1) .ne. 'opqrst' .or. d(2) .ne. 'a') call abort |
d = f4 (6) |
if (d(1) .ne. 'Opqrst' .or. d(2) .ne. 'A') call abort |
contains |
  ! Scalar character(n) result used as firstprivate, then as shared. |
  function f1 (n) |
    use omp_lib |
    character (n) :: f1 |
    logical :: l |
    f1 = 'abcdef' |
    l = .false. |
!$omp parallel firstprivate (f1) reduction (.or.:l) num_threads (2) |
    l = f1 .ne. 'abcdef' |
    if (omp_get_thread_num () .eq. 0) f1 = 'ijklmn' |
    if (omp_get_thread_num () .eq. 1) f1 = 'IJKLMN' |
!$omp barrier |
    ! Each thread must still see only its own private modification. |
    l = l .or. (omp_get_thread_num () .eq. 0 .and. f1 .ne. 'ijklmn') |
    l = l .or. (omp_get_thread_num () .eq. 1 .and. f1 .ne. 'IJKLMN') |
!$omp end parallel |
    f1 = 'zZzz_z' |
!$omp parallel shared (f1) reduction (.or.:l) num_threads (2) |
    ! As shared, writes by one thread must be visible to both after a barrier. |
    l = l .or. f1 .ne. 'zZzz_z' |
!$omp barrier |
!$omp master |
    f1 = 'abc' |
!$omp end master |
!$omp barrier |
    l = l .or. f1 .ne. 'abc' |
!$omp barrier |
    if (omp_get_thread_num () .eq. 1) f1 = 'def' |
!$omp barrier |
    l = l .or. f1 .ne. 'def' |
!$omp end parallel |
    if (l) call abort |
    f1 = 'opqrst' |
  end function f1 |
  ! Array of character(n) results, explicit extent 2. |
  function f3 (n) |
    use omp_lib |
    character (n), dimension (2) :: f3 |
    logical :: l |
    f3 = 'abcdef' |
    l = .false. |
!$omp parallel firstprivate (f3) reduction (.or.:l) num_threads (2) |
    l = any (f3 .ne. 'abcdef') |
    if (omp_get_thread_num () .eq. 0) f3 = 'ijklmn' |
    if (omp_get_thread_num () .eq. 1) f3 = 'IJKLMN' |
!$omp barrier |
    l = l .or. (omp_get_thread_num () .eq. 0 .and. any (f3 .ne. 'ijklmn')) |
    l = l .or. (omp_get_thread_num () .eq. 1 .and. any (f3 .ne. 'IJKLMN')) |
!$omp end parallel |
    f3 = 'zZzz_z' |
!$omp parallel shared (f3) reduction (.or.:l) num_threads (2) |
    l = l .or. any (f3 .ne. 'zZzz_z') |
!$omp barrier |
!$omp master |
    f3 = 'abc' |
!$omp end master |
!$omp barrier |
    l = l .or. any (f3 .ne. 'abc') |
!$omp barrier |
    if (omp_get_thread_num () .eq. 1) f3 = 'def' |
!$omp barrier |
    l = l .or. any (f3 .ne. 'def') |
!$omp end parallel |
    if (l) call abort |
    f3(1) = 'opqrst' |
    f3(2) = 'a' |
  end function f3 |
  ! Array result whose extent (n - 4) also depends on the dummy argument. |
  function f4 (n) |
    use omp_lib |
    character (n), dimension (n - 4) :: f4 |
    logical :: l |
    f4 = 'abcdef' |
    l = .false. |
!$omp parallel firstprivate (f4) reduction (.or.:l) num_threads (2) |
    l = any (f4 .ne. 'abcdef') |
    if (omp_get_thread_num () .eq. 0) f4 = 'ijklmn' |
    if (omp_get_thread_num () .eq. 1) f4 = 'IJKLMN' |
!$omp barrier |
    l = l .or. (omp_get_thread_num () .eq. 0 .and. any (f4 .ne. 'ijklmn')) |
    l = l .or. (omp_get_thread_num () .eq. 1 .and. any (f4 .ne. 'IJKLMN')) |
    l = l .or. size (f4) .ne. 2 |
!$omp end parallel |
    f4 = 'zZzz_z' |
!$omp parallel shared (f4) reduction (.or.:l) num_threads (2) |
    l = l .or. any (f4 .ne. 'zZzz_z') |
!$omp barrier |
!$omp master |
    f4 = 'abc' |
!$omp end master |
!$omp barrier |
    l = l .or. any (f4 .ne. 'abc') |
!$omp barrier |
    if (omp_get_thread_num () .eq. 1) f4 = 'def' |
!$omp barrier |
    l = l .or. any (f4 .ne. 'def') |
    l = l .or. size (f4) .ne. 2 |
!$omp end parallel |
    if (l) call abort |
    f4(1) = 'Opqrst' |
    f4(2) = 'A' |
  end function f4 |
end |
! External (non-contained) variant: assumed-length character(*) result,
! exercised first as FIRSTPRIVATE and then as SHARED across 2 threads.
function f2 (n) |
  use omp_lib |
  character (*) :: f2 |
  logical :: l |
  f2 = 'abcdef' |
  l = .false. |
!$omp parallel firstprivate (f2) reduction (.or.:l) num_threads (2) |
  l = f2 .ne. 'abcdef' |
  if (omp_get_thread_num () .eq. 0) f2 = 'ijklmn' |
  if (omp_get_thread_num () .eq. 1) f2 = 'IJKLMN' |
!$omp barrier |
  ! Private copies must retain per-thread values across the barrier. |
  l = l .or. (omp_get_thread_num () .eq. 0 .and. f2 .ne. 'ijklmn') |
  l = l .or. (omp_get_thread_num () .eq. 1 .and. f2 .ne. 'IJKLMN') |
!$omp end parallel |
  f2 = 'zZzz_z' |
!$omp parallel shared (f2) reduction (.or.:l) num_threads (2) |
  l = l .or. f2 .ne. 'zZzz_z' |
!$omp barrier |
!$omp master |
  f2 = 'abc' |
!$omp end master |
!$omp barrier |
  l = l .or. f2 .ne. 'abc' |
!$omp barrier |
  if (omp_get_thread_num () .eq. 1) f2 = 'def' |
!$omp barrier |
  l = l .or. f2 .ne. 'def' |
!$omp end parallel |
  if (l) call abort |
  f2 = '_/!!/_' |
end function f2 |
/libgomp.fortran/retval1.f90
0,0 → 1,120
! { dg-do run } |
 |
! retval1.f90: the function result variable itself appears in a
! firstprivate clause; each thread gets its own copy initialized to 6.5. |
function f1 () |
  use omp_lib |
  real :: f1 |
  logical :: l |
  f1 = 6.5 |
  l = .false. |
!$omp parallel firstprivate (f1) num_threads (2) reduction (.or.:l) |
  l = f1 .ne. 6.5 |
  if (omp_get_thread_num () .eq. 0) f1 = 8.5 |
  if (omp_get_thread_num () .eq. 1) f1 = 14.5 |
!$omp barrier |
  ! Each thread's private result copy must keep its own value. |
  l = l .or. (omp_get_thread_num () .eq. 0 .and. f1 .ne. 8.5) |
  l = l .or. (omp_get_thread_num () .eq. 1 .and. f1 .ne. 14.5) |
!$omp end parallel |
  if (l) call abort |
  f1 = -2.5 |
end function f1 |
! Same as f1, but the ENTRY name e2 (which aliases the f2 result) is the
! variable placed in the firstprivate clause.
function f2 () |
  use omp_lib |
  real :: f2, e2 |
  logical :: l |
  entry e2 () |
  f2 = 6.5 |
  l = .false. |
!$omp parallel firstprivate (e2) num_threads (2) reduction (.or.:l) |
  l = e2 .ne. 6.5 |
  if (omp_get_thread_num () .eq. 0) e2 = 8.5 |
  if (omp_get_thread_num () .eq. 1) e2 = 14.5 |
!$omp barrier |
  l = l .or. (omp_get_thread_num () .eq. 0 .and. e2 .ne. 8.5) |
  l = l .or. (omp_get_thread_num () .eq. 1 .and. e2 .ne. 14.5) |
!$omp end parallel |
  if (l) call abort |
  e2 = 7.5 |
end function f2 |
! Both the function result name f3 and the ENTRY result name e3 appear
! in the same firstprivate clause; they must stay consistent per thread.
function f3 () |
  use omp_lib |
  real :: f3, e3 |
  logical :: l |
  entry e3 () |
  f3 = 6.5 |
  l = .false. |
!$omp parallel firstprivate (f3, e3) num_threads (2) reduction (.or.:l) |
  l = e3 .ne. 6.5 |
  l = l .or. f3 .ne. 6.5 |
  if (omp_get_thread_num () .eq. 0) e3 = 8.5 |
  if (omp_get_thread_num () .eq. 1) e3 = 14.5 |
  f3 = e3 - 4.5 |
!$omp barrier |
  l = l .or. (omp_get_thread_num () .eq. 0 .and. e3 .ne. 8.5) |
  l = l .or. (omp_get_thread_num () .eq. 1 .and. e3 .ne. 14.5) |
  l = l .or. f3 .ne. e3 - 4.5 |
!$omp end parallel |
  if (l) call abort |
  e3 = 0.5 |
end function f3 |
! Like f3, but using explicit RESULT variables (r4 for f4, s4 for the
! ENTRY e4) in the firstprivate clause.
function f4 () result (r4) |
  use omp_lib |
  real :: r4, s4 |
  logical :: l |
  entry e4 () result (s4) |
  r4 = 6.5 |
  l = .false. |
!$omp parallel firstprivate (r4, s4) num_threads (2) reduction (.or.:l) |
  l = s4 .ne. 6.5 |
  l = l .or. r4 .ne. 6.5 |
  if (omp_get_thread_num () .eq. 0) s4 = 8.5 |
  if (omp_get_thread_num () .eq. 1) s4 = 14.5 |
  r4 = s4 - 4.5 |
!$omp barrier |
  l = l .or. (omp_get_thread_num () .eq. 0 .and. s4 .ne. 8.5) |
  l = l .or. (omp_get_thread_num () .eq. 1 .and. s4 .ne. 14.5) |
  l = l .or. r4 .ne. s4 - 4.5 |
!$omp end parallel |
  if (l) call abort |
  s4 = -0.5 |
end function f4 |
! f5/e5 have DIFFERENT result types (real vs integer); is_f5 selects
! which result is live.  Both result variables go in firstprivate and
! only the live one is fully checked.
function f5 (is_f5) |
  use omp_lib |
  real :: f5 |
  integer :: e5 |
  logical :: l, is_f5 |
  entry e5 (is_f5) |
  if (is_f5) then |
    f5 = 6.5 |
  else |
    e5 = 8 |
  end if |
  l = .false. |
!$omp parallel firstprivate (f5, e5) shared (is_f5) num_threads (2) & |
!$omp reduction (.or.:l) |
  l = .not. is_f5 .and. e5 .ne. 8 |
  l = l .or. (is_f5 .and. f5 .ne. 6.5) |
  if (omp_get_thread_num () .eq. 0) e5 = 8 |
  if (omp_get_thread_num () .eq. 1) e5 = 14 |
  f5 = e5 - 4.5 |
!$omp barrier |
  l = l .or. (omp_get_thread_num () .eq. 0 .and. e5 .ne. 8) |
  l = l .or. (omp_get_thread_num () .eq. 1 .and. e5 .ne. 14) |
  l = l .or. f5 .ne. e5 - 4.5 |
!$omp end parallel |
  if (l) call abort |
  if (is_f5) f5 = -2.5 |
  if (.not. is_f5) e5 = 8 |
end function f5 |
|
! Main program: verifies the final result value returned through each
! function name and each ENTRY name.
real :: f1, f2, e2, f3, e3, f4, e4, f5 |
integer :: e5 |
if (f1 () .ne. -2.5) call abort |
if (f2 () .ne. 7.5) call abort |
if (e2 () .ne. 7.5) call abort |
if (f3 () .ne. 0.5) call abort |
if (e3 () .ne. 0.5) call abort |
if (f4 () .ne. -0.5) call abort |
if (e4 () .ne. -0.5) call abort |
if (f5 (.true.) .ne. -2.5) call abort |
if (e5 (.false.) .ne. 8) call abort |
end |
/libgomp.fortran/retval2.f90
0,0 → 1,27
! { dg-do run } |
 |
! retval2.f90: the function result variable is referenced in a
! firstprivate clause inside a CONTAINED subroutine (host association). |
function f1 () |
  real :: f1 |
  f1 = 6.5 |
  call sub1 |
contains |
  subroutine sub1 |
    use omp_lib |
    logical :: l |
    l = .false. |
!$omp parallel firstprivate (f1) num_threads (2) reduction (.or.:l) |
    l = f1 .ne. 6.5 |
    if (omp_get_thread_num () .eq. 0) f1 = 8.5 |
    if (omp_get_thread_num () .eq. 1) f1 = 14.5 |
!$omp barrier |
    l = l .or. (omp_get_thread_num () .eq. 0 .and. f1 .ne. 8.5) |
    l = l .or. (omp_get_thread_num () .eq. 1 .and. f1 .ne. 14.5) |
!$omp end parallel |
    if (l) call abort |
    ! The contained routine sets the host's result variable. |
    f1 = -2.5 |
  end subroutine sub1 |
end function f1 |
|
! Main program: the value assigned inside sub1 must be the one returned.
real :: f1 |
if (f1 () .ne. -2.5) call abort |
end |
/libgomp.fortran/lib3.f
0,0 → 1,76
C { dg-do run } |
C lib3.f: fixed-form smoke test of the OpenMP runtime library routines
C declared in omp_lib.h: simple and nestable locks, dynamic/nested
C adjustment flags, thread-count queries, and the timing routines. |
      INCLUDE "omp_lib.h" |
 |
      DOUBLE PRECISION :: D, E |
      LOGICAL :: L |
      INTEGER (KIND = OMP_LOCK_KIND) :: LCK |
      INTEGER (KIND = OMP_NEST_LOCK_KIND) :: NLCK |
 |
      D = OMP_GET_WTIME () |
 |
C Simple lock: TEST on a held lock must fail; on a free lock it acquires. |
      CALL OMP_INIT_LOCK (LCK) |
      CALL OMP_SET_LOCK (LCK) |
      IF (OMP_TEST_LOCK (LCK)) CALL ABORT |
      CALL OMP_UNSET_LOCK (LCK) |
      IF (.NOT. OMP_TEST_LOCK (LCK)) CALL ABORT |
      IF (OMP_TEST_LOCK (LCK)) CALL ABORT |
      CALL OMP_UNSET_LOCK (LCK) |
      CALL OMP_DESTROY_LOCK (LCK) |
 |
C Nestable lock: TEST returns the new nesting depth (1, then 3, then 2). |
      CALL OMP_INIT_NEST_LOCK (NLCK) |
      IF (OMP_TEST_NEST_LOCK (NLCK) .NE. 1) CALL ABORT |
      CALL OMP_SET_NEST_LOCK (NLCK) |
      IF (OMP_TEST_NEST_LOCK (NLCK) .NE. 3) CALL ABORT |
      CALL OMP_UNSET_NEST_LOCK (NLCK) |
      CALL OMP_UNSET_NEST_LOCK (NLCK) |
      IF (OMP_TEST_NEST_LOCK (NLCK) .NE. 2) CALL ABORT |
      CALL OMP_UNSET_NEST_LOCK (NLCK) |
      CALL OMP_UNSET_NEST_LOCK (NLCK) |
      CALL OMP_DESTROY_NEST_LOCK (NLCK) |
 |
      CALL OMP_SET_DYNAMIC (.TRUE.) |
      IF (.NOT. OMP_GET_DYNAMIC ()) CALL ABORT |
      CALL OMP_SET_DYNAMIC (.FALSE.) |
      IF (OMP_GET_DYNAMIC ()) CALL ABORT |
 |
      CALL OMP_SET_NESTED (.TRUE.) |
      IF (.NOT. OMP_GET_NESTED ()) CALL ABORT |
      CALL OMP_SET_NESTED (.FALSE.) |
      IF (OMP_GET_NESTED ()) CALL ABORT |
 |
C Outside a parallel region NUM_THREADS is 1, MAX_THREADS the request. |
      CALL OMP_SET_NUM_THREADS (5) |
      IF (OMP_GET_NUM_THREADS () .NE. 1) CALL ABORT |
      IF (OMP_GET_MAX_THREADS () .NE. 5) CALL ABORT |
      IF (OMP_GET_THREAD_NUM () .NE. 0) CALL ABORT |
      CALL OMP_SET_NUM_THREADS (3) |
      IF (OMP_GET_NUM_THREADS () .NE. 1) CALL ABORT |
      IF (OMP_GET_MAX_THREADS () .NE. 3) CALL ABORT |
      IF (OMP_GET_THREAD_NUM () .NE. 0) CALL ABORT |
      L = .FALSE. |
C$OMP PARALLEL REDUCTION (.OR.:L) |
      L = OMP_GET_NUM_THREADS () .NE. 3 |
      L = L .OR. (OMP_GET_THREAD_NUM () .LT. 0) |
      L = L .OR. (OMP_GET_THREAD_NUM () .GE. 3) |
C$OMP MASTER |
      L = L .OR. (OMP_GET_THREAD_NUM () .NE. 0) |
C$OMP END MASTER |
C$OMP END PARALLEL |
      IF (L) CALL ABORT |
 |
      IF (OMP_GET_NUM_PROCS () .LE. 0) CALL ABORT |
      IF (OMP_IN_PARALLEL ()) CALL ABORT |
C$OMP PARALLEL REDUCTION (.OR.:L) |
      L = .NOT. OMP_IN_PARALLEL () |
C$OMP END PARALLEL |
C$OMP PARALLEL REDUCTION (.OR.:L) IF (.TRUE.) |
      L = .NOT. OMP_IN_PARALLEL () |
C$OMP END PARALLEL |
 |
      E = OMP_GET_WTIME () |
      IF (D .GT. E) CALL ABORT |
      D = OMP_GET_WTICK () |
C Negative precision is definitely wrong, |
C bigger than 1s clock resolution is also strange |
      IF (D .LE. 0 .OR. D .GT. 1.) CALL ABORT |
      END |
/libgomp.fortran/do1.f90
0,0 → 1,179
! { dg-do run } |
 |
! do1.f90: runs the same five worksharing loops (forward, backward,
! strided up, strided down, large negative stride) under every schedule
! kind (default, static, dynamic, guided, runtime) with 4 threads, and
! compares the touched elements of a(:) against the precomputed
! reference array b(:) after each parallel region.
integer, dimension (128) :: a, b |
integer :: i |
a = -1 |
b = -1 |
! Build the reference: each loop family tags its iteration range with a
! distinct multiple of 256 so stray writes are detectable. |
do i = 1, 128 |
  if (i .ge. 8 .and. i .le. 15) then |
    b(i) = 1 * 256 + i |
  else if (i .ge. 19 .and. i .le. 23) then |
    b(i) = 2 * 256 + i |
  else if (i .ge. 28 .and. i .le. 38) then |
    if (iand (i, 1) .eq. 0) b(i) = 3 * 256 + i |
  else if (i .ge. 59 .and. i .le. 79) then |
    if (iand (i - 59, 3) .eq. 0) b(i) = 4 * 256 + i |
  else if (i .ge. 101 .and. i .le. 125) then |
    if (mod (i - 101, 12) .eq. 0) b(i) = 5 * 256 + i |
  end if |
end do |
 |
!$omp parallel num_threads (4) |
 |
!$omp do |
do i = 8, 15 |
  a(i) = 1 * 256 + i |
end do |
 |
!$omp do |
do i = 23, 19, -1 |
  a(i) = 2 * 256 + i |
end do |
 |
!$omp do |
do i = 28, 39, 2 |
  a(i) = 3 * 256 + i |
end do |
 |
!$omp do |
do i = 79, 59, -4 |
  a(i) = 4 * 256 + i |
end do |
 |
!$omp do |
do i = 125, 90, -12 |
  a(i) = 5 * 256 + i |
end do |
 |
!$omp end parallel |
 |
if (any (a .ne. b)) call abort |
a = -1 |
 |
!$omp parallel num_threads (4) |
 |
!$omp do schedule (static) |
do i = 8, 15 |
  a(i) = 1 * 256 + i |
end do |
 |
!$omp do schedule (static, 1) |
do i = 23, 19, -1 |
  a(i) = 2 * 256 + i |
end do |
 |
!$omp do schedule (static, 3) |
do i = 28, 39, 2 |
  a(i) = 3 * 256 + i |
end do |
 |
!$omp do schedule (static, 6) |
do i = 79, 59, -4 |
  a(i) = 4 * 256 + i |
end do |
 |
!$omp do schedule (static, 2) |
do i = 125, 90, -12 |
  a(i) = 5 * 256 + i |
end do |
 |
!$omp end parallel |
 |
if (any (a .ne. b)) call abort |
a = -1 |
 |
!$omp parallel num_threads (4) |
 |
!$omp do schedule (dynamic) |
do i = 8, 15 |
  a(i) = 1 * 256 + i |
end do |
 |
!$omp do schedule (dynamic, 4) |
do i = 23, 19, -1 |
  a(i) = 2 * 256 + i |
end do |
 |
!$omp do schedule (dynamic, 1) |
do i = 28, 39, 2 |
  a(i) = 3 * 256 + i |
end do |
 |
!$omp do schedule (dynamic, 2) |
do i = 79, 59, -4 |
  a(i) = 4 * 256 + i |
end do |
 |
!$omp do schedule (dynamic, 3) |
do i = 125, 90, -12 |
  a(i) = 5 * 256 + i |
end do |
 |
!$omp end parallel |
 |
if (any (a .ne. b)) call abort |
a = -1 |
 |
!$omp parallel num_threads (4) |
 |
!$omp do schedule (guided) |
do i = 8, 15 |
  a(i) = 1 * 256 + i |
end do |
 |
!$omp do schedule (guided, 4) |
do i = 23, 19, -1 |
  a(i) = 2 * 256 + i |
end do |
 |
!$omp do schedule (guided, 1) |
do i = 28, 39, 2 |
  a(i) = 3 * 256 + i |
end do |
 |
!$omp do schedule (guided, 2) |
do i = 79, 59, -4 |
  a(i) = 4 * 256 + i |
end do |
 |
!$omp do schedule (guided, 3) |
do i = 125, 90, -12 |
  a(i) = 5 * 256 + i |
end do |
 |
!$omp end parallel |
 |
if (any (a .ne. b)) call abort |
a = -1 |
 |
!$omp parallel num_threads (4) |
 |
!$omp do schedule (runtime) |
do i = 8, 15 |
  a(i) = 1 * 256 + i |
end do |
 |
!$omp do schedule (runtime) |
do i = 23, 19, -1 |
  a(i) = 2 * 256 + i |
end do |
 |
!$omp do schedule (runtime) |
do i = 28, 39, 2 |
  a(i) = 3 * 256 + i |
end do |
 |
!$omp do schedule (runtime) |
do i = 79, 59, -4 |
  a(i) = 4 * 256 + i |
end do |
 |
!$omp do schedule (runtime) |
do i = 125, 90, -12 |
  a(i) = 5 * 256 + i |
end do |
 |
!$omp end parallel |
 |
if (any (a .ne. b)) call abort |
end |
/libgomp.fortran/pr27416-1.f90
0,0 → 1,19
! PR middle-end/27416 |
! { dg-do run } |
 |
! Regression test: a variable that is both FIRSTPRIVATE and LASTPRIVATE
! on an orphaned !$omp do must end up with the last iteration's value
! (6 from before the region, plus 16 added on iteration 16).
integer :: j |
j = 6 |
!$omp parallel num_threads (4) |
call foo (j) |
!$omp end parallel |
if (j.ne.6+16) call abort |
end |
|
! Orphaned worksharing loop: j is firstprivate (seeded with the caller's
! value) and lastprivate (iteration 16's private value is copied back).
subroutine foo (j) |
  integer :: i, j |
 |
!$omp do firstprivate (j) lastprivate (j) |
  do i = 1, 16 |
    if (i.eq.16) j = j + i |
  end do |
end subroutine foo |
/libgomp.fortran/threadprivate1.f90
0,0 → 1,19
! { dg-do run } |
! { dg-require-effective-target tls_runtime } |
 |
! Module variable d is threadprivate: each OpenMP thread gets its own
! TLS copy.
module threadprivate1 |
  double precision :: d |
!$omp threadprivate (d) |
end module threadprivate1 |
|
! Each of 4 threads writes a thread-dependent value into its copy of d;
! after a barrier the value must still be the thread's own.
!$ use omp_lib |
  use threadprivate1 |
  logical :: l |
  l = .false. |
!$omp parallel num_threads (4) reduction (.or.:l) |
  d = omp_get_thread_num () + 6.5 |
!$omp barrier |
  if (d .ne. omp_get_thread_num () + 6.5) l = .true. |
!$omp end parallel |
  if (l) call abort () |
end |
/libgomp.fortran/do2.f90
0,0 → 1,366
! { dg-do run } |
 |
! do2.f90: same five loop shapes as do1.f90 but with the ORDERED clause
! on every schedule kind.  Besides comparing a(:) to the reference b(:),
! the shared counter j (advanced only inside !$omp ordered) verifies
! that the ordered regions execute in strict iteration order; k records
! any sequencing violation.
integer, dimension (128) :: a, b |
integer :: i, j |
logical :: k |
a = -1 |
b = -1 |
! Reference pattern: each loop family tags its range with a distinct
! multiple of 256. |
do i = 1, 128 |
  if (i .ge. 8 .and. i .le. 15) then |
    b(i) = 1 * 256 + i |
  else if (i .ge. 19 .and. i .le. 23) then |
    b(i) = 2 * 256 + i |
  else if (i .ge. 28 .and. i .le. 38) then |
    if (iand (i, 1) .eq. 0) b(i) = 3 * 256 + i |
  else if (i .ge. 59 .and. i .le. 79) then |
    if (iand (i - 59, 3) .eq. 0) b(i) = 4 * 256 + i |
  else if (i .ge. 101 .and. i .le. 125) then |
    if (mod (i - 101, 12) .eq. 0) b(i) = 5 * 256 + i |
  end if |
end do |
 |
k = .false. |
j = 8 |
!$omp parallel num_threads (4) |
 |
!$omp do ordered |
do i = 8, 15 |
  a(i) = 1 * 256 + i |
!$omp ordered |
  ! Ordered region: iterations must arrive here in loop order. |
  if (i .ne. j) k = .true. |
  j = j + 1 |
!$omp end ordered |
end do |
 |
!$omp single |
  j = 23 |
!$omp end single |
 |
!$omp do ordered |
do i = 23, 19, -1 |
  a(i) = 2 * 256 + i |
!$omp ordered |
  if (i .ne. j) k = .true. |
  j = j - 1 |
!$omp end ordered |
end do |
 |
!$omp single |
  j = 28 |
!$omp end single |
 |
!$omp do ordered |
do i = 28, 39, 2 |
  a(i) = 3 * 256 + i |
!$omp ordered |
  if (i .ne. j) k = .true. |
  j = j + 2 |
!$omp end ordered |
end do |
 |
!$omp single |
  j = 79 |
!$omp end single |
 |
!$omp do ordered |
do i = 79, 59, -4 |
  a(i) = 4 * 256 + i |
!$omp ordered |
  if (i .ne. j) k = .true. |
  j = j - 4 |
!$omp end ordered |
end do |
 |
!$omp single |
  j = 125 |
!$omp end single |
 |
!$omp do ordered |
do i = 125, 90, -12 |
  a(i) = 5 * 256 + i |
!$omp ordered |
  if (i .ne. j) k = .true. |
  j = j - 12 |
!$omp end ordered |
end do |
 |
!$omp end parallel |
 |
if (any (a .ne. b) .or. k) call abort |
a = -1 |
k = .false. |
j = 8 |
!$omp parallel num_threads (4) |
 |
!$omp do ordered schedule (static) |
do i = 8, 15 |
  a(i) = 1 * 256 + i |
!$omp ordered |
  if (i .ne. j) k = .true. |
  j = j + 1 |
!$omp end ordered |
end do |
 |
!$omp single |
  j = 23 |
!$omp end single |
 |
!$omp do ordered schedule (static, 1) |
do i = 23, 19, -1 |
  a(i) = 2 * 256 + i |
!$omp ordered |
  if (i .ne. j) k = .true. |
  j = j - 1 |
!$omp end ordered |
end do |
 |
!$omp single |
  j = 28 |
!$omp end single |
 |
!$omp do ordered schedule (static, 3) |
do i = 28, 39, 2 |
  a(i) = 3 * 256 + i |
!$omp ordered |
  if (i .ne. j) k = .true. |
  j = j + 2 |
!$omp end ordered |
end do |
 |
!$omp single |
  j = 79 |
!$omp end single |
 |
!$omp do ordered schedule (static, 6) |
do i = 79, 59, -4 |
  a(i) = 4 * 256 + i |
!$omp ordered |
  if (i .ne. j) k = .true. |
  j = j - 4 |
!$omp end ordered |
end do |
 |
!$omp single |
  j = 125 |
!$omp end single |
 |
!$omp do ordered schedule (static, 2) |
do i = 125, 90, -12 |
  a(i) = 5 * 256 + i |
!$omp ordered |
  if (i .ne. j) k = .true. |
  j = j - 12 |
!$omp end ordered |
end do |
 |
!$omp end parallel |
 |
if (any (a .ne. b) .or. k) call abort |
a = -1 |
k = .false. |
j = 8 |
!$omp parallel num_threads (4) |
 |
!$omp do ordered schedule (dynamic) |
do i = 8, 15 |
  a(i) = 1 * 256 + i |
!$omp ordered |
  if (i .ne. j) k = .true. |
  j = j + 1 |
!$omp end ordered |
end do |
 |
!$omp single |
  j = 23 |
!$omp end single |
 |
!$omp do ordered schedule (dynamic, 4) |
do i = 23, 19, -1 |
  a(i) = 2 * 256 + i |
!$omp ordered |
  if (i .ne. j) k = .true. |
  j = j - 1 |
!$omp end ordered |
end do |
 |
!$omp single |
  j = 28 |
!$omp end single |
 |
!$omp do ordered schedule (dynamic, 1) |
do i = 28, 39, 2 |
  a(i) = 3 * 256 + i |
!$omp ordered |
  if (i .ne. j) k = .true. |
  j = j + 2 |
!$omp end ordered |
end do |
 |
!$omp single |
  j = 79 |
!$omp end single |
 |
!$omp do ordered schedule (dynamic, 2) |
do i = 79, 59, -4 |
  a(i) = 4 * 256 + i |
!$omp ordered |
  if (i .ne. j) k = .true. |
  j = j - 4 |
!$omp end ordered |
end do |
 |
!$omp single |
  j = 125 |
!$omp end single |
 |
!$omp do ordered schedule (dynamic, 3) |
do i = 125, 90, -12 |
  a(i) = 5 * 256 + i |
!$omp ordered |
  if (i .ne. j) k = .true. |
  j = j - 12 |
!$omp end ordered |
end do |
 |
!$omp end parallel |
 |
if (any (a .ne. b) .or. k) call abort |
a = -1 |
k = .false. |
j = 8 |
!$omp parallel num_threads (4) |
 |
!$omp do ordered schedule (guided) |
do i = 8, 15 |
  a(i) = 1 * 256 + i |
!$omp ordered |
  if (i .ne. j) k = .true. |
  j = j + 1 |
!$omp end ordered |
end do |
 |
!$omp single |
  j = 23 |
!$omp end single |
 |
!$omp do ordered schedule (guided, 4) |
do i = 23, 19, -1 |
  a(i) = 2 * 256 + i |
!$omp ordered |
  if (i .ne. j) k = .true. |
  j = j - 1 |
!$omp end ordered |
end do |
 |
!$omp single |
  j = 28 |
!$omp end single |
 |
!$omp do ordered schedule (guided, 1) |
do i = 28, 39, 2 |
  a(i) = 3 * 256 + i |
!$omp ordered |
  if (i .ne. j) k = .true. |
  j = j + 2 |
!$omp end ordered |
end do |
 |
!$omp single |
  j = 79 |
!$omp end single |
 |
!$omp do ordered schedule (guided, 2) |
do i = 79, 59, -4 |
  a(i) = 4 * 256 + i |
!$omp ordered |
  if (i .ne. j) k = .true. |
  j = j - 4 |
!$omp end ordered |
end do |
 |
!$omp single |
  j = 125 |
!$omp end single |
 |
!$omp do ordered schedule (guided, 3) |
do i = 125, 90, -12 |
  a(i) = 5 * 256 + i |
!$omp ordered |
  if (i .ne. j) k = .true. |
  j = j - 12 |
!$omp end ordered |
end do |
 |
!$omp end parallel |
 |
if (any (a .ne. b) .or. k) call abort |
a = -1 |
k = .false. |
j = 8 |
!$omp parallel num_threads (4) |
 |
!$omp do ordered schedule (runtime) |
do i = 8, 15 |
  a(i) = 1 * 256 + i |
!$omp ordered |
  if (i .ne. j) k = .true. |
  j = j + 1 |
!$omp end ordered |
end do |
 |
!$omp single |
  j = 23 |
!$omp end single |
 |
!$omp do ordered schedule (runtime) |
do i = 23, 19, -1 |
  a(i) = 2 * 256 + i |
!$omp ordered |
  if (i .ne. j) k = .true. |
  j = j - 1 |
!$omp end ordered |
end do |
 |
!$omp single |
  j = 28 |
!$omp end single |
 |
!$omp do ordered schedule (runtime) |
do i = 28, 39, 2 |
  a(i) = 3 * 256 + i |
!$omp ordered |
  if (i .ne. j) k = .true. |
  j = j + 2 |
!$omp end ordered |
end do |
 |
!$omp single |
  j = 79 |
!$omp end single |
 |
!$omp do ordered schedule (runtime) |
do i = 79, 59, -4 |
  a(i) = 4 * 256 + i |
!$omp ordered |
  if (i .ne. j) k = .true. |
  j = j - 4 |
!$omp end ordered |
end do |
 |
!$omp single |
  j = 125 |
!$omp end single |
 |
!$omp do ordered schedule (runtime) |
do i = 125, 90, -12 |
  a(i) = 5 * 256 + i |
!$omp ordered |
  if (i .ne. j) k = .true. |
  j = j - 12 |
!$omp end ordered |
end do |
 |
!$omp end parallel |
 |
if (any (a .ne. b) .or. k) call abort |
end |
/libgomp.fortran/threadprivate2.f90
0,0 → 1,94
! { dg-do run } |
! { dg-require-effective-target tls_runtime } |
 |
! Threadprivate ALLOCATABLE module array: allocation status and contents
! must be per-thread.
module threadprivate2 |
  integer, dimension(:,:), allocatable :: foo |
!$omp threadprivate (foo) |
end module threadprivate2 |
|
! Main: exercises threadprivate allocatables (module foo), a
! threadprivate COMMON block holding a pointer + target (/thrc/), a
! threadprivate SAVEd pointer (bar3), and a threadprivate derived type
! with default initialization (baz), across two parallel regions with
! identical thread teams (dynamic disabled, 4 threads).
use omp_lib |
use threadprivate2 |
 |
integer, dimension(:), pointer :: bar1 |
integer, dimension(2), target :: bar2 |
common /thrc/ bar1, bar2 |
!$omp threadprivate (/thrc/) |
 |
integer, dimension(:), pointer, save :: bar3 => NULL() |
!$omp threadprivate (bar3) |
 |
logical :: l |
type tt |
  integer :: a |
  integer :: b = 32 |
end type tt |
type (tt), save :: baz |
!$omp threadprivate (baz) |
 |
l = .false. |
! Disable dynamic adjustment so both regions get the same 4 threads and
! per-thread state persists between them. |
call omp_set_dynamic (.false.) |
call omp_set_num_threads (4) |
 |
!$omp parallel num_threads (4) reduction (.or.:l) |
! Each thread allocates a differently-sized foo and fills it. |
l = allocated (foo) |
allocate (foo (6 + omp_get_thread_num (), 3)) |
l = l.or..not.allocated (foo) |
l = l.or.size (foo).ne.(18 + 3 * omp_get_thread_num ()) |
foo = omp_get_thread_num () + 1 |
 |
bar2 = omp_get_thread_num () |
l = l.or.associated (bar3) |
bar1 => bar2 |
l = l.or..not.associated (bar1) |
l = l.or..not.associated (bar1, bar2) |
l = l.or.any (bar1.ne.omp_get_thread_num ()) |
nullify (bar1) |
l = l.or.associated (bar1) |
allocate (bar3 (4)) |
l = l.or..not.associated (bar3) |
bar3 = omp_get_thread_num () - 2 |
 |
l = l.or.(baz%b.ne.32) |
baz%a = omp_get_thread_num () * 2 |
baz%b = omp_get_thread_num () * 2 + 1 |
!$omp end parallel |
 |
! Serial part sees the master thread's (thread 0's) copies. |
if (l) call abort |
if (.not.allocated (foo)) call abort |
if (size (foo).ne.18) call abort |
if (any (foo.ne.1)) call abort |
 |
if (associated (bar1)) call abort |
if (.not.associated (bar3)) call abort |
if (any (bar3 .ne. -2)) call abort |
deallocate (bar3) |
if (associated (bar3)) call abort |
 |
!$omp parallel num_threads (4) reduction (.or.:l) |
! Second region: non-master threads must still see the state they left
! behind in the first region, and clean it up. |
l = l.or..not.allocated (foo) |
l = l.or.size (foo).ne.(18 + 3 * omp_get_thread_num ()) |
l = l.or.any (foo.ne.(omp_get_thread_num () + 1)) |
if (omp_get_thread_num () .ne. 0) then |
  deallocate (foo) |
  l = l.or.allocated (foo) |
end if |
 |
l = l.or.associated (bar1) |
if (omp_get_thread_num () .ne. 0) then |
  l = l.or..not.associated (bar3) |
  l = l.or.any (bar3 .ne. omp_get_thread_num () - 2) |
  deallocate (bar3) |
end if |
l = l.or.associated (bar3) |
 |
l = l.or.(baz%a.ne.(omp_get_thread_num () * 2)) |
l = l.or.(baz%b.ne.(omp_get_thread_num () * 2 + 1)) |
!$omp end parallel |
 |
if (l) call abort |
if (.not.allocated (foo)) call abort |
if (size (foo).ne.18) call abort |
if (any (foo.ne.1)) call abort |
deallocate (foo) |
if (allocated (foo)) call abort |
end |
/libgomp.fortran/threadprivate3.f90
0,0 → 1,106
! { dg-do run } |
! { dg-require-effective-target tls_runtime } |
 |
! Threadprivate module POINTER with default initialization to NULL().
module threadprivate3 |
  integer, dimension(:,:), pointer :: foo => NULL() |
!$omp threadprivate (foo) |
end module threadprivate3 |
|
! Main: like threadprivate2 but focused on POINTER variables, and adds a
! COPYIN region — the master's pointer association, target contents and
! derived-type values must be replicated into every thread at region
! entry, after which each thread diverges again.
use omp_lib |
use threadprivate3 |
 |
integer, dimension(:), pointer :: bar1 |
integer, dimension(2), target :: bar2, var |
common /thrc/ bar1, bar2 |
!$omp threadprivate (/thrc/) |
 |
integer, dimension(:), pointer, save :: bar3 => NULL() |
!$omp threadprivate (bar3) |
 |
logical :: l |
type tt |
  integer :: a |
  integer :: b = 32 |
end type tt |
type (tt), save :: baz |
!$omp threadprivate (baz) |
 |
l = .false. |
call omp_set_dynamic (.false.) |
call omp_set_num_threads (4) |
var = 6 |
 |
!$omp parallel num_threads (4) reduction (.or.:l) |
bar2 = omp_get_thread_num () |
l = associated (bar3) |
bar1 => bar2 |
l = l.or..not.associated (bar1) |
l = l.or..not.associated (bar1, bar2) |
l = l.or.any (bar1.ne.omp_get_thread_num ()) |
nullify (bar1) |
l = l.or.associated (bar1) |
allocate (bar3 (4)) |
l = l.or..not.associated (bar3) |
bar3 = omp_get_thread_num () - 2 |
! Leave thread 0 with bar1 => var and bar3 allocated; other threads
! clean up bar3 so only the master state survives for copyin below. |
if (omp_get_thread_num () .ne. 0) then |
  deallocate (bar3) |
  if (associated (bar3)) call abort |
else |
  bar1 => var |
end if |
bar2 = omp_get_thread_num () * 6 + 130 |
 |
l = l.or.(baz%b.ne.32) |
baz%a = omp_get_thread_num () * 2 |
baz%b = omp_get_thread_num () * 2 + 1 |
!$omp end parallel |
 |
if (l) call abort |
if (.not.associated (bar1)) call abort |
if (any (bar1.ne.6)) call abort |
if (.not.associated (bar3)) call abort |
if (any (bar3 .ne. -2)) call abort |
deallocate (bar3) |
if (associated (bar3)) call abort |
 |
allocate (bar3 (10)) |
bar3 = 17 |
 |
! COPYIN: every thread starts with the master's bar1 association,
! bar2 = 130, bar3 (size 10, all 17) and baz = (0, 1). |
!$omp parallel copyin (bar1, bar2, bar3, baz) num_threads (4) & |
!$omp& reduction (.or.:l) |
l = l.or..not.associated (bar1) |
l = l.or.any (bar1.ne.6) |
l = l.or.any (bar2.ne.130) |
l = l.or..not.associated (bar3) |
l = l.or.size (bar3).ne.10 |
l = l.or.any (bar3.ne.17) |
allocate (bar1 (4)) |
bar1 = omp_get_thread_num () |
bar2 = omp_get_thread_num () + 8 |
 |
l = l.or.(baz%a.ne.0) |
l = l.or.(baz%b.ne.1) |
baz%a = omp_get_thread_num () * 3 + 4 |
baz%b = omp_get_thread_num () * 3 + 5 |
 |
!$omp barrier |
if (omp_get_thread_num () .eq. 0) then |
  deallocate (bar3) |
end if |
bar3 => bar2 |
!$omp barrier |
 |
l = l.or..not.associated (bar1) |
l = l.or..not.associated (bar3) |
l = l.or.any (bar1.ne.omp_get_thread_num ()) |
l = l.or.size (bar1).ne.4 |
l = l.or.any (bar2.ne.omp_get_thread_num () + 8) |
l = l.or.any (bar3.ne.omp_get_thread_num () + 8) |
l = l.or.size (bar3).ne.2 |
 |
l = l.or.(baz%a .ne. omp_get_thread_num () * 3 + 4) |
l = l.or.(baz%b .ne. omp_get_thread_num () * 3 + 5) |
!$omp end parallel |
 |
if (l) call abort |
end |
/libgomp.fortran/condinc2.f
0,0 → 1,7
! { dg-options "-fno-openmp" } |
! Conditional-compilation test: with OpenMP disabled the C$ sentinel
! line is an ordinary comment, so condinc1.inc is NOT included.
      program condinc2 |
      logical l |
      l = .true. |
C$    include 'condinc1.inc' |
      return |
      end |
/libgomp.fortran/nestedfn1.f90
0,0 → 1,43
! { dg-do run } |
 |
! nestedfn1.f90: host-associated variables of the main program used from
! a CONTAINED subroutine with shared/private/firstprivate clauses; the
! shared variable a must carry its final value (7) back to the host.
integer :: a, b, c |
a = 1 |
b = 2 |
c = 3 |
call foo |
if (a .ne. 7) call abort |
contains |
  subroutine foo |
    use omp_lib |
    logical :: l |
    l = .false. |
!$omp parallel shared (a) private (b) firstprivate (c) & |
!$omp num_threads (2) reduction (.or.:l) |
    if (a .ne. 1 .or. c .ne. 3) l = .true. |
!$omp barrier |
    if (omp_get_thread_num () .eq. 0) then |
      a = 4 |
      b = 5 |
      c = 6 |
    end if |
!$omp barrier |
    if (omp_get_thread_num () .eq. 1) then |
      ! Thread 1 sees shared a = 4 but its own firstprivate c = 3. |
      if (a .ne. 4 .or. c .ne. 3) l = .true. |
      a = 7 |
      b = 8 |
      c = 9 |
    else if (omp_get_num_threads () .eq. 1) then |
      ! Fallback when the runtime delivered only one thread. |
      a = 7 |
    end if |
!$omp barrier |
    if (omp_get_thread_num () .eq. 0) then |
      if (a .ne. 7 .or. b .ne. 5 .or. c .ne. 6) l = .true. |
    end if |
!$omp barrier |
    if (omp_get_thread_num () .eq. 1) then |
      if (a .ne. 7 .or. b .ne. 8 .or. c .ne. 9) l = .true. |
    end if |
!$omp end parallel |
    if (l) call abort |
  end subroutine foo |
end |
/libgomp.fortran/lib1.f90
0,0 → 1,76
! { dg-do run } |
|
use omp_lib |
|
double precision :: d, e |
logical :: l |
integer (kind = omp_lock_kind) :: lck |
integer (kind = omp_nest_lock_kind) :: nlck |
|
d = omp_get_wtime () |
|
call omp_init_lock (lck) |
call omp_set_lock (lck) |
if (omp_test_lock (lck)) call abort |
call omp_unset_lock (lck) |
if (.not. omp_test_lock (lck)) call abort |
if (omp_test_lock (lck)) call abort |
call omp_unset_lock (lck) |
call omp_destroy_lock (lck) |
|
call omp_init_nest_lock (nlck) |
if (omp_test_nest_lock (nlck) .ne. 1) call abort |
call omp_set_nest_lock (nlck) |
if (omp_test_nest_lock (nlck) .ne. 3) call abort |
call omp_unset_nest_lock (nlck) |
call omp_unset_nest_lock (nlck) |
if (omp_test_nest_lock (nlck) .ne. 2) call abort |
call omp_unset_nest_lock (nlck) |
call omp_unset_nest_lock (nlck) |
call omp_destroy_nest_lock (nlck) |
|
call omp_set_dynamic (.true.) |
if (.not. omp_get_dynamic ()) call abort |
call omp_set_dynamic (.false.) |
if (omp_get_dynamic ()) call abort |
|
call omp_set_nested (.true.) |
if (.not. omp_get_nested ()) call abort |
call omp_set_nested (.false.) |
if (omp_get_nested ()) call abort |
|
call omp_set_num_threads (5) |
if (omp_get_num_threads () .ne. 1) call abort |
if (omp_get_max_threads () .ne. 5) call abort |
if (omp_get_thread_num () .ne. 0) call abort |
call omp_set_num_threads (3) |
if (omp_get_num_threads () .ne. 1) call abort |
if (omp_get_max_threads () .ne. 3) call abort |
if (omp_get_thread_num () .ne. 0) call abort |
l = .false. |
!$omp parallel reduction (.or.:l) |
l = omp_get_num_threads () .ne. 3 |
l = l .or. (omp_get_thread_num () .lt. 0) |
l = l .or. (omp_get_thread_num () .ge. 3) |
!$omp master |
l = l .or. (omp_get_thread_num () .ne. 0) |
!$omp end master |
!$omp end parallel |
if (l) call abort |
|
if (omp_get_num_procs () .le. 0) call abort |
if (omp_in_parallel ()) call abort |
!$omp parallel reduction (.or.:l) |
l = .not. omp_in_parallel () |
!$omp end parallel |
!$omp parallel reduction (.or.:l) if (.true.) |
l = .not. omp_in_parallel () |
!$omp end parallel |
|
e = omp_get_wtime () |
if (d .gt. e) call abort |
d = omp_get_wtick () |
! Negative precision is definitely wrong, |
! bigger than 1s clock resolution is also strange |
if (d .le. 0 .or. d .gt. 1.) call abort |
end |
/libgomp.fortran/nestedfn2.f90
0,0 → 1,34
! { dg-do run } |
|
integer :: i |
common /c/ i |
i = -1 |
!$omp parallel shared (i) num_threads (4) |
call test1 |
!$omp end parallel |
end |
subroutine test1 |
integer :: vari |
call test2 |
call test3 |
contains |
subroutine test2 |
use omp_lib |
integer :: i |
common /c/ i |
!$omp single |
i = omp_get_thread_num () |
call test4 |
!$omp end single copyprivate (vari) |
end subroutine test2 |
subroutine test3 |
integer :: i |
common /c/ i |
if (i .lt. 0 .or. i .ge. 4) call abort |
if (i + 10 .ne. vari) call abort |
end subroutine test3 |
subroutine test4 |
use omp_lib |
vari = omp_get_thread_num () + 10 |
end subroutine test4 |
end subroutine test1 |
/libgomp.fortran/pr27916-1.f90
0,0 → 1,26
! PR fortran/27916 |
! Test whether allocatable privatized arrays has "not currently allocated" |
! status at the start of OpenMP constructs. |
! { dg-do run } |
|
program pr27916 |
integer :: n, i |
logical :: r |
integer, dimension(:), allocatable :: a |
|
! r accumulates any failure observed by any thread via the .or. reduction. |
r = .false. |
!$omp parallel do num_threads (4) private (n, a, i) & |
!$omp & reduction (.or.: r) schedule (static) |
do n = 1, 16 |
! The PRIVATE copy of "a" must start out unallocated in every iteration. |
r = r .or. allocated (a) |
allocate (a (16)) |
r = r .or. .not. allocated (a) |
do i = 1, 16 |
a (i) = i |
end do |
! After an explicit DEALLOCATE the status must be "not allocated" again. |
deallocate (a) |
r = r .or. allocated (a) |
end do |
!$omp end parallel do |
if (r) call abort |
end program pr27916 |
/libgomp.fortran/nestedfn3.f90
0,0 → 1,24
! PR middle-end/28790 |
! { dg-do run } |
! A contained procedure (foo) that references a host variable (j) is |
! called from inside a parallel region opened in a sibling contained |
! procedure (bar).  Each of the 2 threads atomically adds i - 5 = 1, |
! so j goes from 8 to 10. |
|
program nestomp |
integer :: j |
j = 8 |
call bar |
if (j.ne.10) call abort |
contains |
! Adds i - 5 to the host-associated j under an atomic update. |
subroutine foo (i) |
integer :: i |
!$omp atomic |
j = j + i - 5 |
end subroutine |
subroutine bar |
use omp_lib |
integer :: i |
i = 6 |
! Disable dynamic teams so num_threads (2) is honoured exactly. |
call omp_set_dynamic (.false.) |
!$omp parallel num_threads (2) |
call foo(i) |
!$omp end parallel |
end subroutine |
end |
/libgomp.fortran/pr27916-2.f90
0,0 → 1,26
! PR fortran/27916 |
! Test whether allocatable privatized arrays has "not currently allocated" |
! status at the start of OpenMP constructs. |
! { dg-do run } |
|
! Same test as pr27916-1.f90, but the arrays become private through |
! DEFAULT (PRIVATE) rather than an explicit PRIVATE clause. |
program pr27916 |
integer :: n, i |
logical :: r |
integer, dimension(:), allocatable :: a |
|
r = .false. |
!$omp parallel do num_threads (4) default (private) & |
!$omp & reduction (.or.: r) schedule (static) |
do n = 1, 16 |
! The private copy of "a" must start out unallocated in every iteration. |
r = r .or. allocated (a) |
allocate (a (16)) |
r = r .or. .not. allocated (a) |
do i = 1, 16 |
a (i) = i |
end do |
deallocate (a) |
r = r .or. allocated (a) |
end do |
!$omp end parallel do |
if (r) call abort |
end program pr27916 |
/libgomp.fortran/condinc3.f90
0,0 → 1,7
! { dg-options "-fopenmp" } |
! Conditional compilation test (free form): with -fopenmp the "!$" |
! sentinel is active, so the INCLUDE below is really processed. |
program condinc3 |
logical l |
l = .false. |
! NOTE(review): condinc1.inc is expected to end the program on success, |
! so reaching STOP 2 signals failure -- TODO confirm against condinc1.inc. |
!$ include 'condinc1.inc' |
stop 2 |
end |
/libgomp.fortran/condinc4.f90
0,0 → 1,7
! { dg-options "-fno-openmp" } |
! Conditional compilation test (free form): with -fno-openmp the "!$" |
! sentinel is an ordinary comment, so the INCLUDE must be skipped and |
! the program simply returns. |
program condinc4 |
logical l |
l = .true. |
!$ include 'condinc1.inc' |
return |
end |
/libgomp.fortran/pr25162.f
0,0 → 1,40
C PR fortran/25162 |
C { dg-do run } |
C { dg-require-effective-target tls_runtime } |
PROGRAM PR25162 |
CALL TEST1 |
CALL TEST2 |
END |
SUBROUTINE TEST1 |
DOUBLE PRECISION BPRIM |
COMMON /TESTCOM/ BPRIM(100) |
C$OMP THREADPRIVATE(/TESTCOM/) |
INTEGER I |
DO I = 1, 100 |
BPRIM( I ) = DBLE( I ) |
END DO |
RETURN |
END |
SUBROUTINE TEST2 |
DOUBLE PRECISION BPRIM |
COMMON /TESTCOM/ BPRIM(100) |
C$OMP THREADPRIVATE(/TESTCOM/) |
INTEGER I, IDUM(50) |
DO I = 1, 50 |
IDUM(I) = I |
END DO |
C$OMP PARALLEL COPYIN(/TESTCOM/) NUM_THREADS(4) |
CALL TEST3 |
C$OMP END PARALLEL |
RETURN |
END |
SUBROUTINE TEST3 |
DOUBLE PRECISION BPRIM |
COMMON /TESTCOM/ BPRIM(100) |
C$OMP THREADPRIVATE(/TESTCOM/) |
INTEGER K |
DO K = 1, 10 |
IF (K.NE.BPRIM(K)) CALL ABORT |
END DO |
RETURN |
END |
/libgomp.fortran/pr28390.f
0,0 → 1,8
! PR fortran/28390 |
! LASTPRIVATE(i) must give i the sequentially-last value, i.e. the |
! value of the loop index after the final iteration: 101, not 100. |
program pr28390 |
integer i |
!$omp parallel do lastprivate(i) |
do i=1,100 |
end do |
if (i.ne.101) call abort |
end |
/libgomp.fortran/reference1.f90
0,0 → 1,34
! { dg-do run } |
!$ use omp_lib |
|
integer :: i, j, k |
double precision :: d |
i = 6 |
j = 19 |
k = 0 |
d = 24.5 |
call test (i, j, k, d) |
if (i .ne. 38) call abort |
if (iand (k, 255) .ne. 0) call abort |
if (iand (k, 65280) .eq. 0) then |
if (k .ne. 65536 * 4) call abort |
end if |
contains |
subroutine test (i, j, k, d) |
integer :: i, j, k |
double precision :: d |
|
!$omp parallel firstprivate (d) private (j) num_threads (4) reduction (+:k) |
if (i .ne. 6 .or. d .ne. 24.5 .or. k .ne. 0) k = k + 1 |
if (omp_get_num_threads () .ne. 4) k = k + 256 |
d = d / 2 |
j = 8 |
k = k + 65536 |
!$omp barrier |
if (d .ne. 12.25 .or. j .ne. 8) k = k + 1 |
!$omp single |
i = i + 32 |
!$omp end single nowait |
!$omp end parallel |
end subroutine test |
end |
/libgomp.fortran/reference2.f90
0,0 → 1,21
! { dg-do run } |
! Privatizing a dummy array argument (passed by reference): inside the |
! parallel region each thread gets its own copy of "a", stores 15 into |
! it, and verifies the value through a call to an external-looking |
! contained function. |
real, dimension (5) :: b |
b = 5 |
call foo (b) |
contains |
subroutine foo (a) |
real, dimension (5) :: a |
logical :: l |
l = .false. |
!$omp parallel private (a) reduction (.or.:l) |
a = 15 |
l = bar (a) |
!$omp end parallel |
if (l) call abort |
end subroutine |
! Returns .true. iff any element of the private copy differs from 15. |
function bar (a) |
real, dimension (5) :: a |
logical :: bar |
bar = any (a .ne. 15) |
end function |
end |
/libgomp.fortran/omp_workshare1.f
0,0 → 1,48
C****************************************************************************** |
C FILE: omp_workshare1.f |
C DESCRIPTION: |
C OpenMP Example - Loop Work-sharing - Fortran Version |
C In this example, the iterations of a loop are scheduled dynamically |
C across the team of threads. A thread will perform CHUNK iterations |
C at a time before being scheduled for the next CHUNK of work. |
C AUTHOR: Blaise Barney 5/99 |
C LAST REVISED: 01/09/04 |
C****************************************************************************** |
|
PROGRAM WORKSHARE1 |
|
INTEGER NTHREADS, TID, OMP_GET_NUM_THREADS, |
+ OMP_GET_THREAD_NUM, N, CHUNKSIZE, CHUNK, I |
PARAMETER (N=100) |
PARAMETER (CHUNKSIZE=10) |
REAL A(N), B(N), C(N) |
|
! Some initializations |
DO I = 1, N |
A(I) = I * 1.0 |
B(I) = A(I) |
ENDDO |
CHUNK = CHUNKSIZE |
|
!$OMP PARALLEL SHARED(A,B,C,NTHREADS,CHUNK) PRIVATE(I,TID) |
|
TID = OMP_GET_THREAD_NUM() |
IF (TID .EQ. 0) THEN |
NTHREADS = OMP_GET_NUM_THREADS() |
PRINT *, 'Number of threads =', NTHREADS |
END IF |
PRINT *, 'Thread',TID,' starting...' |
|
!$OMP DO SCHEDULE(DYNAMIC,CHUNK) |
DO I = 1, N |
C(I) = A(I) + B(I) |
WRITE(*,100) TID,I,C(I) |
100 FORMAT(' Thread',I2,': C(',I3,')=',F8.2) |
ENDDO |
!$OMP END DO NOWAIT |
|
PRINT *, 'Thread',TID,' done.' |
|
!$OMP END PARALLEL |
|
END |
/libgomp.fortran/omp_orphan.f
0,0 → 1,44
C****************************************************************************** |
C FILE: omp_orphan.f |
C DESCRIPTION: |
C OpenMP Example - Parallel region with an orphaned directive - Fortran |
C Version |
C This example demonstrates a dot product being performed by an orphaned |
C loop reduction construct. Scoping of the reduction variable is critical. |
C AUTHOR: Blaise Barney 5/99 |
C LAST REVISED: |
C****************************************************************************** |
|
PROGRAM ORPHAN |
COMMON /DOTDATA/ A, B, SUM |
INTEGER I, VECLEN |
PARAMETER (VECLEN = 100) |
REAL*8 A(VECLEN), B(VECLEN), SUM |
|
DO I=1, VECLEN |
A(I) = 1.0 * I |
B(I) = A(I) |
ENDDO |
SUM = 0.0 |
!$OMP PARALLEL |
CALL DOTPROD |
!$OMP END PARALLEL |
WRITE(*,*) "Sum = ", SUM |
END |
|
|
|
SUBROUTINE DOTPROD |
COMMON /DOTDATA/ A, B, SUM |
INTEGER I, TID, OMP_GET_THREAD_NUM, VECLEN |
PARAMETER (VECLEN = 100) |
REAL*8 A(VECLEN), B(VECLEN), SUM |
|
TID = OMP_GET_THREAD_NUM() |
!$OMP DO REDUCTION(+:SUM) |
DO I=1, VECLEN |
SUM = SUM + (A(I)*B(I)) |
PRINT *, ' TID= ',TID,'I= ',I |
ENDDO |
RETURN |
END |
/libgomp.fortran/omp_hello.f
0,0 → 1,36
C****************************************************************************** |
C FILE: omp_hello.f |
C DESCRIPTION: |
C OpenMP Example - Hello World - Fortran Version |
C In this simple example, the master thread forks a parallel region. |
C All threads in the team obtain their unique thread number and print it. |
C The master thread only prints the total number of threads. Two OpenMP |
C library routines are used to obtain the number of threads and each |
C thread's number. |
C AUTHOR: Blaise Barney 5/99 |
C LAST REVISED: |
C****************************************************************************** |
|
PROGRAM HELLO |
|
INTEGER NTHREADS, TID, OMP_GET_NUM_THREADS, |
+ OMP_GET_THREAD_NUM |
|
C Fork a team of threads giving them their own copies of variables |
!$OMP PARALLEL PRIVATE(NTHREADS, TID) |
|
|
C Obtain thread number |
TID = OMP_GET_THREAD_NUM() |
PRINT *, 'Hello World from thread = ', TID |
|
C Only master thread does this |
IF (TID .EQ. 0) THEN |
NTHREADS = OMP_GET_NUM_THREADS() |
PRINT *, 'Number of threads = ', NTHREADS |
END IF |
|
C All threads join master thread and disband |
!$OMP END PARALLEL |
|
END |
/libgomp.fortran/omp_cond2.f
0,0 → 1,22
c Test conditional compilation in fixed form if -fno-openmp |
! { dg-options "-fno-openmp" } |
10 foo = 2 |
&56 |
if (foo.ne.256) call abort |
bar = 26 |
!$2 0 ba |
c$ +r = 42 |
!$ bar = 62 |
!$ bar = bar + 1 |
if (bar.ne.26) call abort |
baz = bar |
*$ 0baz = 5 |
C$ +12! Comment |
c$ !4 |
!$ +!Another comment |
*$ &2 |
!$ X baz = 0 ! Not valid OpenMP conditional compilation lines |
! $ baz = 1 |
c$ 10&baz = 2 |
if (baz.ne.26) call abort |
end |
/libgomp.fortran/omp_atomic1.f90
0,0 → 1,39
! { dg-do run } |
integer (kind = 4) :: a |
integer (kind = 2) :: b |
real :: c, f |
double precision :: d |
integer, dimension (10) :: e |
a = 1 |
b = 2 |
c = 3 |
d = 4 |
e = 5 |
f = 6 |
!$omp atomic |
a = a + 4 |
!$omp atomic |
b = 4 - b |
!$omp atomic |
c = c * 2 |
!$omp atomic |
d = 2 / d |
if (a .ne. 5 .or. b .ne. 2 .or. c .ne. 6 .or. d .ne. 0.5) call abort |
d = 1.2 |
!$omp atomic |
a = a + c + d |
!$omp atomic |
b = b - (a + c + d) |
if (a .ne. 12 .or. b .ne. -17) call abort |
!$omp atomic |
a = c + d + a |
!$omp atomic |
b = a + c + d - b |
if (a .ne. 19 .or. b .ne. 43) call abort |
!$omp atomic |
b = (a + c + d) - b |
a = 32 |
!$omp atomic |
a = a / 3.4 |
if (a .ne. 9 .or. b .ne. -16) call abort |
end |
/libgomp.fortran/omp_atomic2.f90
0,0 → 1,54
! { dg-do run } |
real, dimension (20) :: r |
integer, dimension (20) :: d |
integer :: i, j, k, n |
integer (kind = 2) :: a, b, c |
|
do 10 i = 1, 20 |
r(i) = i |
10 d(i) = 21 - i |
|
n = 20 |
call foo (r, d, n) |
|
if (n .ne. 22) call abort |
if (any (r .ne. 33)) call abort |
|
i = 1 |
j = 18 |
k = 23 |
!$omp atomic |
i = min (i, j, k, n) |
if (i .ne. 1) call abort |
!$omp atomic |
i = max (j, n, k, i) |
if (i .ne. 23) call abort |
|
a = 1 |
b = 18 |
c = 23 |
!$omp atomic |
a = min (a, b, c) |
if (a .ne. 1) call abort |
!$omp atomic |
a = max (a, b, c) |
if (a .ne. 23) call abort |
|
contains |
function bar (i) |
real bar |
integer i |
bar = 12.0 + i |
end function bar |
|
subroutine foo (x, y, n) |
integer i, y (*), n |
real x (*) |
do i = 1, n |
!$omp atomic |
x(y(i)) = x(y(i)) + bar (i) |
end do |
!$omp atomic |
n = n + 2 |
end subroutine foo |
end |
/libgomp.fortran/fortran.exp
0,0 → 1,20
# Driver for the libgomp Fortran tests.  The tests are only run when the |
# just-built libgfortran is present in the build tree (checked below), |
# since they must link against -lgfortranbegin/-lgfortran. |
set lang_library_path "../libgfortran/.libs" |
set lang_test_file "${lang_library_path}/libgfortranbegin.a" |
set lang_link_flags "-lgfortranbegin -lgfortran" |
|
load_lib libgomp-dg.exp |
|
# Initialize dg. |
dg-init |
|
if [file exists "${blddir}/${lang_test_file}"] { |
|
# Gather a list of all tests. |
# Matches *.f, *.F, *.f90/F90, *.f95/F95 and *.f03/F03 sources. |
set tests [lsort [find $srcdir/$subdir *.\[fF\]{,90,95,03}]] |
|
# Main loop. |
gfortran-dg-runtest $tests "" |
} |
|
# All done. |
dg-finish |
/libgomp.fortran/sharing1.f90
0,0 → 1,29
! { dg-do run } |
|
use omp_lib |
integer :: i, j, k |
logical :: l |
common /b/ i, j |
i = 4 |
j = 8 |
l = .false. |
!$omp parallel private (k) firstprivate (i) shared (j) num_threads (2) & |
!$omp& reduction (.or.:l) |
if (i .ne. 4 .or. j .ne. 8) l = .true. |
!$omp barrier |
k = omp_get_thread_num () |
if (k .eq. 0) then |
i = 14 |
j = 15 |
end if |
!$omp barrier |
if (k .eq. 1) then |
if (i .ne. 4 .or. j .ne. 15) l = .true. |
i = 24 |
j = 25 |
end if |
!$omp barrier |
if (j .ne. 25 .or. i .ne. (k * 10 + 14)) l = .true. |
!$omp end parallel |
if (l .or. j .ne. 25) call abort |
end |
/libgomp.fortran/sharing2.f90
0,0 → 1,32
! { dg-do run } |
|
use omp_lib |
integer :: i, j, k, m, n |
logical :: l |
equivalence (i, m) |
equivalence (j, n) |
i = 4 |
j = 8 |
l = .false. |
!$omp parallel private (k) firstprivate (i) shared (j) num_threads (2) & |
!$omp& reduction (.or.:l) |
l = l .or. i .ne. 4 |
l = l .or. j .ne. 8 |
!$omp barrier |
k = omp_get_thread_num () |
if (k .eq. 0) then |
i = 14 |
j = 15 |
end if |
!$omp barrier |
if (k .eq. 1) then |
if (i .ne. 4 .or. j .ne. 15) l = .true. |
i = 24 |
j = 25 |
end if |
!$omp barrier |
if (j .ne. 25 .or. i .ne. (k * 10 + 14)) l = .true. |
!$omp end parallel |
if (l) call abort |
if (j .ne. 25) call abort |
end |
/libgomp.fortran/lib2.f
0,0 → 1,76
C { dg-do run } |
|
USE OMP_LIB |
|
DOUBLE PRECISION :: D, E |
LOGICAL :: L |
INTEGER (KIND = OMP_LOCK_KIND) :: LCK |
INTEGER (KIND = OMP_NEST_LOCK_KIND) :: NLCK |
|
D = OMP_GET_WTIME () |
|
CALL OMP_INIT_LOCK (LCK) |
CALL OMP_SET_LOCK (LCK) |
IF (OMP_TEST_LOCK (LCK)) CALL ABORT |
CALL OMP_UNSET_LOCK (LCK) |
IF (.NOT. OMP_TEST_LOCK (LCK)) CALL ABORT |
IF (OMP_TEST_LOCK (LCK)) CALL ABORT |
CALL OMP_UNSET_LOCK (LCK) |
CALL OMP_DESTROY_LOCK (LCK) |
|
CALL OMP_INIT_NEST_LOCK (NLCK) |
IF (OMP_TEST_NEST_LOCK (NLCK) .NE. 1) CALL ABORT |
CALL OMP_SET_NEST_LOCK (NLCK) |
IF (OMP_TEST_NEST_LOCK (NLCK) .NE. 3) CALL ABORT |
CALL OMP_UNSET_NEST_LOCK (NLCK) |
CALL OMP_UNSET_NEST_LOCK (NLCK) |
IF (OMP_TEST_NEST_LOCK (NLCK) .NE. 2) CALL ABORT |
CALL OMP_UNSET_NEST_LOCK (NLCK) |
CALL OMP_UNSET_NEST_LOCK (NLCK) |
CALL OMP_DESTROY_NEST_LOCK (NLCK) |
|
CALL OMP_SET_DYNAMIC (.TRUE.) |
IF (.NOT. OMP_GET_DYNAMIC ()) CALL ABORT |
CALL OMP_SET_DYNAMIC (.FALSE.) |
IF (OMP_GET_DYNAMIC ()) CALL ABORT |
|
CALL OMP_SET_NESTED (.TRUE.) |
IF (.NOT. OMP_GET_NESTED ()) CALL ABORT |
CALL OMP_SET_NESTED (.FALSE.) |
IF (OMP_GET_NESTED ()) CALL ABORT |
|
CALL OMP_SET_NUM_THREADS (5) |
IF (OMP_GET_NUM_THREADS () .NE. 1) CALL ABORT |
IF (OMP_GET_MAX_THREADS () .NE. 5) CALL ABORT |
IF (OMP_GET_THREAD_NUM () .NE. 0) CALL ABORT |
CALL OMP_SET_NUM_THREADS (3) |
IF (OMP_GET_NUM_THREADS () .NE. 1) CALL ABORT |
IF (OMP_GET_MAX_THREADS () .NE. 3) CALL ABORT |
IF (OMP_GET_THREAD_NUM () .NE. 0) CALL ABORT |
L = .FALSE. |
C$OMP PARALLEL REDUCTION (.OR.:L) |
L = OMP_GET_NUM_THREADS () .NE. 3 |
L = L .OR. (OMP_GET_THREAD_NUM () .LT. 0) |
L = L .OR. (OMP_GET_THREAD_NUM () .GE. 3) |
C$OMP MASTER |
L = L .OR. (OMP_GET_THREAD_NUM () .NE. 0) |
C$OMP END MASTER |
C$OMP END PARALLEL |
IF (L) CALL ABORT |
|
IF (OMP_GET_NUM_PROCS () .LE. 0) CALL ABORT |
IF (OMP_IN_PARALLEL ()) CALL ABORT |
C$OMP PARALLEL REDUCTION (.OR.:L) |
L = .NOT. OMP_IN_PARALLEL () |
C$OMP END PARALLEL |
C$OMP PARALLEL REDUCTION (.OR.:L) IF (.TRUE.) |
L = .NOT. OMP_IN_PARALLEL () |
C$OMP END PARALLEL |
|
E = OMP_GET_WTIME () |
IF (D .GT. E) CALL ABORT |
D = OMP_GET_WTICK () |
C Negative precision is definitely wrong, |
C bigger than 1s clock resolution is also strange |
IF (D .LE. 0 .OR. D .GT. 1.) CALL ABORT |
END |
/lib/libgomp-dg.exp
0,0 → 1,225
# Damn dejagnu for not having proper library search paths for load_lib. |
# We have to explicitly load everything that gcc-dg.exp wants to load. |
|
proc load_gcc_lib { filename } { |
global srcdir loaded_libs |
|
load_file $srcdir/../../gcc/testsuite/lib/$filename |
set loaded_libs($filename) "" |
} |
|
load_lib dg.exp |
load_gcc_lib file-format.exp |
load_gcc_lib target-supports.exp |
load_gcc_lib target-supports-dg.exp |
load_gcc_lib scanasm.exp |
load_gcc_lib scandump.exp |
load_gcc_lib scanrtl.exp |
load_gcc_lib scantree.exp |
load_gcc_lib scanipa.exp |
load_gcc_lib prune.exp |
load_gcc_lib target-libpath.exp |
load_gcc_lib wrapper.exp |
load_gcc_lib gcc-defs.exp |
load_gcc_lib gcc-dg.exp |
load_gcc_lib gfortran-dg.exp |
|
set dg-do-what-default run |
|
# |
# GCC_UNDER_TEST is the compiler under test. |
# |
|
set libgomp_compile_options "" |
|
# |
# libgomp_init -- This gets run more than it should be.... |
# |
|
if [info exists TOOL_OPTIONS] { |
set multilibs [get_multilibs $TOOL_OPTIONS] |
} else { |
set multilibs [get_multilibs] |
} |
|
proc libgomp_init { args } { |
global srcdir blddir objdir tool_root_dir |
global libgomp_initialized |
global tmpdir |
global gluefile wrap_flags |
global ALWAYS_CFLAGS |
global CFLAGS |
global TOOL_EXECUTABLE TOOL_OPTIONS |
global GCC_UNDER_TEST |
global TESTING_IN_BUILD_TREE |
global target_triplet |
global ld_library_path |
global lang_test_file |
global lang_library_path |
global lang_link_flags |
|
set blddir [lookfor_file [get_multilibs] libgomp] |
|
# We set LC_ALL and LANG to C so that we get the same error |
# messages as expected. |
setenv LC_ALL C |
setenv LANG C |
|
if ![info exists GCC_UNDER_TEST] then { |
if [info exists TOOL_EXECUTABLE] { |
set GCC_UNDER_TEST $TOOL_EXECUTABLE |
} else { |
set GCC_UNDER_TEST "[find_gcc]" |
} |
} |
|
if ![info exists tmpdir] { |
set tmpdir "/tmp" |
} |
|
if [info exists gluefile] { |
unset gluefile |
} |
|
if {![info exists CFLAGS]} { |
set CFLAGS "" |
} |
|
# Locate libgcc.a so we don't need to account for different values of |
# SHLIB_EXT on different platforms |
set gccdir [lookfor_file $tool_root_dir gcc/libgcc.a] |
if {$gccdir != ""} { |
set gccdir [file dirname $gccdir] |
} |
|
# Compute what needs to be put into LD_LIBRARY_PATH |
set ld_library_path ".:${blddir}/.libs" |
|
if { [info exists lang_test_file] && [file exists "${blddir}/"] } { |
append ld_library_path ":${blddir}/${lang_library_path}" |
} |
|
# Compute what needs to be added to the existing LD_LIBRARY_PATH. |
if {$gccdir != ""} { |
append ld_library_path ":${gccdir}" |
set compiler [lindex $GCC_UNDER_TEST 0] |
|
if { [is_remote host] == 0 && [which $compiler] != 0 } { |
foreach i "[exec $compiler --print-multi-lib]" { |
set mldir "" |
regexp -- "\[a-z0-9=_/\.-\]*;" $i mldir |
set mldir [string trimright $mldir "\;@"] |
if { "$mldir" == "." } { |
continue |
} |
if { [llength [glob -nocomplain ${gccdir}/${mldir}/libgcc_s*.so.*]] >= 1 } { |
append ld_library_path ":${gccdir}/${mldir}" |
} |
} |
} |
} |
set_ld_library_path_env_vars |
|
set ALWAYS_CFLAGS "" |
lappend ALWAYS_CFLAGS "additional_flags=-B${blddir}/" |
lappend ALWAYS_CFLAGS "additional_flags=-I${blddir}" |
lappend ALWAYS_CFLAGS "additional_flags=-I${srcdir}/.." |
lappend ALWAYS_CFLAGS "ldflags=-L${blddir}/.libs -lgomp" |
if { [info exists lang_test_file] && [file exists "${blddir}/"] } { |
lappend ALWAYS_CFLAGS "ldflags=-L${blddir}/${lang_library_path} ${lang_link_flags}" |
} |
|
# We use atomic operations in the testcases to validate results. |
if [istarget i?86-*-*] { |
lappend ALWAYS_CFLAGS "additional_flags=-march=i486" |
} |
if [istarget sparc*-*-*] { |
lappend ALWAYS_CFLAGS "additional_flags=-mcpu=v9" |
} |
|
if [info exists TOOL_OPTIONS] { |
lappend ALWAYS_CFLAGS "additional_flags=$TOOL_OPTIONS" |
} |
|
# Make sure that lines are not wrapped. That can confuse the |
# error-message parsing machinery. |
lappend ALWAYS_CFLAGS "additional_flags=-fmessage-length=0" |
|
# And, gee, turn on OpenMP. |
lappend ALWAYS_CFLAGS "additional_flags=-fopenmp" |
} |
|
# |
# libgomp_target_compile -- compile a source file |
# |
|
proc libgomp_target_compile { source dest type options } { |
global tmpdir |
global libgomp_compile_options |
global gluefile wrap_flags |
global ALWAYS_CFLAGS |
global GCC_UNDER_TEST |
|
libgomp_init |
|
if { [target_info needs_status_wrapper] != "" && [info exists gluefile] } { |
lappend options "libs=${gluefile}" |
lappend options "ldflags=${wrap_flags}" |
} |
|
lappend options "additional_flags=[libio_include_flags]" |
lappend options "compiler=$GCC_UNDER_TEST" |
|
set options [concat $libgomp_compile_options $options] |
|
set options [concat "$ALWAYS_CFLAGS" $options] |
|
set options [dg-additional-files-options $options $source] |
|
set result [target_compile $source $dest $type $options] |
|
return $result |
} |
|
# ??? The same as in standard.exp. Why doesn't anyone else have to |
# define this? |
|
proc libgomp_load { program args } { |
if { [llength $args] > 0 } { |
set program_args [lindex $args 0] |
} else { |
set program_args "" |
} |
|
if { [llength $args] > 1 } { |
set input_file [lindex $args 1] |
} else { |
set input_file "" |
} |
return [remote_load target $program $program_args $input_file] |
} |
|
proc libgomp_option_help { } { |
send_user " --additional_options,OPTIONS\t\tUse OPTIONS to compile the testcase files. OPTIONS should be comma-separated.\n" |
} |
|
proc libgomp_option_proc { option } { |
if [regexp "^--additional_options," $option] { |
global libgomp_compile_options |
regsub "--additional_options," $option "" option |
foreach x [split $option ","] { |
lappend libgomp_compile_options "additional_flags=$x" |
} |
return 1 |
} else { |
return 0 |
} |
} |
|
proc libgomp-dg-test { prog do_what extra_tool_flags } { |
return [gcc-dg-test-1 libgomp_target_compile $prog $do_what $extra_tool_flags] |
} |
|
proc libgomp-dg-prune { system text } { |
return [gcc-dg-prune $system $text] |
} |
/Makefile.am
0,0 → 1,13
## Process this file with automake to produce Makefile.in. |
|
AUTOMAKE_OPTIONS = foreign dejagnu |
|
# May be used by various substitution variables. |
gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER) |
|
# Prefer the in-tree expect/runtest binaries when they exist (combined |
# tree build); otherwise fall back to whatever is found in $PATH. |
EXPECT = $(shell if test -f $(top_builddir)/../expect/expect; then \ |
echo $(top_builddir)/../expect/expect; else echo expect; fi) |
|
_RUNTEST = $(shell if test -f $(top_srcdir)/../dejagnu/runtest; then \ |
echo $(top_srcdir)/../dejagnu/runtest; else echo runtest; fi) |
RUNTEST = "$(_RUNTEST) $(AM_RUNTESTFLAGS)" |
/libgomp.c/appendix-a/a.2.1.c
0,0 → 1,45
/* { dg-do run } */ |
|
/* OpenMP spec Appendix A.2 example: the first read of x races with the |
   writer thread on purpose (it may observe either 2 or 5), but after |
   the barrier -- which implies a flush -- both threads must see 5.  */ |
#include <stdio.h> |
#include <omp.h> |
extern void abort (void); |
int |
main () |
{ |
int bad, x; |
x = 2; |
bad = 0; |
#pragma omp parallel num_threads(2) shared(x, bad) |
{ |
if (omp_get_thread_num () == 0) |
{ |
volatile int i; |
for (i = 0; i < 100000000; i++) |
x = 5; |
} |
else |
{ |
/* Print 1: the following read of x has a race */ |
if (x != 2 && x != 5) |
bad = 1; |
} |
#pragma omp barrier |
if (omp_get_thread_num () == 0) |
{ |
/* x must be 5 now. */ |
if (x != 5) |
bad = 1; |
} |
else |
{ |
/* x must be 5 now. */ |
if (x != 5) |
bad = 1; |
} |
} |
|
if (bad) |
abort (); |
|
return 0; |
} |
/libgomp.c/appendix-a/a.21.1.c
0,0 → 1,25
/* { dg-do run } */ |
|
#include <stdio.h> |
void |
work (int k) |
{ |
#pragma omp ordered |
printf (" %d\n", k); |
} |
|
void |
a21 (int lb, int ub, int stride) |
{ |
int i; |
#pragma omp parallel for ordered schedule(dynamic) |
for (i = lb; i < ub; i += stride) |
work (i); |
} |
|
int |
main () |
{ |
a21 (0, 100, 5); |
return 0; |
} |
/libgomp.c/appendix-a/a.3.1.c
0,0 → 1,11
/* { dg-do run } */ |
|
/* The _OPENMP macro must be predefined when compiling with OpenMP |
   support (this testsuite adds -fopenmp -- see libgomp-dg.exp), so the |
   message below should be printed.  */ |
#include <stdio.h> |
int |
main () |
{ |
# ifdef _OPENMP |
printf ("Compiled by an OpenMP-compliant implementation.\n"); |
# endif |
return 0; |
} |
/libgomp.c/appendix-a/a.4.1.c
0,0 → 1,38
/* { dg-do run } */ |
|
#include <omp.h> |
extern void abort (void); |
void |
subdomain (float *x, int istart, int ipoints) |
{ |
int i; |
for (i = 0; i < ipoints; i++) |
x[istart + i] = 123.456; |
} |
|
void |
sub (float *x, int npoints) |
{ |
int iam, nt, ipoints, istart; |
#pragma omp parallel default(shared) private(iam,nt,ipoints,istart) |
{ |
iam = omp_get_thread_num (); |
nt = omp_get_num_threads (); |
ipoints = npoints / nt; /* size of partition */ |
istart = iam * ipoints; /* starting array index */ |
if (iam == nt - 1) /* last thread may do more */ |
ipoints = npoints - istart; |
subdomain (x, istart, ipoints); |
} |
} |
int |
main () |
{ |
int i; |
float array[10000]; |
sub (array, 10000); |
for (i = 0; i < 10000; i++) |
if (array[i] < 123.45 || array[i] > 123.46) |
abort (); |
return 0; |
} |
/libgomp.c/appendix-a/a.40.1.c
0,0 → 1,48
/* { dg-do compile } */ |
|
#include <omp.h> |
typedef struct |
{ |
int a, b; |
omp_nest_lock_t lck; |
} pair; |
int work1 (); |
int work2 (); |
int work3 (); |
void |
incr_a (pair * p, int a) |
{ |
/* Called only from incr_pair, no need to lock. */ |
p->a += a; |
} |
|
void |
incr_b (pair * p, int b) |
{ |
/* Called both from incr_pair and elsewhere, */ |
/* so need a nestable lock. */ |
omp_set_nest_lock (&p->lck); |
p->b += b; |
omp_unset_nest_lock (&p->lck); |
} |
|
void |
incr_pair (pair * p, int a, int b) |
{ |
omp_set_nest_lock (&p->lck); |
incr_a (p, a); |
incr_b (p, b); |
omp_unset_nest_lock (&p->lck); |
} |
|
void |
a40 (pair * p) |
{ |
#pragma omp parallel sections |
{ |
#pragma omp section |
incr_pair (p, work1 (), work2 ()); |
#pragma omp section |
incr_b (p, work3 ()); |
} |
} |
/libgomp.c/appendix-a/a.5.1.c
0,0 → 1,13
/* { dg-do run } */ |
|
/* With dynamic adjustment enabled the implementation may deliver fewer |
   than the 10 requested threads; this test only checks that the |
   combination runs to completion.  */ |
#include <omp.h> |
int |
main () |
{ |
omp_set_dynamic (1); |
#pragma omp parallel num_threads(10) |
{ |
/* do work here */ |
} |
return 0; |
} |
/libgomp.c/appendix-a/a.15.1.c
0,0 → 1,44
/* { dg-do run } */

/* OpenMP spec Appendix A.15: nested parallelism.  sub2 opens a parallel
   region; when it is reached from inside sub1's region, regions nest.
   The barrier in sub3 binds to the innermost enclosing region.  */

#include <stdio.h>
/* FIX: <omp.h> was missing, so omp_get_thread_num, omp_get_num_threads
   and omp_get_nested were implicitly declared -- invalid since C99 and
   rejected by modern compilers.  */
#include <omp.h>

/* Report which thread executes this call and the current team size /
   nesting setting, together with the caller-supplied value n.  */
void
work (int n)
{
  printf ("[%d of %d], nested = %d, n = %d\n", omp_get_thread_num (),
	  omp_get_num_threads (), omp_get_nested (), n);
}

/* Call work on both sides of a barrier; the barrier binds to the
   innermost parallel region enclosing the call chain.  */
void
sub3 (int n)
{
  work (n);
#pragma omp barrier
  work (n);
}

/* Open a parallel region around sub3; nested when called from sub1.  */
void
sub2 (int k)
{
#pragma omp parallel shared(k)
  sub3 (k);
}

/* Distribute sub2 calls over a team; each call opens a nested region.  */
void
sub1 (int n)
{
  int i;
#pragma omp parallel private(i) shared(n)
  {
#pragma omp for
    for (i = 0; i < n; i++)
      sub2 (i);
  }
}

int
main ()
{
  sub1 (2);   /* nested: parallel inside parallel */
  sub2 (15);  /* single level of parallelism */
  sub3 (20);  /* no enclosing region at all */
  return 0;
}
/libgomp.c/appendix-a/a.16.1.c
0,0 → 1,47
/* { dg-do run } */ |
|
#include <stdio.h> |
|
float |
work1 (int i) |
{ |
return 1.0 * i; |
} |
|
float |
work2 (int i) |
{ |
return 2.0 * i; |
} |
|
void |
a16 (float *x, float *y, int *index, int n) |
{ |
int i; |
#pragma omp parallel for shared(x, y, index, n) |
for (i = 0; i < n; i++) |
{ |
#pragma omp atomic |
x[index[i]] += work1 (i); |
y[i] += work2 (i); |
} |
} |
int |
main () |
{ |
float x[1000]; |
float y[10000]; |
int index[10000]; |
int i; |
for (i = 0; i < 10000; i++) |
{ |
index[i] = i % 1000; |
y[i] = 0.0; |
} |
for (i = 0; i < 1000; i++) |
x[i] = 0.0; |
a16 (x, y, index, 10000); |
for (i = 0; i < 10; i++) |
printf ("x[%d] = %f, y[%d] = %f\n", i, x[i], i, y[i]); |
return 0; |
} |
/libgomp.c/appendix-a/a.26.1.c
0,0 → 1,17
/* { dg-do run } */ |
|
/* After a parallel region, variables that appeared in private / |
   firstprivate clauses have unspecified values in the enclosing |
   context -- the printf below documents exactly that.  */ |
#include <stdio.h> |
int |
main () |
{ |
int i, j; |
i = 1; |
j = 2; |
#pragma omp parallel private(i) firstprivate(j) |
{ |
i = 3; |
j = j + 2; |
} |
printf ("%d %d\n", i, j); /* i and j are undefined */ |
return 0; |
} |
/libgomp.c/appendix-a/a.33.3.c
0,0 → 1,16
/* { dg-do compile } */ |
|
/* One thread of the team allocates and initializes a lock inside the |
   single construct; copyprivate(lock_ptr) then broadcasts the pointer |
   to every other thread, so all threads return the same lock. |
   NOTE(review): the malloc result is used unchecked -- tolerable in a |
   compile-only spec example, not in production code.  */ |
#include <stdio.h> |
#include <stdlib.h> |
#include <omp.h> |
omp_lock_t * |
new_lock () |
{ |
omp_lock_t *lock_ptr; |
#pragma omp single copyprivate(lock_ptr) |
{ |
lock_ptr = (omp_lock_t *) malloc (sizeof (omp_lock_t)); |
omp_init_lock (lock_ptr); |
} |
return lock_ptr; |
} |
/libgomp.c/appendix-a/a.18.1.c
0,0 → 1,67
/* { dg-do run } */ |
|
#include <omp.h> |
#include <stdio.h> |
|
extern void abort (void); |
|
#define NUMBER_OF_THREADS 4 |
|
int synch[NUMBER_OF_THREADS]; |
int work[NUMBER_OF_THREADS]; |
int result[NUMBER_OF_THREADS]; |
int |
fn1 (int i) |
{ |
return i * 2; |
} |
|
int |
fn2 (int a, int b) |
{ |
return a + b; |
} |
|
int |
main () |
{ |
int i, iam, neighbor; |
omp_set_num_threads (NUMBER_OF_THREADS); |
#pragma omp parallel private(iam,neighbor) shared(work,synch) |
{ |
iam = omp_get_thread_num (); |
synch[iam] = 0; |
#pragma omp barrier |
/*Do computation into my portion of work array */ |
work[iam] = fn1 (iam); |
/* Announce that I am done with my work. The first flush |
* ensures that my work is made visible before synch. |
* The second flush ensures that synch is made visible. |
*/ |
#pragma omp flush(work,synch) |
synch[iam] = 1; |
#pragma omp flush(synch) |
/* Wait for neighbor. The first flush ensures that synch is read |
* from memory, rather than from the temporary view of memory. |
* The second flush ensures that work is read from memory, and |
* is done so after the while loop exits. |
*/ |
neighbor = (iam > 0 ? iam : omp_get_num_threads ()) - 1; |
while (synch[neighbor] == 0) |
{ |
#pragma omp flush(synch) |
} |
#pragma omp flush(work,synch) |
/* Read neighbor's values of work array */ |
result[iam] = fn2 (work[neighbor], work[iam]); |
} |
/* output result here */ |
for (i = 0; i < NUMBER_OF_THREADS; i++) |
{ |
neighbor = (i > 0 ? i : NUMBER_OF_THREADS) - 1; |
if (result[i] != i * 2 + neighbor * 2) |
abort (); |
} |
|
return 0; |
} |
/libgomp.c/appendix-a/a.36.1.c
0,0 → 1,31
/* { dg-do run } */ |
|
/* Disabling dynamic adjustment of the number of threads guarantees |
   that the subsequent num-threads request is honoured exactly, which |
   the region below verifies.  */ |
#include <omp.h> |
#include <stdlib.h> |
/* Placeholder worker for one of the 16 partitions.  */ |
void |
do_by_16 (float *x, int iam, int ipoints) |
{ |
} |
|
void |
a36 (float *x, int npoints) |
{ |
int iam, ipoints; |
omp_set_dynamic (0); |
omp_set_num_threads (16); |
#pragma omp parallel shared(x, npoints) private(iam, ipoints) |
{ |
/* With dynamic threads off, exactly 16 threads must be delivered. */ |
if (omp_get_num_threads () != 16) |
abort (); |
iam = omp_get_thread_num (); |
ipoints = npoints / 16; |
do_by_16 (x, iam, ipoints); |
} |
} |
|
int main() |
{ |
float a[10]; |
a36 (a, 10); |
return 0; |
} |
/libgomp.c/appendix-a/a.19.1.c
0,0 → 1,55
/* { dg-do run } */ |
|
int x, *p = &x; |
extern void abort (void); |
void |
f1 (int *q) |
{ |
*q = 1; |
#pragma omp flush |
/* x, p, and *q are flushed */ |
/* because they are shared and accessible */ |
/* q is not flushed because it is not shared. */ |
} |
|
void |
f2 (int *q) |
{ |
#pragma omp barrier |
*q = 2; |
#pragma omp barrier |
/* a barrier implies a flush */ |
/* x, p, and *q are flushed */ |
/* because they are shared and accessible */ |
/* q is not flushed because it is not shared. */ |
} |
|
/* Sum contributions from a two-thread team.  f1 stores 1 into j, f2
   stores 2; each thread adds j (== 1) and then i + j + *p + n
   (== 1 + 2 + 1 + n), so for n == 10 the reduction yields
   2 * (1 + 14) == 30, which main checks.  */
int
g (int n)
{
  int i = 1, j, sum = 0;
  *p = 1;
#pragma omp parallel reduction(+: sum) num_threads(2)
  {
    f1 (&j);
    /* i, n and sum were not flushed */
    /* because they were not accessible in f1 */
    /* j was flushed because it was accessible */
    sum += j;
    f2 (&j);
    /* i, n, and sum were not flushed */
    /* because they were not accessible in f2 */
    /* j was flushed because it was accessible */
    sum += i + j + *p + n;
  }
  return sum;
}
|
int |
main () |
{ |
int result = g (10); |
if (result != 30) |
abort (); |
return 0; |
} |
/libgomp.c/appendix-a/a.29.1.c
0,0 → 1,30
/* { dg-do run } */ |
|
#include <assert.h> |
int A[2][2] = { 1, 2, 3, 4 }; |
/* A.29 example: firstprivate applied to array function parameters
   (which are really pointers), an ordinary array, and a VLA.  The
   private B and C remain pointers into the original A, while D and E
   are genuine per-thread copies of the full arrays.  */
void
f (int n, int B[n][n], int C[])
{
  int D[2][2] = { 1, 2, 3, 4 };
  int E[n][n];
  assert (n >= 2);
  E[1][1] = 4;
#pragma omp parallel firstprivate(B, C, D, E)
  {
    /* Parameters declared as arrays have pointer type inside f.  */
    assert (sizeof (B) == sizeof (int (*)[n]));
    assert (sizeof (C) == sizeof (int *));
    assert (sizeof (D) == 4 * sizeof (int));
    assert (sizeof (E) == n * n * sizeof (int));
    /* Private B and C have values of original B and C. */
    assert (&B[1][1] == &A[1][1]);
    assert (&C[3] == &A[1][1]);
    assert (D[1][1] == 4);
    assert (E[1][1] == 4);
  }
}
int |
main () |
{ |
f (2, A, A[0]); |
return 0; |
} |
/libgomp.c/appendix-a/a.39.1.c
0,0 → 1,38
/* { dg-do run } */ |
|
#include <stdio.h> |
#include <omp.h> |
void |
skip (int i) |
{ |
} |
|
void |
work (int i) |
{ |
} |
int |
main () |
{ |
omp_lock_t lck; |
int id; |
omp_init_lock (&lck); |
#pragma omp parallel shared(lck) private(id) |
{ |
id = omp_get_thread_num (); |
omp_set_lock (&lck); |
/* only one thread at a time can execute this printf */ |
printf ("My thread id is %d.\n", id); |
omp_unset_lock (&lck); |
while (!omp_test_lock (&lck)) |
{ |
skip (id); /* we do not yet have the lock, |
so we must do something else */ |
} |
work (id); /* we now have the lock |
and can do the work */ |
omp_unset_lock (&lck); |
} |
omp_destroy_lock (&lck); |
return 0; |
} |
/libgomp.c/omp-single-2.c
0,0 → 1,38
#include <omp.h> |
|
extern void abort (void); |
|
struct X |
{ |
int a; |
char b; |
int c; |
}; |
|
main() |
{ |
int i = 0; |
struct X x; |
int bad = 0; |
|
#pragma omp parallel private (i, x) shared (bad) |
{ |
i = 5; |
|
#pragma omp single copyprivate (i, x) |
{ |
i++; |
x.a = 23; |
x.b = 42; |
x.c = 26; |
} |
|
if (i != 6 || x.a != 23 || x.b != 42 || x.c != 26) |
bad = 1; |
} |
|
if (bad) |
abort (); |
|
return 0; |
} |
/libgomp.c/vla-1.c
0,0 → 1,60
/* { dg-do run } */ |
|
#include <omp.h> |
#include <stdlib.h> |
#include <string.h> |
|
/* VLA data-sharing test: a char, unsigned short and int VLA are shared,
   firstprivate'd and private'd across a four-thread team.
   Fixes: main's second parameter was declared "char **argv[]" (not a
   valid type for main); the final check incremented the already-tested
   l instead of aborting, making it dead code.  */
int
main (int argc, char **argv)
{
  int n = argc < 5 ? 12 : 31, i, m, l;
  char a[n + 3];
  unsigned short b[n / 2 - 1];
  int c[n * 2 + 1];

  for (i = 0; i < n + 3; i++)
    a[i] = i;
  for (i = 0; i < n / 2 - 1; i++)
    b[i] = (i << 8) | i;
  for (i = 0; i < n * 2 + 1; i++)
    c[i] = (i << 24) | i;
  l = 0;
  m = n;
#pragma omp parallel default (shared) num_threads (4) \
  firstprivate (a, m) private (b, i) reduction (+:l)
  {
    /* The firstprivate copy of a and the shared c keep their values.  */
    for (i = 0; i < m + 3; i++)
      if (a[i] != i)
        l++;
    for (i = 0; i < m * 2 + 1; i++)
      if (c[i] != ((i << 24) | i))
        l++;
#pragma omp barrier
    memset (a, omp_get_thread_num (), m + 3);
    for (i = 0; i < m / 2 - 1; i++)
      b[i] = a[0] + 7;
#pragma omp master
    {
      /* Master's a[0] is its thread number, 0, so c becomes all 16s.  */
      for (i = 0; i < m * 2 + 1; i++)
        c[i] = a[0] + 16;
    }
#pragma omp barrier
    if (a[0] != omp_get_thread_num ())
      l++;
    for (i = 1; i < m + 3; i++)
      if (a[i] != a[0])
        l++;
    for (i = 0; i < m / 2 - 1; i++)
      if (b[i] != a[0] + 7)
        l++;
    for (i = 0; i < m * 2 + 1; i++)
      if (c[i] != 16)
        l++;
  }
  if (l)
    abort ();
  /* c is shared, so the master's stores must be visible here too.  */
  for (i = 0; i < n * 2 + 1; i++)
    if (c[i] != 16)
      abort ();  /* was a dead "l++" after l had already been checked */
  return 0;
}
/libgomp.c/nested-2.c
0,0 → 1,30
#include <omp.h> |
#include <stdlib.h> |
|
/* With nesting disabled, an inner parallel region runs with a team of
   one thread.  Values encode thread_num + num_threads * 256: the outer
   single stores something in [4*256, 4*256 + 4), the inner single must
   see exactly one thread, i.e. 0 + 1 * 256.  */
int
main (void)
{
  int i = -1, j = -1;

  omp_set_nested (0);
  omp_set_dynamic (0);
#pragma omp parallel num_threads (4)
  {
#pragma omp single
    {
      i = omp_get_thread_num () + omp_get_num_threads () * 256;
#pragma omp parallel num_threads (2)
      {
#pragma omp single
        {
          j = omp_get_thread_num () + omp_get_num_threads () * 256;
        }
      }
    }
  }
  if (i < 4 * 256 || i >= 4 * 256 + 4)
    abort ();
  if (j != 256 + 0)
    abort ();
  return 0;
}
/libgomp.c/pr26171.c
0,0 → 1,14
/* PR c/26171 */ |
/* { dg-do run } */ |
/* { dg-options "-fopenmp" } */ |
/* { dg-require-effective-target tls_runtime } */ |
|
int thrv = 0; |
#pragma omp threadprivate (thrv) |
|
int |
main () |
{ |
thrv = 1; |
return 0; |
} |
/libgomp.c/pr24455-1.c
0,0 → 1,6
/* { dg-do compile } */ |
/* { dg-require-effective-target tls } */ |
extern int i; |
#pragma omp threadprivate (i) |
|
int i; |
/libgomp.c/pr24455.c
0,0 → 1,23
/* { dg-do run } */ |
/* { dg-additional-sources pr24455-1.c } */ |
/* { dg-require-effective-target tls_runtime } */ |
|
extern void abort (void); |
|
extern int i; |
#pragma omp threadprivate(i) |
|
int main() |
{ |
i = 0; |
|
#pragma omp parallel default(none) num_threads(10) |
{ |
i++; |
#pragma omp barrier |
if (i != 1) |
abort (); |
} |
|
return 0; |
} |
/libgomp.c/pr30494.c
0,0 → 1,64
/* PR middle-end/30494 */ |
/* { dg-do run } */ |
|
#include <omp.h> |
|
int errors; |
|
int |
check (int m, int i, int *v, int *w) |
{ |
int j; |
int n = omp_get_thread_num (); |
for (j = 0; j < m; j++) |
if (v[j] != j + n) |
#pragma omp atomic |
errors += 1; |
for (j = 0; j < m * 3 + i; j++) |
if (w[j] != j + 10 + n) |
#pragma omp atomic |
errors += 1; |
} |
|
int |
foo (int n, int m) |
{ |
int i; |
#pragma omp for |
for (i = 0; i < 6; i++) |
{ |
int v[n], w[n * 3 + i], j; |
for (j = 0; j < n; j++) |
v[j] = j + omp_get_thread_num (); |
for (j = 0; j < n * 3 + i; j++) |
w[j] = j + 10 + omp_get_thread_num (); |
check (m, i, v, w); |
} |
return 0; |
} |
|
int |
bar (int n, int m) |
{ |
int i; |
#pragma omp parallel for num_threads (4) |
for (i = 0; i < 6; i++) |
{ |
int v[n], w[n * 3 + i], j; |
for (j = 0; j < n; j++) |
v[j] = j + omp_get_thread_num (); |
for (j = 0; j < n * 3 + i; j++) |
w[j] = j + 10 + omp_get_thread_num (); |
check (m, i, v, w); |
} |
return 0; |
} |
|
int |
main (void) |
{ |
#pragma omp parallel num_threads (3) |
foo (128, 128); |
bar (256, 256); |
return 0; |
} |
/libgomp.c/critical-1.c
0,0 → 1,39
/* Trivial test of critical sections. */ |
|
/* { dg-require-effective-target sync_int_long } */ |
|
#include <omp.h> |
#include <sys/time.h> |
#include <unistd.h> |
#include <assert.h> |
#include "libgomp_g.h" |
|
|
static volatile int test = -1; |
|
static void function(void *dummy) |
{ |
int iam = omp_get_thread_num (); |
int old; |
|
GOMP_critical_start (); |
|
old = __sync_lock_test_and_set (&test, iam); |
assert (old == -1); |
|
usleep (10); |
test = -1; |
|
GOMP_critical_end (); |
} |
|
int main() |
{ |
omp_set_dynamic (0); |
|
GOMP_parallel_start (function, NULL, 3); |
function (NULL); |
GOMP_parallel_end (); |
|
return 0; |
} |
/libgomp.c/omp_matvec.c
0,0 → 1,72
/****************************************************************************** |
* OpenMP Example - Matrix-vector multiplication - C/C++ Version |
* FILE: omp_matvec.c |
* DESCRIPTION: |
* This example multiplies all row i elements of matrix A with vector |
* element b(i) and stores the summed products in vector c(i). A total is |
* maintained for the entire matrix. Performed by using the OpenMP loop |
* work-sharing construct. The update of the shared global total is |
* serialized by using the OpenMP critical directive. |
* SOURCE: Blaise Barney 5/99 |
* LAST REVISED: |
******************************************************************************/ |
|
#include <omp.h> |
#include <stdio.h> |
#define SIZE 10 |
|
|
main () |
{ |
|
float A[SIZE][SIZE], b[SIZE], c[SIZE], total; |
int i, j, tid; |
|
/* Initializations */ |
total = 0.0; |
for (i=0; i < SIZE; i++) |
{ |
for (j=0; j < SIZE; j++) |
A[i][j] = (j+1) * 1.0; |
b[i] = 1.0 * (i+1); |
c[i] = 0.0; |
} |
printf("\nStarting values of matrix A and vector b:\n"); |
for (i=0; i < SIZE; i++) |
{ |
printf(" A[%d]= ",i); |
for (j=0; j < SIZE; j++) |
printf("%.1f ",A[i][j]); |
printf(" b[%d]= %.1f\n",i,b[i]); |
} |
printf("\nResults by thread/row:\n"); |
|
/* Create a team of threads and scope variables */ |
#pragma omp parallel shared(A,b,c,total) private(tid,i) |
{ |
tid = omp_get_thread_num(); |
|
/* Loop work-sharing construct - distribute rows of matrix */ |
#pragma omp for private(j) |
for (i=0; i < SIZE; i++) |
{ |
for (j=0; j < SIZE; j++) |
c[i] += (A[i][j] * b[i]); |
|
/* Update and display of running total must be serialized */ |
#pragma omp critical |
{ |
total = total + c[i]; |
printf(" thread %d did row %d\t c[%d]=%.2f\t",tid,i,i,c[i]); |
printf("Running total= %.2f\n",total); |
} |
|
} /* end of parallel i loop */ |
|
} /* end of parallel construct */ |
|
printf("\nMatrix-vector total - sum of all c[] = %.2f\n\n",total); |
|
return 0; |
} |
|
/libgomp.c/pr29947-1.c
0,0 → 1,328
/* PR libgomp/29947 */ |
/* { dg-options "-O2 -fopenmp" } */ |
/* { dg-do run } */ |
|
extern void abort (void); |
|
int cnt; |
|
void |
test1 (long j1, long k1, long j2, long k2) |
{ |
long i, e = 0, c = 0; |
#pragma omp parallel reduction (+:e,c) |
{ |
#pragma omp for schedule (dynamic) |
for (i = j1; i <= k1; ++i) |
{ |
if (i < j2 || i > k2) |
++e; |
++c; |
} |
#pragma omp atomic |
++cnt; |
} |
if (e || (c != j2 > k2 ? 0 : k2 - j2 + 1)) |
abort (); |
} |
|
void |
test2 (long j1, long k1, long j2, long k2) |
{ |
long i, e = 0, c = 0; |
#pragma omp parallel reduction (+:e,c) |
{ |
#pragma omp for schedule (dynamic) |
for (i = k1; i >= j1; --i) |
{ |
if (i < j2 || i > k2) |
++e; |
++c; |
} |
#pragma omp atomic |
++cnt; |
} |
if (e || (c != j2 > k2 ? 0 : k2 - j2 + 1)) |
abort (); |
} |
|
void |
test3 (long j1, long k1, long j2, long k2) |
{ |
long i, e = 0, c = 0; |
#pragma omp parallel reduction (+:e,c) |
{ |
#pragma omp for schedule (guided) |
for (i = j1; i <= k1; ++i) |
{ |
if (i < j2 || i > k2) |
++e; |
++c; |
} |
#pragma omp atomic |
++cnt; |
} |
if (e || (c != j2 > k2 ? 0 : k2 - j2 + 1)) |
abort (); |
} |
|
void |
test4 (long j1, long k1, long j2, long k2) |
{ |
long i, e = 0, c = 0; |
#pragma omp parallel reduction (+:e,c) |
{ |
#pragma omp for schedule (guided) |
for (i = k1; i >= j1; --i) |
{ |
if (i < j2 || i > k2) |
++e; |
++c; |
} |
#pragma omp atomic |
++cnt; |
} |
if (e || (c != j2 > k2 ? 0 : k2 - j2 + 1)) |
abort (); |
} |
|
void |
test5 (long j1, long k1, long j2, long k2) |
{ |
long i, e = 0, c = 0; |
#pragma omp parallel reduction (+:e,c) |
{ |
#pragma omp for schedule (dynamic) ordered |
for (i = j1; i <= k1; ++i) |
{ |
if (i < j2 || i > k2) |
++e; |
#pragma omp ordered |
++c; |
} |
#pragma omp atomic |
++cnt; |
} |
if (e || (c != j2 > k2 ? 0 : k2 - j2 + 1)) |
abort (); |
} |
|
void |
test6 (long j1, long k1, long j2, long k2) |
{ |
long i, e = 0, c = 0; |
#pragma omp parallel reduction (+:e,c) |
{ |
#pragma omp for schedule (dynamic) ordered |
for (i = k1; i >= j1; --i) |
{ |
if (i < j2 || i > k2) |
++e; |
#pragma omp ordered |
++c; |
} |
#pragma omp atomic |
++cnt; |
} |
if (e || (c != j2 > k2 ? 0 : k2 - j2 + 1)) |
abort (); |
} |
|
void |
test7 (long j1, long k1, long j2, long k2) |
{ |
long i, e = 0, c = 0; |
#pragma omp parallel reduction (+:e,c) |
{ |
#pragma omp for schedule (guided) ordered |
for (i = j1; i <= k1; ++i) |
{ |
if (i < j2 || i > k2) |
++e; |
#pragma omp ordered |
++c; |
} |
#pragma omp atomic |
++cnt; |
} |
if (e || (c != j2 > k2 ? 0 : k2 - j2 + 1)) |
abort (); |
} |
|
void |
test8 (long j1, long k1, long j2, long k2) |
{ |
long i, e = 0, c = 0; |
#pragma omp parallel reduction (+:e,c) |
{ |
#pragma omp for schedule (guided) ordered |
for (i = k1; i >= j1; --i) |
{ |
if (i < j2 || i > k2) |
++e; |
#pragma omp ordered |
++c; |
} |
#pragma omp atomic |
++cnt; |
} |
if (e || (c != j2 > k2 ? 0 : k2 - j2 + 1)) |
abort (); |
} |
|
/* Combined parallel-for, dynamic schedule, increasing loop over [j1, k1]:
   exactly the iterations in [j2, k2] must run, each once.  Fixed
   precedence bug: the count check parsed as (c != (j2 > k2)) ? 0 : ...  */
void
test9 (long j1, long k1, long j2, long k2)
{
  long i, e = 0, c = 0;
#pragma omp parallel for reduction (+:e,c) schedule (dynamic)
  for (i = j1; i <= k1; ++i)
    {
      if (i < j2 || i > k2)
        ++e;
      ++c;
    }
  if (e || c != (j2 > k2 ? 0 : k2 - j2 + 1))
    abort ();
}
|
/* Combined parallel-for, dynamic schedule, decreasing loop over [j1, k1]:
   exactly the iterations in [j2, k2] must run, each once.  Fixed
   precedence bug: the count check parsed as (c != (j2 > k2)) ? 0 : ...  */
void
test10 (long j1, long k1, long j2, long k2)
{
  long i, e = 0, c = 0;
#pragma omp parallel for reduction (+:e,c) schedule (dynamic)
  for (i = k1; i >= j1; --i)
    {
      if (i < j2 || i > k2)
        ++e;
      ++c;
    }
  if (e || c != (j2 > k2 ? 0 : k2 - j2 + 1))
    abort ();
}
|
/* Combined parallel-for, guided schedule, increasing loop over [j1, k1]:
   exactly the iterations in [j2, k2] must run, each once.  Fixed
   precedence bug: the count check parsed as (c != (j2 > k2)) ? 0 : ...  */
void
test11 (long j1, long k1, long j2, long k2)
{
  long i, e = 0, c = 0;
#pragma omp parallel for reduction (+:e,c) schedule (guided)
  for (i = j1; i <= k1; ++i)
    {
      if (i < j2 || i > k2)
        ++e;
      ++c;
    }
  if (e || c != (j2 > k2 ? 0 : k2 - j2 + 1))
    abort ();
}
|
/* Combined parallel-for, guided schedule, decreasing loop over [j1, k1]:
   exactly the iterations in [j2, k2] must run, each once.  Fixed
   precedence bug: the count check parsed as (c != (j2 > k2)) ? 0 : ...  */
void
test12 (long j1, long k1, long j2, long k2)
{
  long i, e = 0, c = 0;
#pragma omp parallel for reduction (+:e,c) schedule (guided)
  for (i = k1; i >= j1; --i)
    {
      if (i < j2 || i > k2)
        ++e;
      ++c;
    }
  if (e || c != (j2 > k2 ? 0 : k2 - j2 + 1))
    abort ();
}
|
/* Combined parallel-for, dynamic schedule with ordered, increasing loop
   over [j1, k1]: exactly the iterations in [j2, k2] must run, each once.
   Fixed precedence bug: the count check parsed as
   (c != (j2 > k2)) ? 0 : ...  */
void
test13 (long j1, long k1, long j2, long k2)
{
  long i, e = 0, c = 0;
#pragma omp parallel for reduction (+:e,c) schedule (dynamic) ordered
  for (i = j1; i <= k1; ++i)
    {
      if (i < j2 || i > k2)
        ++e;
#pragma omp ordered
      ++c;
    }
  if (e || c != (j2 > k2 ? 0 : k2 - j2 + 1))
    abort ();
}
|
/* Combined parallel-for, dynamic schedule with ordered, decreasing loop
   over [j1, k1]: exactly the iterations in [j2, k2] must run, each once.
   Fixed precedence bug: the count check parsed as
   (c != (j2 > k2)) ? 0 : ...  */
void
test14 (long j1, long k1, long j2, long k2)
{
  long i, e = 0, c = 0;
#pragma omp parallel for reduction (+:e,c) schedule (dynamic) ordered
  for (i = k1; i >= j1; --i)
    {
      if (i < j2 || i > k2)
        ++e;
#pragma omp ordered
      ++c;
    }
  if (e || c != (j2 > k2 ? 0 : k2 - j2 + 1))
    abort ();
}
|
/* Combined parallel-for, guided schedule with ordered, increasing loop
   over [j1, k1]: exactly the iterations in [j2, k2] must run, each once.
   Fixed precedence bug: the count check parsed as
   (c != (j2 > k2)) ? 0 : ...  */
void
test15 (long j1, long k1, long j2, long k2)
{
  long i, e = 0, c = 0;
#pragma omp parallel for reduction (+:e,c) schedule (guided) ordered
  for (i = j1; i <= k1; ++i)
    {
      if (i < j2 || i > k2)
        ++e;
#pragma omp ordered
      ++c;
    }
  if (e || c != (j2 > k2 ? 0 : k2 - j2 + 1))
    abort ();
}
|
/* Combined parallel-for, guided schedule with ordered, decreasing loop
   over [j1, k1]: exactly the iterations in [j2, k2] must run, each once.
   Fixed precedence bug: the count check parsed as
   (c != (j2 > k2)) ? 0 : ...  */
void
test16 (long j1, long k1, long j2, long k2)
{
  long i, e = 0, c = 0;
#pragma omp parallel for reduction (+:e,c) schedule (guided) ordered
  for (i = k1; i >= j1; --i)
    {
      if (i < j2 || i > k2)
        ++e;
#pragma omp ordered
      ++c;
    }
  if (e || c != (j2 > k2 ? 0 : k2 - j2 + 1))
    abort ();
}
|
int |
__attribute__((noinline)) |
test (long j1, long k1, long j2, long k2) |
{ |
test1 (j1, k1, j2, k2); |
test2 (j1, k1, j2, k2); |
test3 (j1, k1, j2, k2); |
test4 (j1, k1, j2, k2); |
test5 (j1, k1, j2, k2); |
test6 (j1, k1, j2, k2); |
test7 (j1, k1, j2, k2); |
test8 (j1, k1, j2, k2); |
test9 (j1, k1, j2, k2); |
test10 (j1, k1, j2, k2); |
test11 (j1, k1, j2, k2); |
test12 (j1, k1, j2, k2); |
test13 (j1, k1, j2, k2); |
test14 (j1, k1, j2, k2); |
test15 (j1, k1, j2, k2); |
test16 (j1, k1, j2, k2); |
return cnt; |
} |
|
/* Exercise every schedule/direction variant over a plain range, a
   single-iteration range, and two flavors of empty ranges.  */
int
main (void)
{
  static const long bounds[4][2] = { { 1, 5 }, { 5, 5 }, { 5, 4 }, { 5, 1 } };
  int r;

  for (r = 0; r < 4; r++)
    test (bounds[r][0], bounds[r][1], bounds[r][0], bounds[r][1]);
  return 0;
}
/libgomp.c/copyin-1.c
0,0 → 1,34
/* { dg-do run } */ |
/* { dg-options "-O2" } */ |
/* { dg-require-effective-target tls_runtime } */ |
|
#include <omp.h> |
#include <stdlib.h> |
|
int thr = 32; |
#pragma omp threadprivate (thr) |
|
int |
main (void) |
{ |
int l = 0; |
|
omp_set_dynamic (0); |
omp_set_num_threads (6); |
|
#pragma omp parallel copyin (thr) reduction (||:l) |
{ |
l = thr != 32; |
thr = omp_get_thread_num () + 11; |
} |
|
if (l || thr != 11) |
abort (); |
|
#pragma omp parallel reduction (||:l) |
l = thr != omp_get_thread_num () + 11; |
|
if (l) |
abort (); |
return 0; |
} |
/libgomp.c/copyin-3.c
0,0 → 1,42
/* { dg-do run } */ |
/* { dg-options "-O2" } */ |
/* { dg-require-effective-target tls_runtime } */ |
|
#include <omp.h> |
#include <stdlib.h> |
|
int thr; |
#pragma omp threadprivate (thr) |
|
int |
test (int l) |
{ |
return l || (thr != omp_get_thread_num () * 2); |
} |
|
int |
main (void) |
{ |
int l = 0; |
|
omp_set_dynamic (0); |
omp_set_num_threads (6); |
|
thr = 8; |
/* Broadcast the value to all threads. */ |
#pragma omp parallel copyin (thr) |
; |
|
#pragma omp parallel reduction (||:l) |
{ |
/* Now test if the broadcast succeeded. */ |
l = thr != 8; |
thr = omp_get_thread_num () * 2; |
#pragma omp barrier |
l = test (l); |
} |
|
if (l) |
abort (); |
return 0; |
} |
/libgomp.c/omp_workshare2.c
0,0 → 1,64
/****************************************************************************** |
* FILE: omp_workshare2.c |
* DESCRIPTION: |
* OpenMP Example - Sections Work-sharing - C/C++ Version |
* In this example, the OpenMP SECTION directive is used to assign |
* different array operations to threads that execute a SECTION. Each |
* thread receives its own copy of the result array to work with. |
* AUTHOR: Blaise Barney 5/99 |
* LAST REVISED: 04/06/05 |
******************************************************************************/ |
#include <omp.h> |
#include <stdio.h> |
#include <stdlib.h> |
#define N 50 |
|
int main (int argc, char *argv[]) { |
|
int i, nthreads, tid; |
float a[N], b[N], c[N]; |
|
/* Some initializations */ |
for (i=0; i<N; i++) |
a[i] = b[i] = i * 1.0; |
|
#pragma omp parallel shared(a,b,nthreads) private(c,i,tid) |
{ |
tid = omp_get_thread_num(); |
if (tid == 0) |
{ |
nthreads = omp_get_num_threads(); |
printf("Number of threads = %d\n", nthreads); |
} |
printf("Thread %d starting...\n",tid); |
|
#pragma omp sections nowait |
{ |
#pragma omp section |
{ |
printf("Thread %d doing section 1\n",tid); |
for (i=0; i<N; i++) |
{ |
c[i] = a[i] + b[i]; |
printf("Thread %d: c[%d]= %f\n",tid,i,c[i]); |
} |
} |
|
#pragma omp section |
{ |
printf("Thread %d doing section 2\n",tid); |
for (i=0; i<N; i++) |
{ |
c[i] = a[i] * b[i]; |
printf("Thread %d: c[%d]= %f\n",tid,i,c[i]); |
} |
} |
|
} /* end of sections */ |
|
printf("Thread %d done.\n",tid); |
|
} /* end of parallel section */ |
|
return 0; |
} |
/libgomp.c/omp-loop01.c
0,0 → 1,96
#include <stdlib.h> |
#include <stdio.h> |
#include <string.h> |
#include <omp.h> |
|
#define MAX 1000 |
|
void main1() |
{ |
int i, N1, N2, step; |
int a[MAX], b[MAX]; |
|
N1 = rand () % 13; |
N2 = rand () % (MAX - 51) + 50; |
step = rand () % 7 + 1; |
|
printf ("N1 = %d\nN2 = %d\nstep = %d\n", N1, N2, step); |
|
for (i = N1; i <= N2; i += step) |
a[i] = 42+ i; |
|
/* COUNTING UP (<). Fill in array 'b' in parallel. */ |
memset (b, 0, sizeof b); |
#pragma omp parallel shared(a,b,N1,N2,step) private(i) |
{ |
#pragma omp for |
for (i = N1; i < N2; i += step) |
b[i] = a[i]; |
} |
|
/* COUNTING UP (<). Check that all the cells were filled in properly. */ |
for (i = N1; i < N2; i += step) |
if (a[i] != b[i]) |
abort (); |
|
printf ("for (i = %d; i < %d; i += %d) [OK]\n", N1, N2, step); |
|
/* COUNTING UP (<=). Fill in array 'b' in parallel. */ |
memset (b, 0, sizeof b); |
#pragma omp parallel shared(a,b,N1,N2,step) private(i) |
{ |
#pragma omp for |
for (i = N1; i <= N2; i += step) |
b[i] = a[i]; |
} |
|
/* COUNTING UP (<=). Check that all the cells were filled in properly. */ |
for (i = N1; i <= N2; i += step) |
if (a[i] != b[i]) |
abort (); |
|
printf ("for (i = %d; i <= %d; i += %d) [OK]\n", N1, N2, step); |
|
/* COUNTING DOWN (>). Fill in array 'b' in parallel. */ |
memset (b, 0, sizeof b); |
#pragma omp parallel shared(a,b,N1,N2,step) private(i) |
{ |
#pragma omp for |
for (i = N2; i > N1; i -= step) |
b[i] = a[i]; |
} |
|
/* COUNTING DOWN (>). Check that all the cells were filled in properly. */ |
for (i = N2; i > N1; i -= step) |
if (a[i] != b[i]) |
abort (); |
|
printf ("for (i = %d; i > %d; i -= %d) [OK]\n", N2, N1, step); |
|
/* COUNTING DOWN (>=). Fill in array 'b' in parallel. */ |
memset (b, 0, sizeof b); |
#pragma omp parallel shared(a,b,N1,N2,step) private(i) |
{ |
#pragma omp for |
for (i = N2; i >= N1; i -= step) |
b[i] = a[i]; |
} |
|
/* COUNTING DOWN (>=). Check that all the cells were filled in properly. */ |
for (i = N2; i >= N1; i -= step) |
if (a[i] != b[i]) |
abort (); |
|
printf ("for (i = %d; i >= %d; i -= %d) [OK]\n", N2, N1, step); |
} |
|
int |
main () |
{ |
int i; |
|
srand (0); |
for (i = 0; i < 10; ++i) |
main1(); |
return 0; |
} |
/libgomp.c/shared-1.c
0,0 → 1,58
extern void abort (void); |
|
struct Y |
{ |
int l[5][10]; |
}; |
|
struct X |
{ |
struct Y y; |
float b[10]; |
}; |
|
/* Check "shared" data sharing: five threads each add the team size (5)
   to a and b (3 + 5*5 == 28), and the worksharing loop adds 20 to each
   element exactly once (-10 + 20 == 10).  */
void
parallel (int a, int b)
{
  int i, j;
  struct X A[10][5];
  a = b = 3;

  for (i = 0; i < 10; i++)
    for (j = 0; j < 5; j++)
      A[i][j].y.l[3][3] = -10;

#pragma omp parallel shared (a, b, A) num_threads (5)
  {
    /* These shadow the outer i/j; each thread gets its own.  */
    int i, j;

#pragma omp atomic
    a += omp_get_num_threads ();

#pragma omp atomic
    b += omp_get_num_threads ();

    /* Worksharing: each (i, j) element is updated by exactly one thread.  */
#pragma omp for private (j)
    for (i = 0; i < 10; i++)
      for (j = 0; j < 5; j++)
        A[i][j].y.l[3][3] += 20;

  }

  for (i = 0; i < 10; i++)
    for (j = 0; j < 5; j++)
      if (A[i][j].y.l[3][3] != 10)
        abort ();

  if (a != 28)
    abort ();

  if (b != 28)
    abort ();
}
|
/* Driver: the argument values are dummies; parallel () overwrites them.
   Fixed: implicit-int "main()" is invalid since C99.  */
int
main ()
{
  parallel (1, 2);
  return 0;
}
/libgomp.c/omp_workshare4.c
0,0 → 1,48
/****************************************************************************** |
* OpenMP Example - Combined Parallel Loop Work-sharing - C/C++ Version |
* FILE: omp_workshare4.c |
* DESCRIPTION: |
* This is a corrected version of the omp_workshare3.c example. Corrections |
* include removing all statements between the parallel for construct and |
* the actual for loop, and introducing logic to preserve the ability to |
* query a thread's id and print it from inside the for loop. |
* SOURCE: Blaise Barney 5/99 |
* LAST REVISED: 03/03/2002 |
******************************************************************************/ |
|
#include <omp.h> |
#include <stdio.h> |
#define N 50 |
#define CHUNKSIZE 5 |
|
main () { |
|
int i, chunk, tid; |
float a[N], b[N], c[N]; |
char first_time; |
|
/* Some initializations */ |
for (i=0; i < N; i++) |
a[i] = b[i] = i * 1.0; |
chunk = CHUNKSIZE; |
first_time = 'y'; |
|
#pragma omp parallel for \ |
shared(a,b,c,chunk) \ |
private(i,tid) \ |
schedule(static,chunk) \ |
firstprivate(first_time) |
|
for (i=0; i < N; i++) |
{ |
if (first_time == 'y') |
{ |
tid = omp_get_thread_num(); |
first_time = 'n'; |
} |
c[i] = a[i] + b[i]; |
printf("tid= %d i= %d c[i]= %f\n", tid, i, c[i]); |
} |
|
return 0; |
} |
/libgomp.c/lib-1.c
0,0 → 1,99
#include <stdlib.h> |
#include <omp.h> |
|
int |
main (void) |
{ |
double d, e; |
int l; |
omp_lock_t lck; |
omp_nest_lock_t nlck; |
|
d = omp_get_wtime (); |
|
omp_init_lock (&lck); |
omp_set_lock (&lck); |
if (omp_test_lock (&lck)) |
abort (); |
omp_unset_lock (&lck); |
if (! omp_test_lock (&lck)) |
abort (); |
if (omp_test_lock (&lck)) |
abort (); |
omp_unset_lock (&lck); |
omp_destroy_lock (&lck); |
|
omp_init_nest_lock (&nlck); |
if (omp_test_nest_lock (&nlck) != 1) |
abort (); |
omp_set_nest_lock (&nlck); |
if (omp_test_nest_lock (&nlck) != 3) |
abort (); |
omp_unset_nest_lock (&nlck); |
omp_unset_nest_lock (&nlck); |
if (omp_test_nest_lock (&nlck) != 2) |
abort (); |
omp_unset_nest_lock (&nlck); |
omp_unset_nest_lock (&nlck); |
omp_destroy_nest_lock (&nlck); |
|
omp_set_dynamic (1); |
if (! omp_get_dynamic ()) |
abort (); |
omp_set_dynamic (0); |
if (omp_get_dynamic ()) |
abort (); |
|
omp_set_nested (1); |
if (! omp_get_nested ()) |
abort (); |
omp_set_nested (0); |
if (omp_get_nested ()) |
abort (); |
|
omp_set_num_threads (5); |
if (omp_get_num_threads () != 1) |
abort (); |
if (omp_get_max_threads () != 5) |
abort (); |
if (omp_get_thread_num () != 0) |
abort (); |
omp_set_num_threads (3); |
if (omp_get_num_threads () != 1) |
abort (); |
if (omp_get_max_threads () != 3) |
abort (); |
if (omp_get_thread_num () != 0) |
abort (); |
l = 0; |
#pragma omp parallel reduction (|:l) |
{ |
l = omp_get_num_threads () != 3; |
l |= omp_get_thread_num () < 0; |
l |= omp_get_thread_num () >= 3; |
#pragma omp master |
l |= omp_get_thread_num () != 0; |
} |
if (l) |
abort (); |
|
if (omp_get_num_procs () <= 0) |
abort (); |
if (omp_in_parallel ()) |
abort (); |
#pragma omp parallel reduction (|:l) |
l = ! omp_in_parallel (); |
#pragma omp parallel reduction (|:l) if (1) |
l = ! omp_in_parallel (); |
|
e = omp_get_wtime (); |
if (d > e) |
abort (); |
d = omp_get_wtick (); |
/* Negative precision is definitely wrong, |
bigger than 1s clock resolution is also strange. */ |
if (d <= 0 || d > 1) |
abort (); |
|
return 0; |
} |
/libgomp.c/nestedfn-1.c
0,0 → 1,49
/* { dg-do run } */ |
|
#include <omp.h> |
#include <stdlib.h> |
|
int |
main (void) |
{ |
int a = 1, b = 2, c = 3; |
void |
foo (void) |
{ |
int l = 0; |
#pragma omp parallel shared (a) private (b) firstprivate (c) \ |
num_threads (2) reduction (||:l) |
{ |
if (a != 1 || c != 3) l = 1; |
#pragma omp barrier |
if (omp_get_thread_num () == 0) |
{ |
a = 4; |
b = 5; |
c = 6; |
} |
#pragma omp barrier |
if (omp_get_thread_num () == 1) |
{ |
if (a != 4 || c != 3) l = 1; |
a = 7; |
b = 8; |
c = 9; |
} |
else if (omp_get_num_threads () == 1) |
a = 7; |
#pragma omp barrier |
if (omp_get_thread_num () == 0) |
if (a != 7 || b != 5 || c != 6) l = 1; |
#pragma omp barrier |
if (omp_get_thread_num () == 1) |
if (a != 7 || b != 8 || c != 9) l = 1; |
} |
if (l) |
abort (); |
} |
foo (); |
if (a != 7) |
abort (); |
return 0; |
} |
/libgomp.c/omp-loop03.c
0,0 → 1,26
extern void abort (void); |
int a; |
|
void |
foo () |
{ |
int i; |
a = 30; |
#pragma omp barrier |
#pragma omp for lastprivate (a) |
for (i = 0; i < 1024; i++) |
{ |
a = i; |
} |
if (a != 1023) |
abort (); |
} |
|
/* Run foo in a team of (up to) 64 threads; every thread must observe
   lastprivate a == 1023 after the worksharing loop inside foo.  */
int
main (void)
{
#pragma omp parallel num_threads (64)
  foo ();

  return 0;
}
/libgomp.c/nestedfn-3.c
0,0 → 1,52
/* { dg-do run } */ |
|
#include <omp.h> |
|
extern void abort (void); |
|
int |
main (void) |
{ |
int i = 5, l = 0; |
int foo (void) { return i == 6; } |
int bar (void) { return i - 3; } |
|
omp_set_dynamic (0); |
|
#pragma omp parallel if (foo ()) num_threads (bar ()) reduction (|:l) |
if (omp_get_num_threads () != 1) |
l = 1; |
|
i++; |
|
#pragma omp parallel if (foo ()) num_threads (bar ()) reduction (|:l) |
if (omp_get_num_threads () != 3) |
l = 1; |
|
i++; |
|
#pragma omp master |
if (bar () != 4) |
abort (); |
|
#pragma omp single |
{ |
if (foo ()) |
abort (); |
i--; |
if (! foo ()) |
abort (); |
} |
|
if (l) |
abort (); |
|
i = 8; |
#pragma omp atomic |
l += bar (); |
|
if (l != 5) |
abort (); |
|
return 0; |
} |
/libgomp.c/shared-3.c
0,0 → 1,19
/* { dg-do run } */ |
|
void abort (void); |
|
int main() |
{ |
int x; |
int *p; |
|
p = &x; |
|
#pragma omp parallel |
{ |
if (p != &x) |
abort (); |
} |
|
return 0; |
} |
/libgomp.c/omp_reduction.c
0,0 → 1,35
/****************************************************************************** |
* FILE: omp_reduction.c |
* DESCRIPTION: |
* OpenMP Example - Combined Parallel Loop Reduction - C/C++ Version |
* This example demonstrates a sum reduction within a combined parallel loop |
* construct. Notice that default data element scoping is assumed - there |
* are no clauses specifying shared or private variables. OpenMP will |
* automatically make loop index variables private within team threads, and |
* global variables shared. |
* AUTHOR: Blaise Barney 5/99 |
* LAST REVISED: 04/06/05 |
******************************************************************************/ |
#include <omp.h> |
#include <stdio.h> |
#include <stdlib.h> |
|
int main (int argc, char *argv[]) { |
|
int i, n; |
float a[100], b[100], sum; |
|
/* Some initializations */ |
n = 100; |
for (i=0; i < n; i++) |
a[i] = b[i] = i * 1.0; |
sum = 0.0; |
|
#pragma omp parallel for reduction(+:sum) |
for (i=0; i < n; i++) |
sum = sum + (a[i] * b[i]); |
|
printf(" Sum = %f\n",sum); |
|
return 0; |
} |
/libgomp.c/loop-1.c
0,0 → 1,140
/* Test that all loop iterations are touched. This doesn't verify |
scheduling order, merely coverage. */ |
|
/* { dg-require-effective-target sync_int_long } */ |
|
#include <omp.h> |
#include <string.h> |
#include <assert.h> |
#include "libgomp_g.h" |
|
|
#define N 10000 |
static int S, E, INCR, CHUNK, NTHR; |
static int data[N]; |
|
static void clean_data (void) |
{ |
memset (data, -1, sizeof (data)); |
} |
|
static void test_data (void) |
{ |
int i, j; |
|
for (i = 0; i < S; ++i) |
assert (data[i] == -1); |
|
for (j = 0; i < E; ++i, j = (j + 1) % INCR) |
if (j == 0) |
assert (data[i] != -1); |
else |
assert (data[i] == -1); |
|
for (; i < N; ++i) |
assert (data[i] == -1); |
} |
|
static void set_data (long i, int val) |
{ |
int old; |
assert (i >= 0 && i < N); |
old = __sync_lock_test_and_set (data+i, val); |
assert (old == -1); |
} |
|
|
#define TMPL_1(sched) \ |
static void f_##sched##_1 (void *dummy) \ |
{ \ |
int iam = omp_get_thread_num (); \ |
long s0, e0, i; \ |
if (GOMP_loop_##sched##_start (S, E, INCR, CHUNK, &s0, &e0)) \ |
do \ |
{ \ |
for (i = s0; i < e0; i += INCR) \ |
set_data (i, iam); \ |
} \ |
while (GOMP_loop_##sched##_next (&s0, &e0)); \ |
GOMP_loop_end (); \ |
} \ |
static void t_##sched##_1 (void) \ |
{ \ |
clean_data (); \ |
GOMP_parallel_start (f_##sched##_1, NULL, NTHR); \ |
f_##sched##_1 (NULL); \ |
GOMP_parallel_end (); \ |
test_data (); \ |
} |
|
TMPL_1(static) |
TMPL_1(dynamic) |
TMPL_1(guided) |
|
#define TMPL_2(sched) \ |
static void f_##sched##_2 (void *dummy) \ |
{ \ |
int iam = omp_get_thread_num (); \ |
long s0, e0, i; \ |
while (GOMP_loop_##sched##_next (&s0, &e0)) \ |
{ \ |
for (i = s0; i < e0; i += INCR) \ |
set_data (i, iam); \ |
} \ |
GOMP_loop_end_nowait (); \ |
} \ |
static void t_##sched##_2 (void) \ |
{ \ |
clean_data (); \ |
GOMP_parallel_loop_##sched##_start \ |
(f_##sched##_2, NULL, NTHR, S, E, INCR, CHUNK); \ |
f_##sched##_2 (NULL); \ |
GOMP_parallel_end (); \ |
test_data (); \ |
} |
|
TMPL_2(static) |
TMPL_2(dynamic) |
TMPL_2(guided) |
|
static void test (void) |
{ |
t_static_1 (); |
t_dynamic_1 (); |
t_guided_1 (); |
t_static_2 (); |
t_dynamic_2 (); |
t_guided_2 (); |
} |
|
int main() |
{ |
omp_set_dynamic (0); |
|
NTHR = 4; |
|
S = 0, E = N, INCR = 1, CHUNK = 4; |
test (); |
|
S = 0, E = N, INCR = 2, CHUNK = 4; |
test (); |
|
S = 1, E = N-1, INCR = 1, CHUNK = 5; |
test (); |
|
S = 1, E = N-1, INCR = 2, CHUNK = 5; |
test (); |
|
S = 2, E = 4, INCR = 1, CHUNK = 1; |
test (); |
|
S = 0, E = N, INCR = 1, CHUNK = 0; |
t_static_1 (); |
t_static_2 (); |
|
S = 1, E = N-1, INCR = 1, CHUNK = 0; |
t_static_1 (); |
t_static_2 (); |
|
return 0; |
} |
/libgomp.c/nestedfn-5.c
0,0 → 1,38
/* { dg-do run } */ |
/* { dg-options "-O2" } */ |
|
extern void abort (void); |
|
void |
foo (int *j) |
{ |
int i = 5; |
int bar (void) { return i + 1; } |
#pragma omp sections |
{ |
#pragma omp section |
{ |
if (bar () != 6) |
#pragma omp atomic |
++*j; |
} |
#pragma omp section |
{ |
if (bar () != 6) |
#pragma omp atomic |
++*j; |
} |
} |
} |
|
int |
main (void) |
{ |
int j = 0; |
#pragma omp parallel num_threads (2) |
foo (&j); |
if (j) |
abort (); |
return 0; |
} |
|
/libgomp.c/loop-3.c
0,0 → 1,24
/* { dg-do run } */ |
|
extern void abort (void); |
|
volatile int count; |
static int test(void) |
{ |
return ++count > 0; |
} |
|
int main() |
{ |
int i; |
#pragma omp for |
for (i = 0; i < 10; ++i) |
{ |
if (test()) |
continue; |
abort (); |
} |
if (i != count) |
abort (); |
return 0; |
} |
/libgomp.c/atomic-2.c
0,0 → 1,35
/* { dg-do run } */ |
/* { dg-options "-O2 -fopenmp" } */ |
/* { dg-options "-O2 -fopenmp -march=nocona" { target i?86-*-* x86_64-*-* } } */ |
/* { dg-options "-O2 -fopenmp" { target ilp32 } } */ |
|
double d = 1.5; |
long double ld = 3; |
extern void abort (void); |
|
void |
test (void) |
{ |
#pragma omp atomic |
d *= 1.25; |
#pragma omp atomic |
ld /= 0.75; |
if (d != 1.875 || ld != 4.0L) |
abort (); |
} |
|
/* On x86-64 the long double atomic sequences need cmpxchg16b (and SSE3
   per -march=nocona in the dg-options); probe CPUID leaf 1 and skip
   the test on CPUs lacking those features.  */
int
main (void)
{
#ifdef __x86_64__
# define bit_SSE3 (1 << 0)
# define bit_CX16 (1 << 13)
  unsigned int ax, bx, cx, dx;
  __asm__ ("cpuid" : "=a" (ax), "=b" (bx), "=c" (cx), "=d" (dx)
	   : "0" (1) : "cc");
  if ((cx & (bit_SSE3 | bit_CX16)) != (bit_SSE3 | bit_CX16))
    return 0;
#endif
  test ();
  return 0;
}
/libgomp.c/pr32362-1.c
0,0 → 1,32
/* PR middle-end/32362 */ |
/* { dg-do run } */ |
/* { dg-options "-O2" } */ |
|
#include <omp.h> |
#include <stdlib.h> |
|
/* PR middle-end/32362: a firstprivate variable in a nested parallel
   must be initialized from the outer thread's value even when combined
   with an outer private clause.  N records each thread's result.  */
int
main ()
{
  int n[4] = { -1, -1, -1, -1 };
  static int a = 2, b = 4;
  omp_set_num_threads (4);
  omp_set_dynamic (0);
  omp_set_nested (1);
#pragma omp parallel private(b)
  {
    b = omp_get_thread_num ();
#pragma omp parallel firstprivate(a)
    {
      a = (omp_get_thread_num () + a) + 1;
      if (b == omp_get_thread_num ())
	n[omp_get_thread_num ()] = a + (b << 4);
    }
  }
  /* Thread 0 always runs: inner thread 0 computes a == 3.  */
  if (n[0] != 3)
    abort ();
  /* If nesting actually forked, the remaining slots are determined
     too.  */
  if (n[3] != -1
      && (n[1] != 0x14 || n[2] != 0x25 || n[3] != 0x36))
    abort ();
  return 0;
}
/libgomp.c/single-1.c
0,0 → 1,53
/* Trivial test of single. */ |
|
/* { dg-require-effective-target sync_int_long } */ |
|
#include <omp.h> |
#include <sys/time.h> |
#include <unistd.h> |
#include <assert.h> |
#include "libgomp_g.h" |
|
|
/* Records which thread executed the single region; -1 == not yet.  */
static int test;

/* GOMP_single_start returns true in exactly one thread per team; that
   thread must find TEST still -1 (nobody else entered the region).  */
static void f_nocopy (void *dummy)
{
  if (GOMP_single_start ())
    {
      int iam = omp_get_thread_num ();
      int old = __sync_lock_test_and_set (&test, iam);
      assert (old == -1);
    }
}

/* Copy-out form: the executing thread receives NULL, publishes &test
   via GOMP_single_copy_end, and every other thread must receive that
   same pointer back.  */
static void f_copy (void *dummy)
{
  int *x = GOMP_single_copy_start ();
  if (x == NULL)
    {
      int iam = omp_get_thread_num ();
      int old = __sync_lock_test_and_set (&test, iam);
      assert (old == -1);
      GOMP_single_copy_end (&test);
    }
  else
    assert (x == &test);
}

/* Drive both variants with a team of 3 (2 spawned plus the master,
   which also calls the worker directly).  */
int main()
{
  omp_set_dynamic (0);

  test = -1;
  GOMP_parallel_start (f_nocopy, NULL, 3);
  f_nocopy (NULL);
  GOMP_parallel_end ();

  test = -1;
  GOMP_parallel_start (f_copy, NULL, 3);
  f_copy (NULL);
  GOMP_parallel_end ();

  return 0;
}
/libgomp.c/pr32362-3.c
0,0 → 1,34
/* PR middle-end/32362 */ |
/* { dg-do run } */ |
/* { dg-options "-O2" } */ |
|
#include <omp.h> |
#include <stdlib.h> |
|
/* PR middle-end/32362, variant: the firstprivate source is a file-scope
   global here (contrast pr32362-1.c, where it is function-static).  */
int a = 2;

int
main ()
{
  int n[4] = { -1, -1, -1, -1 };
  int b = 4;
  omp_set_num_threads (4);
  omp_set_dynamic (0);
  omp_set_nested (1);
#pragma omp parallel private(b)
  {
    b = omp_get_thread_num ();
#pragma omp parallel firstprivate(a)
    {
      a = (omp_get_thread_num () + a) + 1;
      if (b == omp_get_thread_num ())
	n[omp_get_thread_num ()] = a + (b << 4);
    }
  }
  /* Thread 0 always runs: inner thread 0 computes a == 3.  */
  if (n[0] != 3)
    abort ();
  if (n[3] != -1
      && (n[1] != 0x14 || n[2] != 0x25 || n[3] != 0x36))
    abort ();
  return 0;
}
/libgomp.c/omp-parallel-if.c
0,0 → 1,40
#include <omp.h> |
|
extern void abort (void); |
|
/* Trivial helper: always yields 10.  Used to make the "if" and
   "num_threads" clause expressions in main opaque to the compiler.  */
int
foo (void)
{
  const int result = 10;
  return result;
}
|
/* The parallel "if" clause controls whether a team is forked: a false
   condition must yield a team of exactly one thread; a true condition
   honours num_threads, which may itself be a runtime expression.
   Fix: give main an explicit return type -- implicit int was removed
   from the language in C99.  */
int
main (void)
{
  int A = 0;

  /* foo () > 10 is false -> serialized region, one thread.  */
#pragma omp parallel if (foo () > 10) shared (A)
  {
    A = omp_get_num_threads ();
  }

  if (A != 1)
    abort ();

  /* True condition -> team of exactly 3.  */
#pragma omp parallel if (foo () == 10) num_threads (3) shared (A)
  {
    A = omp_get_num_threads ();
  }

  if (A != 3)
    abort ();

  /* num_threads given by a runtime call.  */
#pragma omp parallel if (foo () == 10) num_threads (foo ()) shared (A)
  {
    A = omp_get_num_threads ();
  }

  if (A != 10)
    abort ();

  return 0;
}
/libgomp.c/ordered-1.c
0,0 → 1,115
/* Test that all loop iterations are touched. This doesn't verify |
scheduling order, merely coverage. */ |
/* Note that we never call GOMP_ordered_start in here. AFAICS, this is |
valid; the only requirement is "not more than once per iteration". */ |
|
/* { dg-require-effective-target sync_int_long } */ |
|
#include <omp.h> |
#include <string.h> |
#include <assert.h> |
#include "libgomp_g.h" |
|
|
/* Loop bounds/stride/chunk shared with the loop workers, plus the
   per-iteration claim array.  */
#define N 1000
static int S, E, INCR, CHUNK, NTHR;
static int data[N];

/* Reset every slot to the "untouched" marker.  */
static void clean_data (void)
{
  memset (data, -1, sizeof (data));
}

/* Verify coverage: exactly the iterations S, S+INCR, ... below E must
   have been claimed; every other slot must still be -1.  */
static void test_data (void)
{
  int idx;

  for (idx = 0; idx < N; ++idx)
    {
      int expected_touched = idx >= S && idx < E && (idx - S) % INCR == 0;

      if (expected_touched)
	assert (data[idx] != -1);
      else
	assert (data[idx] == -1);
    }
}

/* Claim iteration I for one thread; a second claim aborts.  */
static void set_data (long i, int val)
{
  int prev;

  assert (i >= 0 && i < N);
  prev = __sync_lock_test_and_set (data + i, val);
  assert (prev == -1);
}
|
|
/* Template: generates a worker f_<sched>_1 that claims every iteration
   handed to it by the ordered-<sched> loop API, and a driver
   t_<sched>_1 that runs the worker across NTHR threads (the master
   included) and then checks coverage.  GOMP_ordered_start is
   deliberately never called: entering the ordered region is optional
   for a loop declared ordered (at most once per iteration).  */
#define TMPL_1(sched) \
static void f_##sched##_1 (void *dummy) \
{ \
  int iam = omp_get_thread_num (); \
  long s0, e0, i; \
  if (GOMP_loop_ordered_##sched##_start (S, E, INCR, CHUNK, &s0, &e0)) \
    do \
      { \
	for (i = s0; i < e0; i += INCR) \
	  set_data (i, iam); \
      } \
    while (GOMP_loop_ordered_##sched##_next (&s0, &e0)); \
  GOMP_loop_end (); \
} \
static void t_##sched##_1 (void) \
{ \
  clean_data (); \
  GOMP_parallel_start (f_##sched##_1, NULL, NTHR); \
  f_##sched##_1 (NULL); \
  GOMP_parallel_end (); \
  test_data (); \
}

TMPL_1(static)
TMPL_1(dynamic)
TMPL_1(guided)
|
/* Run all three ordered schedules with the current configuration.  */
static void test (void)
{
  t_static_1 ();
  t_dynamic_1 ();
  t_guided_1 ();
}

/* Sweep start/end/stride/chunk combinations; CHUNK == 0 (default
   chunking) is only valid for the static schedule.  */
int main()
{
  omp_set_dynamic (0);

  NTHR = 4;

  S = 0, E = N, INCR = 1, CHUNK = 4;
  test ();

  S = 0, E = N, INCR = 2, CHUNK = 4;
  test ();

  S = 1, E = N-1, INCR = 1, CHUNK = 5;
  test ();

  S = 1, E = N-1, INCR = 2, CHUNK = 5;
  test ();

  S = 2, E = 4, INCR = 1, CHUNK = 1;
  test ();

  S = 0, E = N, INCR = 1, CHUNK = 0;
  t_static_1 ();

  S = 1, E = N-1, INCR = 1, CHUNK = 0;
  t_static_1 ();

  /* More threads than iterations: some threads get no work.  */
  NTHR = 10;
  S = 1, E = 9, INCR = 1, CHUNK = 0;
  t_static_1 ();

  return 0;
}
/libgomp.c/ordered-3.c
0,0 → 1,82
#include <stdlib.h> |
|
/* Global sequence counter used to verify that ordered iterations are
   released in ascending order.  */
int cnt;

/* Abort unless this is call number X (0-based) since cnt was reset.  */
void
check (int x)
{
  int expected = cnt;

  cnt = expected + 1;
  if (expected != x)
    abort ();
}
|
/* The ordered construct must release iterations in loop order for
   every schedule kind, both when the parallel is serialized (if (0))
   and when it actually forks (if (1)).  check () aborts on any
   out-of-order iteration.  */
int
main (void)
{
  int j;

  /* static,1: iterations round-robin across threads.  */
  cnt = 0;
#pragma omp parallel for ordered schedule (static, 1) num_threads (4) if (0)
  for (j = 0; j < 1000; j++)
    {
#pragma omp ordered
      check (j);
    }

  cnt = 0;
#pragma omp parallel for ordered schedule (static, 1) num_threads (4) if (1)
  for (j = 0; j < 1000; j++)
    {
#pragma omp ordered
      check (j);
    }

  /* runtime: schedule taken from OMP_SCHEDULE / internal default.  */
  cnt = 0;
#pragma omp parallel for ordered schedule (runtime) num_threads (4) if (0)
  for (j = 0; j < 1000; j++)
    {
#pragma omp ordered
      check (j);
    }

  cnt = 0;
#pragma omp parallel for ordered schedule (runtime) num_threads (4) if (1)
  for (j = 0; j < 1000; j++)
    {
#pragma omp ordered
      check (j);
    }

  cnt = 0;
#pragma omp parallel for ordered schedule (dynamic) num_threads (4) if (0)
  for (j = 0; j < 1000; j++)
    {
#pragma omp ordered
      check (j);
    }

  cnt = 0;
#pragma omp parallel for ordered schedule (dynamic) num_threads (4) if (1)
  for (j = 0; j < 1000; j++)
    {
#pragma omp ordered
      check (j);
    }

  cnt = 0;
#pragma omp parallel for ordered schedule (guided) num_threads (4) if (0)
  for (j = 0; j < 1000; j++)
    {
#pragma omp ordered
      check (j);
    }

  cnt = 0;
#pragma omp parallel for ordered schedule (guided) num_threads (4) if (1)
  for (j = 0; j < 1000; j++)
    {
#pragma omp ordered
      check (j);
    }

  return 0;
}
/libgomp.c/pr26943-2.c
0,0 → 1,47
/* PR c++/26943 */ |
/* { dg-do run } */ |
|
extern int omp_set_dynamic (int); |
extern void abort (void); |
|
int a = 8, b = 12, c = 16, d = 20, j = 0; |
char e[10] = "a", f[10] = "b", g[10] = "c", h[10] = "d"; |
|
/* PR c++/26943: one worksharing loop mixing shared, firstprivate,
   lastprivate, private and a reduction, with schedule (static, 1) so
   each of the 4 threads owns exactly one iteration.  J accumulates a
   distinct bit pattern per failed check.
   NOTE(review): the bare barriers inside the worksharing loop are not
   normally permitted by the OpenMP spec; presumably this relies on
   GCC-specific behavior -- confirm before reusing the pattern.  */
int
main (void)
{
  int i;
  omp_set_dynamic (0);
#pragma omp parallel for shared (a, e) firstprivate (b, f) \
		 lastprivate (c, g) private (d, h) \
		 schedule (static, 1) num_threads (4) \
		 reduction (+:j)
  for (i = 0; i < 4; i++)
    {
      /* Entry values: shared/firstprivate still hold the originals.  */
      if (a != 8 || b != 12 || e[0] != 'a' || f[0] != 'b')
	j++;
#pragma omp barrier
#pragma omp atomic
      a += i;
      b += i;
      c = i;
      d = i;
#pragma omp atomic
      e[0] += i;
      f[0] += i;
      g[0] = 'g' + i;
      h[0] = 'h' + i;
#pragma omp barrier
      /* After all threads wrote: shared a/e saw every increment
	 (0+1+2+3 == 6); the private copies only this thread's.  */
      if (a != 8 + 6 || b != 12 + i || c != i || d != i)
	j += 8;
      if (e[0] != 'a' + 6 || f[0] != 'b' + i || g[0] != 'g' + i)
	j += 64;
      if (h[0] != 'h' + i)
	j += 512;
    }
  /* lastprivate c/g carry iteration 3's values; private d/h and
     firstprivate b/f leave the globals untouched.  */
  if (j || a != 8 + 6 || b != 12 || c != 3 || d != 20)
    abort ();
  if (e[0] != 'a' + 6 || f[0] != 'b' || g[0] != 'g' + 3 || h[0] != 'd')
    abort ();
  return 0;
}
/libgomp.c/pr32468.c
0,0 → 1,26
/* PR libgomp/32468 */ |
/* { dg-do run } */ |
|
#include <omp.h> |
#include <stdlib.h> |
|
/* PR libgomp/32468: omp_get_num_threads inside a sections construct
   must report the enclosing team size (4), not 1.  */
int
main (void)
{
  int res[2] = { -1, -1 };
  omp_set_dynamic (0);
  omp_set_num_threads (4);
#pragma omp parallel
  {
#pragma omp sections
    {
#pragma omp section
      res[0] = omp_get_num_threads () != 4;
#pragma omp section
      res[1] = omp_get_num_threads () != 4;
    }
  }
  if (res[0] != 0 || res[1] != 0)
    abort ();
  return 0;
}
/libgomp.c/parallel-1.c
0,0 → 1,48
/* Trivial test of thread startup. */ |
|
#include <omp.h> |
#include <string.h> |
#include <assert.h> |
#include "libgomp_g.h" |
|
|
/* Team size seen by thread 0, and a flag per thread id that ran.  */
static int nthr;
static int saw[4];

/* Worker executed by every team member (master included).  */
static void function(void *dummy)
{
  int iam = omp_get_thread_num ();

  if (iam == 0)
    nthr = omp_get_num_threads ();

  saw[iam] = 1;
}

/* Start teams of 2 and 3 via the libgomp ABI directly (the master
   calls the worker itself between start and end) and verify exactly
   the expected thread ids ran.  */
int main()
{
  omp_set_dynamic (0);

  GOMP_parallel_start (function, NULL, 2);
  function (NULL);
  GOMP_parallel_end ();

  assert (nthr == 2);
  assert (saw[0] != 0);
  assert (saw[1] != 0);
  assert (saw[2] == 0);

  memset (saw, 0, sizeof (saw));

  GOMP_parallel_start (function, NULL, 3);
  function (NULL);
  GOMP_parallel_end ();

  assert (nthr == 3);
  assert (saw[0] != 0);
  assert (saw[1] != 0);
  assert (saw[2] != 0);
  assert (saw[3] == 0);

  return 0;
}
/libgomp.c/pr26943-4.c
0,0 → 1,61
/* PR c++/26943 */ |
/* { dg-do run } */ |
|
extern int omp_set_dynamic (int); |
extern int omp_get_thread_num (void); |
extern void abort (void); |
|
int a = 8, b = 12, c = 16, d = 20, j = 0, l = 0; |
char e[10] = "a", f[10] = "b", g[10] = "c", h[10] = "d"; |
volatile int k; |
|
int |
main (void) |
{ |
int i; |
omp_set_dynamic (0); |
omp_set_nested (1); |
#pragma omp parallel num_threads (2) reduction (+:l) \ |
firstprivate (a, b, c, d, e, f, g, h, j) |
if (k == omp_get_thread_num ()) |
{ |
#pragma omp parallel for shared (a, e) firstprivate (b, f) \ |
lastprivate (c, g) private (d, h) \ |
schedule (static, 1) num_threads (4) \ |
reduction (+:j) |
for (i = 0; i < 4; i++) |
{ |
if (a != 8 || b != 12 || e[0] != 'a' || f[0] != 'b') |
j++; |
#pragma omp barrier |
#pragma omp atomic |
a += i; |
b += i; |
c = i; |
d = i; |
#pragma omp atomic |
e[0] += i; |
f[0] += i; |
g[0] = 'g' + i; |
h[0] = 'h' + i; |
#pragma omp barrier |
if (a != 8 + 6 || b != 12 + i || c != i || d != i) |
j += 8; |
if (e[0] != 'a' + 6 || f[0] != 'b' + i || g[0] != 'g' + i) |
j += 64; |
if (h[0] != 'h' + i) |
j += 512; |
} |
if (j || a != 8 + 6 || b != 12 || c != 3 || d != 20) |
++l; |
if (e[0] != 'a' + 6 || f[0] != 'b' || g[0] != 'g' + 3 || h[0] != 'd') |
l += 8; |
} |
if (l) |
abort (); |
if (a != 8 || b != 12 || c != 16 || d != 20) |
abort (); |
if (e[0] != 'a' || f[0] != 'b' || g[0] != 'c' || h[0] != 'd') |
abort (); |
return 0; |
} |
/libgomp.c/reduction-1.c
0,0 → 1,36
#include <omp.h> |
#include <stdlib.h> |
|
/* Combined +, * and & reductions on one parallel.  Inside the region
   each private copy must start at the operator's identity (0, 1.0 and
   ~0 respectively); afterwards the combined results are checked, but
   only when the team really had 4 threads (bit 2 of J records the
   team size).  */
int
main (void)
{
  int i = 0, j = 0, k = ~0;
  double d = 1.0;
#pragma omp parallel num_threads(4) reduction(+:i) reduction(*:d) reduction(&:k)
  {
    if (i != 0 || d != 1.0 || k != ~0)
#pragma omp atomic
      j |= 1;

    if (omp_get_num_threads () != 4)
#pragma omp atomic
      j |= 2;

    i = omp_get_thread_num ();
    d = i + 1;
    k = ~(1 << (2 * i));
  }

  if (j & 1)
    abort ();
  if ((j & 2) == 0)
    {
      /* 4 threads contributed ids 0..3.  */
      if (i != (0 + 1 + 2 + 3))
	abort ();
      if (d != (1.0 * 2.0 * 3.0 * 4.0))
	abort ();
      /* AND of ~(1<<0), ~(1<<2), ~(1<<4), ~(1<<6) clears 0x55.  */
      if (k != (~0 ^ 0x55))
	abort ();
    }
  return 0;
}
/libgomp.c/reduction-3.c
0,0 → 1,51
#include <omp.h> |
#include <stdlib.h> |
|
/* Reductions on a worksharing loop (not the parallel): the single
   region first perturbs the shared variables, and the loop's reduction
   must combine the per-thread contributions on top of those values.
   All team-size-dependent checks are guarded so the test also passes
   if fewer than 4 threads were delivered.  */
int
main (void)
{
  int i = 0, j = 0, k = ~0, l;
  double d = 1.0;
#pragma omp parallel num_threads(4)
  {
#pragma omp single
    {
      i = 16;
      k ^= (1 << 16);
      d += 32.0;
    }

#pragma omp for reduction(+:i) reduction(*:d) reduction(&:k) nowait
    for (l = 0; l < 4; l++)
      {
	/* Private copies start at the operator identities.  */
	if (omp_get_num_threads () == 4 && (i != 0 || d != 1.0 || k != ~0))
#pragma omp atomic
	  j |= 1;

	if (l == omp_get_thread_num ())
	  {
	    i = omp_get_thread_num ();
	    d = i + 1;
	    k = ~(1 << (2 * i));
	  }
      }

    if (omp_get_num_threads () == 4)
      {
#pragma omp barrier
	if (i != (16 + 0 + 1 + 2 + 3))
#pragma omp atomic
	  j |= 2;
	if (d != (33.0 * 1.0 * 2.0 * 3.0 * 4.0))
#pragma omp atomic
	  j |= 4;
	if (k != (~0 ^ 0x55 ^ (1 << 16)))
#pragma omp atomic
	  j |= 8;
      }
  }

  if (j)
    abort ();
  return 0;
}
/libgomp.c/omp-single-1.c
0,0 → 1,19
extern void abort (void); |
|
/* A single region inside a parallel must be executed by exactly one
   thread, however many reach it.  Fix: give main an explicit return
   type -- implicit int was removed from the language in C99.  */
int
main (void)
{
  int i = 0;

#pragma omp parallel shared (i)
  {
    /* Only one thread increments; the rest skip past the region and
       wait at its implicit barrier.  */
#pragma omp single
    {
      i++;
    }
  }

  if (i != 1)
    abort ();

  return 0;
}
/libgomp.c/omp-parallel-for.c
0,0 → 1,20
extern void abort (void); |
|
/* firstprivate seeds each thread's copy of A with 30; lastprivate
   writes back the copy belonging to the thread that owns the final
   iteration.  With 2 threads and schedule(static), that thread sums
   iterations 5..9 on top of 30.  Fix: give main an explicit return
   type -- implicit int was removed from the language in C99.  */
int
main (void)
{
  int i, a;

  a = 30;

#pragma omp parallel for firstprivate (a) lastprivate (a) \
			 num_threads (2) schedule(static)
  for (i = 0; i < 10; i++)
    a = a + i;

  /* The thread that owns the last iteration will have computed
     30 + 5 + 6 + 7 + 8 + 9 = 65.  */
  if (a != 65)
    abort ();

  return 0;
}
/libgomp.c/omp-single-3.c
0,0 → 1,21
extern void abort (void);

/* The copyprivate clauses broadcast the executing thread's A and B to
   the corresponding private copies of every other thread, so the
   comparison below must hold in all of them regardless of the
   mismatched arguments each thread passed in.  */
void
single (int a, int b)
{
#pragma omp single copyprivate(a) copyprivate(b)
  {
    b = 5;
    a = b;
  }

  if (a != b)
    abort ();
}
|
/* Every thread calls single() with deliberately mismatched arguments;
   copyprivate inside single() must reconcile them.  */
int main()
{
#pragma omp parallel
  single (1, 2);

  return 0;
}
/libgomp.c/nested-1.c
0,0 → 1,30
#include <omp.h> |
#include <stdlib.h> |
|
/* Nested parallelism: each single region stores
   thread-id + team-size * 256, so the low byte identifies the thread
   and the high byte the team size (4 outer, 2 inner) -- both checked
   as half-open ranges below.  */
int
main (void)
{
  int i = -1, j = -1;

  omp_set_nested (1);
  omp_set_dynamic (0);
#pragma omp parallel num_threads (4)
  {
#pragma omp single
    {
      i = omp_get_thread_num () + omp_get_num_threads () * 256;
#pragma omp parallel num_threads (2)
      {
#pragma omp single
	{
	  j = omp_get_thread_num () + omp_get_num_threads () * 256;
	}
      }
    }
  }
  if (i < 4 * 256 || i >= 4 * 256 + 4)
    abort ();
  if (j < 2 * 256 || j >= 2 * 256 + 2)
    abort ();
  return 0;
}
/libgomp.c/sections-1.c
0,0 → 1,85
/* Test that all sections are touched. */ |
|
/* { dg-require-effective-target sync_int_long } */ |
|
#include <omp.h> |
#include <string.h> |
#include <assert.h> |
#include "libgomp_g.h" |
|
|
/* Number of sections handed out per run, and the claim array.  */
#define N 100
static int data[N];
static int NTHR;

/* Mark every section unclaimed.  */
static void clean_data (void)
{
  memset (data, -1, sizeof (data));
}

/* Every section must have been executed by somebody.  */
static void test_data (void)
{
  int idx;

  for (idx = 0; idx < N; ++idx)
    assert (data[idx] != -1);
}

/* Section numbers are 1-based; claim section I exactly once.  */
static void set_data (unsigned i, int val)
{
  int prev;

  assert (i >= 1 && i <= N);
  prev = __sync_lock_test_and_set (data + i - 1, val);
  assert (prev == -1);
}
|
|
/* Worker using the split ABI: GOMP_sections_start both registers the
   section count and returns this thread's first section (0 == none);
   subsequent sections come from GOMP_sections_next.  */
static void f_1 (void *dummy)
{
  int iam = omp_get_thread_num ();
  unsigned long s;

  for (s = GOMP_sections_start (N); s ; s = GOMP_sections_next ())
    set_data (s, iam);
  GOMP_sections_end ();
}

static void test_1 (void)
{
  clean_data ();
  GOMP_parallel_start (f_1, NULL, NTHR);
  f_1 (NULL);
  GOMP_parallel_end ();
  test_data ();
}

/* Worker using the combined ABI: GOMP_parallel_sections_start already
   registered the count, so the worker only pulls sections; the nowait
   end variant skips the closing barrier.  */
static void f_2 (void *dummy)
{
  int iam = omp_get_thread_num ();
  unsigned s;

  while ((s = GOMP_sections_next ()))
    set_data (s, iam);
  GOMP_sections_end_nowait ();
}

static void test_2 (void)
{
  clean_data ();
  GOMP_parallel_sections_start (f_2, NULL, NTHR, N);
  f_2 (NULL);
  GOMP_parallel_end ();
  test_data ();
}

int main()
{
  omp_set_dynamic (0);

  NTHR = 4;

  test_1 ();
  test_2 ();

  return 0;
}
/libgomp.c/critical-2.c
0,0 → 1,35
// { dg-do run } |
// Test several constructs within a parallel. At one point in development, |
// the critical directive clobbered the shared clause of the parallel. |
|
#include <omp.h> |
#include <stdlib.h> |
|
#define N 2000 |
|
/* Regression guard: at one point the critical directive clobbered the
   parallel's shared clause.  Each thread adds 1 to every element under
   the critical, so afterwards each element equals the team size.  */
int main()
{
  int A[N];
  int nthreads;
  int i;

#pragma omp parallel shared (A, nthreads)
  {
#pragma omp master
    nthreads = omp_get_num_threads ();

    /* Zero the array cooperatively (implicit barrier afterwards).  */
#pragma omp for
    for (i = 0; i < N; i++)
      A[i] = 0;

    /* Serialized: each thread bumps every element once.  */
#pragma omp critical
    for (i = 0; i < N; i++)
      A[i] += 1;
  }

  for (i = 0; i < N; i++)
    if (A[i] != nthreads)
      abort ();

  return 0;
}
/libgomp.c/c.exp
0,0 → 1,24
# Clear any per-language settings a previously sourced .exp file left
# behind so they cannot leak into this C run.
if [info exists lang_library_path] then {
    unset lang_library_path
    unset lang_test_file
    unset lang_link_flags
}

load_lib libgomp-dg.exp

# If a testcase doesn't have special options, use these.
if ![info exists DEFAULT_CFLAGS] then {
    set DEFAULT_CFLAGS "-O2 -fopenmp"
}

# Initialize dg.
dg-init

# Gather a list of all tests.
set tests [lsort [find $srcdir/$subdir *.c]]

# Main loop.
dg-runtest $tests "" $DEFAULT_CFLAGS

# All done.
dg-finish
/libgomp.c/pr29947-2.c
0,0 → 1,328
/* PR libgomp/29947 */ |
/* { dg-options "-O2 -fopenmp" } */ |
/* { dg-do run } */ |
|
extern void abort (void);

/* PR libgomp/29947: every combination of {ascending, descending} x
   {schedule (static), schedule (static, 1)} x {plain, ordered} x
   {split parallel/for, combined parallel for} must visit exactly the
   iterations in [j2, k2] (== [j1, k1] as called) and no others.
   CNT counts the parallel regions run by the split variants and is
   returned by test () as a sanity value.  */
int cnt;

/* Shared epilogue for every variant: E counts out-of-range iterations
   seen, C counts iterations executed; C must equal the size of
   [J2, K2] (zero for an empty range).

   Fix: this check was previously written inline as
       if (e || (c != j2 > k2 ? 0 : k2 - j2 + 1))
   which -- because relational operators bind tighter than != and ?:
   binds loosest -- parses as (c != (j2 > k2)) ? 0 : ..., so the
   intended count comparison effectively never ran.  */
static void
verify (long e, long c, long j2, long k2)
{
  if (e || c != (j2 > k2 ? 0 : k2 - j2 + 1))
    abort ();
}

/* Ascending, schedule (static), split parallel/for.  */
void
test1 (long j1, long k1, long j2, long k2)
{
  long i, e = 0, c = 0;
#pragma omp parallel reduction (+:e,c)
  {
#pragma omp for schedule (static)
    for (i = j1; i <= k1; ++i)
      {
	if (i < j2 || i > k2)
	  ++e;
	++c;
      }
#pragma omp atomic
    ++cnt;
  }
  verify (e, c, j2, k2);
}

/* Descending, schedule (static), split parallel/for.  */
void
test2 (long j1, long k1, long j2, long k2)
{
  long i, e = 0, c = 0;
#pragma omp parallel reduction (+:e,c)
  {
#pragma omp for schedule (static)
    for (i = k1; i >= j1; --i)
      {
	if (i < j2 || i > k2)
	  ++e;
	++c;
      }
#pragma omp atomic
    ++cnt;
  }
  verify (e, c, j2, k2);
}

/* Ascending, schedule (static, 1), split parallel/for.  */
void
test3 (long j1, long k1, long j2, long k2)
{
  long i, e = 0, c = 0;
#pragma omp parallel reduction (+:e,c)
  {
#pragma omp for schedule (static, 1)
    for (i = j1; i <= k1; ++i)
      {
	if (i < j2 || i > k2)
	  ++e;
	++c;
      }
#pragma omp atomic
    ++cnt;
  }
  verify (e, c, j2, k2);
}

/* Descending, schedule (static, 1), split parallel/for.  */
void
test4 (long j1, long k1, long j2, long k2)
{
  long i, e = 0, c = 0;
#pragma omp parallel reduction (+:e,c)
  {
#pragma omp for schedule (static, 1)
    for (i = k1; i >= j1; --i)
      {
	if (i < j2 || i > k2)
	  ++e;
	++c;
      }
#pragma omp atomic
    ++cnt;
  }
  verify (e, c, j2, k2);
}

/* Ascending, schedule (static) ordered, split parallel/for.  */
void
test5 (long j1, long k1, long j2, long k2)
{
  long i, e = 0, c = 0;
#pragma omp parallel reduction (+:e,c)
  {
#pragma omp for schedule (static) ordered
    for (i = j1; i <= k1; ++i)
      {
	if (i < j2 || i > k2)
	  ++e;
#pragma omp ordered
	++c;
      }
#pragma omp atomic
    ++cnt;
  }
  verify (e, c, j2, k2);
}

/* Descending, schedule (static) ordered, split parallel/for.  */
void
test6 (long j1, long k1, long j2, long k2)
{
  long i, e = 0, c = 0;
#pragma omp parallel reduction (+:e,c)
  {
#pragma omp for schedule (static) ordered
    for (i = k1; i >= j1; --i)
      {
	if (i < j2 || i > k2)
	  ++e;
#pragma omp ordered
	++c;
      }
#pragma omp atomic
    ++cnt;
  }
  verify (e, c, j2, k2);
}

/* Ascending, schedule (static, 1) ordered, split parallel/for.  */
void
test7 (long j1, long k1, long j2, long k2)
{
  long i, e = 0, c = 0;
#pragma omp parallel reduction (+:e,c)
  {
#pragma omp for schedule (static, 1) ordered
    for (i = j1; i <= k1; ++i)
      {
	if (i < j2 || i > k2)
	  ++e;
#pragma omp ordered
	++c;
      }
#pragma omp atomic
    ++cnt;
  }
  verify (e, c, j2, k2);
}

/* Descending, schedule (static, 1) ordered, split parallel/for.  */
void
test8 (long j1, long k1, long j2, long k2)
{
  long i, e = 0, c = 0;
#pragma omp parallel reduction (+:e,c)
  {
#pragma omp for schedule (static, 1) ordered
    for (i = k1; i >= j1; --i)
      {
	if (i < j2 || i > k2)
	  ++e;
#pragma omp ordered
	++c;
      }
#pragma omp atomic
    ++cnt;
  }
  verify (e, c, j2, k2);
}

/* Ascending, schedule (static), combined parallel for.  */
void
test9 (long j1, long k1, long j2, long k2)
{
  long i, e = 0, c = 0;
#pragma omp parallel for reduction (+:e,c) schedule (static)
  for (i = j1; i <= k1; ++i)
    {
      if (i < j2 || i > k2)
	++e;
      ++c;
    }
  verify (e, c, j2, k2);
}

/* Descending, schedule (static), combined parallel for.  */
void
test10 (long j1, long k1, long j2, long k2)
{
  long i, e = 0, c = 0;
#pragma omp parallel for reduction (+:e,c) schedule (static)
  for (i = k1; i >= j1; --i)
    {
      if (i < j2 || i > k2)
	++e;
      ++c;
    }
  verify (e, c, j2, k2);
}

/* Ascending, schedule (static, 1), combined parallel for.  */
void
test11 (long j1, long k1, long j2, long k2)
{
  long i, e = 0, c = 0;
#pragma omp parallel for reduction (+:e,c) schedule (static, 1)
  for (i = j1; i <= k1; ++i)
    {
      if (i < j2 || i > k2)
	++e;
      ++c;
    }
  verify (e, c, j2, k2);
}

/* Descending, schedule (static, 1), combined parallel for.  */
void
test12 (long j1, long k1, long j2, long k2)
{
  long i, e = 0, c = 0;
#pragma omp parallel for reduction (+:e,c) schedule (static, 1)
  for (i = k1; i >= j1; --i)
    {
      if (i < j2 || i > k2)
	++e;
      ++c;
    }
  verify (e, c, j2, k2);
}

/* Ascending, schedule (static) ordered, combined parallel for.  */
void
test13 (long j1, long k1, long j2, long k2)
{
  long i, e = 0, c = 0;
#pragma omp parallel for reduction (+:e,c) schedule (static) ordered
  for (i = j1; i <= k1; ++i)
    {
      if (i < j2 || i > k2)
	++e;
#pragma omp ordered
      ++c;
    }
  verify (e, c, j2, k2);
}

/* Descending, schedule (static) ordered, combined parallel for.  */
void
test14 (long j1, long k1, long j2, long k2)
{
  long i, e = 0, c = 0;
#pragma omp parallel for reduction (+:e,c) schedule (static) ordered
  for (i = k1; i >= j1; --i)
    {
      if (i < j2 || i > k2)
	++e;
#pragma omp ordered
      ++c;
    }
  verify (e, c, j2, k2);
}

/* Ascending, schedule (static, 1) ordered, combined parallel for.  */
void
test15 (long j1, long k1, long j2, long k2)
{
  long i, e = 0, c = 0;
#pragma omp parallel for reduction (+:e,c) schedule (static, 1) ordered
  for (i = j1; i <= k1; ++i)
    {
      if (i < j2 || i > k2)
	++e;
#pragma omp ordered
      ++c;
    }
  verify (e, c, j2, k2);
}

/* Descending, schedule (static, 1) ordered, combined parallel for.  */
void
test16 (long j1, long k1, long j2, long k2)
{
  long i, e = 0, c = 0;
#pragma omp parallel for reduction (+:e,c) schedule (static, 1) ordered
  for (i = k1; i >= j1; --i)
    {
      if (i < j2 || i > k2)
	++e;
#pragma omp ordered
      ++c;
    }
  verify (e, c, j2, k2);
}

/* Run every variant on one parameter set; returns the running count of
   split-variant parallel regions executed so far.  */
int
__attribute__((noinline))
test (long j1, long k1, long j2, long k2)
{
  test1 (j1, k1, j2, k2);
  test2 (j1, k1, j2, k2);
  test3 (j1, k1, j2, k2);
  test4 (j1, k1, j2, k2);
  test5 (j1, k1, j2, k2);
  test6 (j1, k1, j2, k2);
  test7 (j1, k1, j2, k2);
  test8 (j1, k1, j2, k2);
  test9 (j1, k1, j2, k2);
  test10 (j1, k1, j2, k2);
  test11 (j1, k1, j2, k2);
  test12 (j1, k1, j2, k2);
  test13 (j1, k1, j2, k2);
  test14 (j1, k1, j2, k2);
  test15 (j1, k1, j2, k2);
  test16 (j1, k1, j2, k2);
  return cnt;
}
|
/* Parameter sets: a multi-iteration range, a single iteration, and two
   empty ranges (k < j).  */
int
main (void)
{
  test (1, 5, 1, 5);
  test (5, 5, 5, 5);
  test (5, 4, 5, 4);
  test (5, 1, 5, 1);
  return 0;
}
/libgomp.c/copyin-2.c
0,0 → 1,34
/* { dg-do run } */ |
/* { dg-options "-O2" } */ |
/* { dg-require-effective-target tls_runtime } */ |
|
#include <omp.h> |
#include <stdlib.h> |
|
/* Per-thread (threadprivate) structure; the master's copy starts as
   { 32, "" }.  */
struct { int t; char buf[64]; } thr = { 32, "" };
#pragma omp threadprivate (thr)

/* copyin must broadcast the master's thr.t (32) into every thread's
   copy at region entry; each thread then stores an id-derived value,
   which a later copyin-less region must still observe (threadprivate
   data persists across regions when dynamic threads are off).  */
int
main (void)
{
  int l = 0;

  omp_set_dynamic (0);
  omp_set_num_threads (6);

#pragma omp parallel copyin (thr) reduction (||:l)
  {
    l = thr.t != 32;
    thr.t = omp_get_thread_num () + 11;
  }

  /* The master's copy was written by thread 0: 0 + 11.  */
  if (l || thr.t != 11)
    abort ();

#pragma omp parallel reduction (||:l)
  l = thr.t != omp_get_thread_num () + 11;

  if (l)
    abort ();
  return 0;
}
/libgomp.c/omp_workshare1.c
0,0 → 1,47
/****************************************************************************** |
* FILE: omp_workshare1.c |
* DESCRIPTION: |
* OpenMP Example - Loop Work-sharing - C/C++ Version |
* In this example, the iterations of a loop are scheduled dynamically |
* across the team of threads. A thread will perform CHUNK iterations |
* at a time before being scheduled for the next CHUNK of work. |
* AUTHOR: Blaise Barney 5/99 |
* LAST REVISED: 04/06/05 |
******************************************************************************/ |
#include <omp.h> |
#include <stdio.h> |
#include <stdlib.h> |
#define CHUNKSIZE 10 |
#define N 100 |
|
/* Dynamically scheduled loop worksharing: each thread takes CHUNK
   iterations at a time.  Only thread 0 writes NTHREADS, so the shared
   variable needs no further synchronization.  */
int main (int argc, char *argv[]) {

  int nthreads, tid, i, chunk;
  float a[N], b[N], c[N];

  /* Some initializations */
  for (i=0; i < N; i++)
    a[i] = b[i] = i * 1.0;
  chunk = CHUNKSIZE;

#pragma omp parallel shared(a,b,c,nthreads,chunk) private(i,tid)
  {
    tid = omp_get_thread_num();
    if (tid == 0)
      {
	nthreads = omp_get_num_threads();
	printf("Number of threads = %d\n", nthreads);
      }
    printf("Thread %d starting...\n",tid);

    /* Iterations handed out CHUNK at a time, first come first
       served.  */
#pragma omp for schedule(dynamic,chunk)
    for (i=0; i<N; i++)
      {
	c[i] = a[i] + b[i];
	printf("Thread %d: c[%d]= %f\n",tid,i,c[i]);
      }

  } /* end of parallel section */

  return 0;
}
/libgomp.c/omp_orphan.c
0,0 → 1,47
/****************************************************************************** |
* FILE: omp_orphan.c |
* DESCRIPTION: |
* OpenMP Example - Parallel region with an orphaned directive - C/C++ Version |
* This example demonstrates a dot product being performed by an orphaned |
* loop reduction construct. Scoping of the reduction variable is critical. |
* AUTHOR: Blaise Barney 5/99 |
* LAST REVISED: 04/06/05 |
******************************************************************************/ |
#include <omp.h> |
#include <stdio.h> |
#include <stdlib.h> |
#define VECLEN 100 |
|
/* Operand vectors and the reduction target, all at file scope so the
   orphaned reduction below can name SUM.  */
float a[VECLEN], b[VECLEN], sum;

/* Orphaned worksharing: the omp for and its reduction on the global
   SUM bind to whatever parallel region the caller is executing.  */
float dotprod ()
{
  int i,tid;

  tid = omp_get_thread_num();
#pragma omp for reduction(+:sum)
  for (i=0; i < VECLEN; i++)
    {
      sum = sum + (a[i]*b[i]);
      printf(" tid= %d i=%d\n",tid,i);
    }

  return(sum);
}


int main (int argc, char *argv[])
{
  int i;

  for (i=0; i < VECLEN; i++)
    a[i] = b[i] = 1.0 * i;
  sum = 0.0;

  /* Every thread calls dotprod; the orphaned loop splits the work and
     the reduction combines into the shared SUM.  */
#pragma omp parallel
  sum = dotprod();

  printf("Sum = %f\n",sum);

  return 0;
}
/libgomp.c/omp_workshare3.c
0,0 → 1,43
/* { dg-do compile } */ |
|
/****************************************************************************** |
* OpenMP Example - Combined Parallel Loop Work-sharing - C/C++ Version |
* FILE: omp_workshare3.c |
* DESCRIPTION: |
* This example attempts to show use of the parallel for construct. However |
* it will generate errors at compile time. Try to determine what is causing |
* the error. See omp_workshare4.c for a corrected version. |
* SOURCE: Blaise Barney 5/99 |
* LAST REVISED: 03/03/2002 |
******************************************************************************/ |
|
#include <omp.h> |
#include <stdio.h> |
#define N 50 |
#define CHUNKSIZE 5 |
|
/* INTENTIONALLY INVALID (dg-do compile test): a parallel-for construct
   must be followed immediately by a for statement; the brace block
   below is what triggers the expected dg-error.  Do not "fix" this
   file -- the error is the point of the test.  */
main () {

  int i, chunk, tid;
  float a[N], b[N], c[N];

  /* Some initializations */
  for (i=0; i < N; i++)
    a[i] = b[i] = i * 1.0;
  chunk = CHUNKSIZE;

#pragma omp parallel for \
  shared(a,b,c,chunk) \
  private(i,tid) \
  schedule(static,chunk)
  { /* { dg-error "expected" } */
    tid = omp_get_thread_num();
    for (i=0; i < N; i++)
      {
	c[i] = a[i] + b[i];
	printf("tid= %d i= %d c[i]= %f\n", tid, i, c[i]);
      }
  } /* end of parallel for construct */

  return 0;
}
/libgomp.c/omp-loop02.c
0,0 → 1,32
#include <omp.h> |
|
/* Orphaned work sharing. */ |
|
extern void abort (void); |
|
/* Iteration count, shared with the caller's verification loop.  */
#define N 10

/* Orphaned worksharing: the omp for binds to whatever parallel region
   the caller is executing in.  Fills A[i] with i + 3.  */
void parloop (int *a)
{
  int idx;

#pragma omp for
  for (idx = 0; idx < N; idx++)
    a[idx] = idx + 3;
}
|
main() |
{ |
int i, a[N]; |
|
#pragma omp parallel shared(a) |
{ |
parloop (a); |
} |
|
for (i = 0; i < N; i++) |
if (a[i] != i + 3) |
abort (); |
|
return 0; |
} |
/libgomp.c/nestedfn-2.c
0,0 → 1,20
/* { dg-do run } */ |
|
extern void abort (void); |
|
/* A GNU C nested function containing an omp master construct must see
   the enclosing frame's I.  Outside any parallel region the caller is
   the master, so the increment always executes.  */
int
main (void)
{
  int i;
  void
  foo (void)
  {
#pragma omp master
    i += 8;
  }
  i = 4;
  foo ();
  if (i != 12)
    abort ();
  return 0;
}
/libgomp.c/shared-2.c
0,0 → 1,50
#include <stdio.h> |
#include <omp.h> |
|
extern void abort (void); |
|
/* One parallel region combining firstprivate scalars with a shared
   error flag, a lastprivate worksharing loop and lastprivate
   sections.  Afterwards each lastprivate must hold the value from the
   sequentially-last iteration/section.  */
void
parallel (int a, int b)
{
  int bad, LASTPRIV, LASTPRIV_SEC;
  int i;

  a = b = 3;

  bad = 0;

#pragma omp parallel firstprivate (a,b) shared (bad) num_threads (5)
  {
    /* firstprivate copies must carry the pre-region values.  */
    if (a != 3 || b != 3)
      bad = 1;

#pragma omp for lastprivate (LASTPRIV)
    for (i = 0; i < 10; i++)
      LASTPRIV = i;

#pragma omp sections lastprivate (LASTPRIV_SEC)
    {
#pragma omp section
      { LASTPRIV_SEC = 3; }

      /* Lexically last section -> its value wins.  */
#pragma omp section
      { LASTPRIV_SEC = 42; }
    }

  }

  if (LASTPRIV != 9)
    abort ();

  if (LASTPRIV_SEC != 42)
    abort ();

  if (bad)
    abort ();
}
|
/* Arguments are dummies; parallel() overwrites both with 3 before
   forking.  */
int main()
{
  parallel (1, 2);
  return 0;
}
/libgomp.c/nestedfn-4.c
0,0 → 1,65
/* PR middle-end/25261 */ |
/* { dg-do run } */ |
|
#include <omp.h> |
|
extern void abort (void); |
|
/* PR middle-end/25261: nested functions used inside OpenMP clause
   expressions (if, schedule chunk, num_threads) and region bodies must
   read the *current* value of the enclosing I, including after I is
   modified between regions.  L accumulates failures.  */
int
main (void)
{
  int i = 5, j, l = 0;
  int foo (void)
  {
    return i == 6;
  }
  int bar (void)
  {
    return i - 3;
  }

  omp_set_dynamic (0);

  /* i == 5: foo () false -> serialized, one thread.  */
#pragma omp parallel if (foo ()) num_threads (2)
  if (omp_get_num_threads () != 1)
#pragma omp atomic
    l++;

  /* chunk = bar () = 2: thread 0 gets j = 0,1; thread 1 gets 2,3.  */
#pragma omp parallel for schedule (static, bar ()) num_threads (2) \
			 reduction (|:l)
  for (j = 0; j < 4; j++)
    if (omp_get_thread_num () != (j >= 2))
#pragma omp atomic
      l++;

  i++;

  /* i == 6 now: foo () true -> real two-thread team.  */
#pragma omp parallel if (foo ()) num_threads (2)
  if (omp_get_num_threads () != 2)
#pragma omp atomic
    l++;

  /* chunk = bar () = 3.  */
#pragma omp parallel for schedule (static, bar ()) num_threads (2) \
			 reduction (|:l)
  for (j = 0; j < 6; j++)
    if (omp_get_thread_num () != (j >= 3))
#pragma omp atomic
      l++;

  /* Nested calls from inside the region bodies themselves.  */
#pragma omp parallel num_threads (4) reduction (|:l)
  if (!foo () || bar () != 3)
#pragma omp atomic
    l++;

  i++;

#pragma omp parallel num_threads (4) reduction (|:l)
  if (foo () || bar () != 4)
#pragma omp atomic
    l++;

  if (l)
    abort ();

  return 0;
}
/libgomp.c/loop-2.c
0,0 → 1,114
/* Validate static scheduling iteration dispatch. We only test with |
even thread distributions here; there are multiple valid solutions |
for uneven thread distributions. */ |
|
/* { dg-require-effective-target sync_int_long } */ |
|
#include <omp.h> |
#include <string.h> |
#include <assert.h> |
#include "libgomp_g.h" |
|
|
/* 360 divides evenly by every thread count and stride used in main, so
   the static-schedule prediction in test_data is exact.  data[i] holds
   the (thread, chunk-round) pair recorded for each claimed index.  */
#define N 360
static int data[N][2];
static int INCR, NTHR, CHUNK;

/* Mark every slot unclaimed.  */
static void clean_data (void)
{
  memset (data, -1, sizeof (data));
}
|
/* Walk the whole iteration space checking that the static schedule
   dispatched indices exactly as predicted: each thread receives CHUNK
   consecutive stride-INCR indices in round-robin order, and CHUNK == 0
   means one equal share per thread.  */
static void test_data (void)
{
  int n, i, c, thr, iter, chunk;

  chunk = CHUNK;
  if (chunk == 0)
    chunk = N / INCR / NTHR;

  /* thr = expected owning thread, iter = expected chunk round,
     c = position within the current chunk, i = position within the
     current stride (only i == 0 positions are claimed).  */
  thr = iter = c = i = 0;

  for (n = 0; n < N; ++n)
    {
      if (i == 0)
	{
	  assert (data[n][0] == thr);
	  assert (data[n][1] == iter);
	}
      else
	{
	  assert (data[n][0] == -1);
	  assert (data[n][1] == -1);
	}

      if (++i == INCR)
	{
	  i = 0;
	  if (++c == chunk)
	    {
	      c = 0;
	      if (++thr == NTHR)
		{
		  thr = 0;
		  ++iter;
		}
	    }
	}
    }
}
|
static void set_data (long i, int thr, int iter) |
{ |
int old; |
assert (i >= 0 && i < N); |
old = __sync_lock_test_and_set (&data[i][0], thr); |
assert (old == -1); |
old = __sync_lock_test_and_set (&data[i][1], iter); |
assert (old == -1); |
} |
|
/* Worker: claim every index handed out by the static-schedule loop
   API, tagging each with this thread's id and its chunk round COUNT.  */
static void f_static_1 (void *dummy)
{
  int iam = omp_get_thread_num ();
  long s0, e0, i, count = 0;
  if (GOMP_loop_static_start (0, N, INCR, CHUNK, &s0, &e0))
    do
      {
	for (i = s0; i < e0; i += INCR)
	  set_data (i, iam, count);
	++count;
      }
    while (GOMP_loop_static_next (&s0, &e0));
  GOMP_loop_end ();
}
|
/* Run the worker across NTHR threads (master included) and verify the
   dispatch pattern.  */
static void test (void)
{
  clean_data ();
  GOMP_parallel_start (f_static_1, NULL, NTHR);
  f_static_1 (NULL);
  GOMP_parallel_end ();
  test_data ();
}

/* Only even thread distributions are tested; uneven ones admit
   multiple valid dispatch solutions.  */
int main()
{
  omp_set_dynamic (0);

  NTHR = 5;

  INCR = 1, CHUNK = 0;	/* chunk = 360 / 5 = 72 */
  test ();

  INCR = 4, CHUNK = 0;	/* chunk = 360 / 4 / 5 = 18 */
  test ();

  INCR = 1, CHUNK = 4;	/* 1 * 4 * 5 = 20 -> 360 / 20 = 18 iterations.  */
  test ();

  INCR = 3, CHUNK = 4;	/* 3 * 4 * 5 = 60 -> 360 / 60 = 6 iterations.  */
  test ();

  return 0;
}
/libgomp.c/omp-nested-1.c
0,0 → 1,28
// { dg-do run } |
|
extern void abort(void); |
#define N 1000

/* Nested parallelism: the outer parallel-for splits N iterations over
   two threads, and every iteration opens an inner single-thread
   parallel region that bumps the shared counter atomically.  The
   result is therefore exactly N.  */
int foo()
{
  int total = 0, iter;

#pragma omp parallel for num_threads(2) shared (total)
  for (iter = 0; iter < N; ++iter)
    {
#pragma omp parallel num_threads(1) shared (total)
      {
#pragma omp atomic
	total++;
      }
    }

  return total;
}
|
/* Every one of the N outer iterations must have incremented the
   counter exactly once.  */
int main()
{
  if (foo() != N)
    abort ();
  return 0;
}
/libgomp.c/atomic-1.c
0,0 → 1,64
/* { dg-do run } */ |
/* { dg-options "-O2 -fopenmp" } */ |
/* { dg-options "-O2 -fopenmp -march=pentium" { target i?86-*-* x86_64-*-* } } */ |
/* { dg-options "-O2 -fopenmp" { target lp64 } } */ |
|
#ifdef __i386__ |
#include "../../../gcc/testsuite/gcc.dg/i386-cpuid.h" |
#define bit_CX8 (1 << 8) |
#endif |
|
extern void abort (void);
/* Shared double updated atomically in f1.  */
double d;
/* e is deliberately sandwiched between two ints so main can verify that
   the atomic updates in f2 do not clobber the neighbouring fields.  */
struct
{
  int i;
  double e;
  int j;
} x;
|
/* Atomic read-modify-write on a global double:
   ((1.0 + 7.5) * 2.5) / 0.25 == 85.0, checked by main.  */
void
f1 (void)
{
#pragma omp atomic
  d += 7.5;
#pragma omp atomic
  d *= 2.5;
#pragma omp atomic
  d /= 0.25;
}
|
/* Same arithmetic as f1 but through a struct member, which can take a
   different code path (the member is not a standalone object).  */
void
f2 (void)
{
#pragma omp atomic
  x.e += 7.5;
#pragma omp atomic
  x.e *= 2.5;
#pragma omp atomic
  x.e /= 0.25;
}
|
int
main (void)
{
#ifdef __i386__
  unsigned long cpu_facilities;

  cpu_facilities = i386_cpuid ();

  /* The 8-byte atomics used here need cmpxchg8b; skip silently on CPUs
     that lack it.  */
  if ((cpu_facilities & bit_CX8) == 0)
    return 0;
#endif

  d = 1.0;
  f1 ();
  if (d != 85.0)
    abort ();

  x.e = 1.0;
  f2 ();
  /* The fields adjacent to x.e must be untouched.  */
  if (x.i != 0 || x.e != 85.0 || x.j != 0)
    abort ();
  return 0;
}
/libgomp.c/omp_hello.c
0,0 → 1,39
/****************************************************************************** |
* FILE: omp_hello.c |
* DESCRIPTION: |
* OpenMP Example - Hello World - C/C++ Version |
* In this simple example, the master thread forks a parallel region. |
* All threads in the team obtain their unique thread number and print it. |
* The master thread only prints the total number of threads. Two OpenMP |
* library routines are used to obtain the number of threads and each |
* thread's number. |
* AUTHOR: Blaise Barney 5/99 |
* LAST REVISED: 04/06/05 |
******************************************************************************/ |
#include <omp.h> |
#include <stdio.h> |
#include <stdlib.h> |
|
/* Hello-world demo: each thread prints its id; the master additionally
   prints the team size.  */
int main (int argc, char *argv[])
{
  int thread_count, thread_id;

  /* Fork a team of threads; each gets private copies of both locals.  */
#pragma omp parallel private(thread_count, thread_id)
  {
    /* Every thread reports its own number.  */
    thread_id = omp_get_thread_num();
    printf("Hello World from thread = %d\n", thread_id);

    /* Only the master thread reports the team size.  */
    if (thread_id == 0)
      {
        thread_count = omp_get_num_threads();
        printf("Number of threads = %d\n", thread_count);
      }
  } /* All threads join master thread and disband */

  return 0;
}
/libgomp.c/single-2.c
0,0 → 1,15
#include <stdlib.h> |
|
int
main (void)
{
  int i;
  i = 4;
  /* One thread executes the single block and copyprivate broadcasts its
     value of `i'.  Outside a parallel region there is only one thread,
     so this degenerates to plain execution — but the construct must
     still compile and run.  */
#pragma omp single copyprivate (i)
  {
    i = 6;
  }
  if (i != 6)
    abort ();
  return 0;
}
/libgomp.c/pr32362-2.c
0,0 → 1,33
/* PR middle-end/32362 */ |
/* { dg-do run } */ |
/* { dg-options "-O2" } */ |
|
#include <omp.h> |
#include <stdlib.h> |
|
/* a is firstprivate in the inner region; b is private in the outer.  */
int a = 2, b = 4;
|
int
main ()
{
  /* One slot per outer thread; -1 means "never written".  */
  int n[4] = { -1, -1, -1, -1 };
  omp_set_num_threads (4);
  omp_set_dynamic (0);
  omp_set_nested (1);
#pragma omp parallel private(b)
  {
    b = omp_get_thread_num ();
    /* Each inner team starts from its own firstprivate copy of the
       global a (== 2).  */
#pragma omp parallel firstprivate(a)
    {
      a = (omp_get_thread_num () + a) + 1;
      /* Record only where the inner and outer thread ids coincide.  */
      if (b == omp_get_thread_num ())
        n[omp_get_thread_num ()] = a + (b << 4);
    }
  }
  /* Thread 0 always runs: 2 + 0 + 1 == 3.  */
  if (n[0] != 3)
    abort ();
  /* The remaining slots are checked only if the runtime actually gave
     us the extra outer threads (n[3] written).  */
  if (n[3] != -1
      && (n[1] != 0x14 || n[2] != 0x25 || n[3] != 0x36))
    abort ();
  return 0;
}
/libgomp.c/ordered-2.c
0,0 → 1,82
/* Trivial test of ordered. */ |
|
/* { dg-require-effective-target sync_int_long } */ |
|
#include <omp.h> |
#include <string.h> |
#include <assert.h> |
#include "libgomp_g.h" |
|
|
#define N 100
/* Iteration number the next ordered section is expected to carry.  */
static int next;
/* Chunk size and team size of the current configuration.  */
static int CHUNK, NTHR;
|
/* Reset the expected-iteration counter before each run.  */
static void clean_data (void)
{
  next = 0;
}
|
static void set_data (long i) |
{ |
int n = __sync_fetch_and_add (&next, 1); |
assert (n == i); |
} |
|
|
/* For schedule kind SCHED, instantiate:
   - f_<sched>_1: worker running an ordered loop over [0, N) step 1 with
     chunk size CHUNK through the raw libgomp ABI, calling set_data from
     inside GOMP_ordered_start/GOMP_ordered_end for every iteration;
   - t_<sched>_1: driver that resets the counter and runs the worker on
     a team of NTHR threads, the master joining in directly.  */
#define TMPL_1(sched) \
static void f_##sched##_1 (void *dummy) \
{ \
  long s0, e0, i; \
  if (GOMP_loop_ordered_##sched##_start (0, N, 1, CHUNK, &s0, &e0)) \
    do \
      { \
        for (i = s0; i < e0; ++i) \
          { \
            GOMP_ordered_start (); \
            set_data (i); \
            GOMP_ordered_end (); \
          } \
      } \
    while (GOMP_loop_ordered_##sched##_next (&s0, &e0)); \
  GOMP_loop_end (); \
} \
static void t_##sched##_1 (void) \
{ \
  clean_data (); \
  GOMP_parallel_start (f_##sched##_1, NULL, NTHR); \
  f_##sched##_1 (NULL); \
  GOMP_parallel_end (); \
}
|
/* One worker/driver pair per schedule kind.  */
TMPL_1(static)
TMPL_1(dynamic)
TMPL_1(guided)

/* Run all three schedule kinds with the current CHUNK/NTHR settings.  */
static void test (void)
{
  t_static_1 ();
  t_dynamic_1 ();
  t_guided_1 ();
}
|
int main() |
{ |
omp_set_dynamic (0); |
|
NTHR = 4; |
|
CHUNK = 1; |
test (); |
|
CHUNK = 5; |
test (); |
|
CHUNK = 7; |
test (); |
|
CHUNK = 0; |
t_static_1 (); |
|
return 0; |
} |
/libgomp.c/barrier-1.c
0,0 → 1,50
/* Trivial test of barrier. */ |
|
#include <omp.h> |
#include <sys/time.h> |
#include <unistd.h> |
#include <assert.h> |
#include "libgomp_g.h" |
|
|
/* stamps[t][s]: timestamps taken by thread t; s == 0 on entry, s == 1
   between the two barriers (written by thread 0 only), s == 2 on exit.  */
struct timeval stamps[3][3];
|
/* Team body: stamp entry, have the master dawdle so the first barrier
   provably gates the others, stamp the midpoint (master only), dawdle
   again, then stamp exit after the second barrier.  */
static void function(void *dummy)
{
  int iam = omp_get_thread_num ();

  gettimeofday (&stamps[iam][0], NULL);
  if (iam == 0)
    usleep (10);

  GOMP_barrier ();

  if (iam == 0)
    {
      gettimeofday (&stamps[0][1], NULL);
      usleep (10);
    }

  GOMP_barrier ();

  gettimeofday (&stamps[iam][2], NULL);
}
|
int main() |
{ |
omp_set_dynamic (0); |
|
GOMP_parallel_start (function, NULL, 3); |
function (NULL); |
GOMP_parallel_end (); |
|
assert (!timercmp (&stamps[0][0], &stamps[0][1], >)); |
assert (!timercmp (&stamps[1][0], &stamps[0][1], >)); |
assert (!timercmp (&stamps[2][0], &stamps[0][1], >)); |
|
assert (!timercmp (&stamps[0][1], &stamps[0][2], >)); |
assert (!timercmp (&stamps[0][1], &stamps[1][2], >)); |
assert (!timercmp (&stamps[0][1], &stamps[2][2], >)); |
|
return 0; |
} |
/libgomp.c/pr26943-1.c
0,0 → 1,24
/* PR c++/26943 */ |
/* { dg-do run } */ |
|
extern void abort (void);
extern void omp_set_dynamic (int);
/* Copied into every thread by firstprivate and written back from the
   final iteration by lastprivate.  */
int n = 6;
|
int
main (void)
{
  int i, x = 0;
  omp_set_dynamic (0);
  /* Each thread's firstprivate copy of n must start at 6; lastprivate
     then copies back the value from the final iteration (i == 15).  */
#pragma omp parallel for num_threads (16) firstprivate (n) lastprivate (n) \
    schedule (static, 1) reduction (+: x)
  for (i = 0; i < 16; i++)
    {
      if (n != 6)
        ++x;
      n = i;
    }
  if (x || n != 15)
    abort ();
  return 0;
}
/libgomp.c/pr26943-3.c
0,0 → 1,56
/* PR c++/26943 */ |
/* { dg-do run } */ |
|
/* Declared by hand instead of including <omp.h>, matching the style of
   this test.  omp_set_nested was previously called (in main) without any
   declaration at all — an implicit function declaration, invalid since
   C99 — so declare it here alongside the others.  */
extern int omp_set_dynamic (int);
extern int omp_set_nested (int);
extern int omp_get_thread_num (void);
extern void abort (void);

/* Subjects for the shared/firstprivate/lastprivate/private checks.  */
int a = 8, b = 12, c = 16, d = 20, j = 0, l = 0;
char e[10] = "a", f[10] = "b", g[10] = "c", h[10] = "d";
/* Zero-initialized and never written; volatile keeps the comparison with
   the thread number from being folded away, so outer thread 0 runs the
   nested test body.  */
volatile int k;
|
int
main (void)
{
  int i;
  omp_set_dynamic (0);
  omp_set_nested (1);
  /* k is 0 and never written (it is volatile only to keep the test
     honest), so outer thread 0 runs the nested region; l accumulates
     its failure flags through the reduction.  */
#pragma omp parallel num_threads (2) reduction (+:l)
  if (k == omp_get_thread_num ())
    {
#pragma omp parallel for shared (a, e) firstprivate (b, f) \
    lastprivate (c, g) private (d, h) \
    schedule (static, 1) num_threads (4) \
    reduction (+:j)
      for (i = 0; i < 4; i++)
        {
          /* Before anyone writes: shared and firstprivate copies must
             still hold their initial values.  */
          if (a != 8 || b != 12 || e[0] != 'a' || f[0] != 'b')
            j++;
#pragma omp barrier
#pragma omp atomic
          a += i;
          b += i;
          c = i;
          d = i;
#pragma omp atomic
          e[0] += i;
          f[0] += i;
          g[0] = 'g' + i;
          h[0] = 'h' + i;
#pragma omp barrier
          /* Shared a/e saw all four increments (0+1+2+3 == 6); each
             privatized copy saw only this thread's own write.  */
          if (a != 8 + 6 || b != 12 + i || c != i || d != i)
            j += 8;
          if (e[0] != 'a' + 6 || f[0] != 'b' + i || g[0] != 'g' + i)
            j += 64;
          if (h[0] != 'h' + i)
            j += 512;
        }
      /* After the region: shared stay updated, firstprivate/private
         originals are untouched, lastprivate holds the value from the
         final iteration (i == 3).  */
      if (j || a != 8 + 6 || b != 12 || c != 3 || d != 20)
        ++l;
      if (e[0] != 'a' + 6 || f[0] != 'b' || g[0] != 'g' + 3 || h[0] != 'd')
        l += 8;
    }
  if (l)
    abort ();
  return 0;
}
/libgomp.c/atomic-10.c
0,0 → 1,140
/* { dg-do run } */ |
/* { dg-options "-O2 -fopenmp" } */ |
|
extern void abort (void);
/* Targets for the atomic update forms: plain ints, volatile ints,
   volatile unsigned chars (narrow type) and floats.  */
int x1, x2, x3, x4, x5;
volatile int y6 = 9, y2, y3, y4, y5;
volatile unsigned char z1, z2, z3, z4, z5;
float a1, a2, a3, a4;
|
/* One of each supported atomic update form on plain ints; main checks
   the combined results (e.g. x1: ((0 + 1) - 9) / 3 == -2).  */
void
f1 (void)
{
#pragma omp atomic
  x1++;
#pragma omp atomic
  x2--;
#pragma omp atomic
  ++x3;
#pragma omp atomic
  --x4;
#pragma omp atomic
  x5 += 1;
#pragma omp atomic
  x1 -= y6;
#pragma omp atomic
  x2 |= 1;
#pragma omp atomic
  x3 &= 1;
#pragma omp atomic
  x4 ^= 1;
#pragma omp atomic
  x5 *= 3;
#pragma omp atomic
  x1 /= 3;
#pragma omp atomic
  x2 /= 3;
#pragma omp atomic
  x3 <<= 3;
#pragma omp atomic
  x4 >>= 3;
}
|
/* The same update forms on volatile ints.  Note y6 -= x1 couples this
   function to the value f1 left in x1 (-2), so main must call f1 first.  */
void
f2 (void)
{
#pragma omp atomic
  y6++;
#pragma omp atomic
  y2--;
#pragma omp atomic
  ++y3;
#pragma omp atomic
  --y4;
#pragma omp atomic
  y5 += 1;
#pragma omp atomic
  y6 -= x1;
#pragma omp atomic
  y2 |= 1;
#pragma omp atomic
  y3 &= 1;
#pragma omp atomic
  y4 ^= 1;
#pragma omp atomic
  y5 *= 3;
#pragma omp atomic
  y6 /= 3;
#pragma omp atomic
  y2 /= 3;
#pragma omp atomic
  y3 <<= 3;
#pragma omp atomic
  y4 >>= 3;
}
|
/* The update forms on volatile unsigned chars: exercises narrow-type
   atomics; all arithmetic wraps modulo 256.  */
void
f3 (void)
{
#pragma omp atomic
  z1++;
#pragma omp atomic
  z2--;
#pragma omp atomic
  ++z3;
#pragma omp atomic
  --z4;
#pragma omp atomic
  z5 += 1;
#pragma omp atomic
  z1 |= 1;
#pragma omp atomic
  z2 &= 1;
#pragma omp atomic
  z3 ^= 1;
#pragma omp atomic
  z4 *= 3;
#pragma omp atomic
  z5 /= 3;
#pragma omp atomic
  z1 /= 3;
#pragma omp atomic
  z2 <<= 3;
#pragma omp atomic
  z3 >>= 3;
}
|
/* Atomic updates on floats.  Only the update of each left-hand side is
   atomic; the right-hand side of a3 -= a1 + a2 reads a1/a2 normally.
   With the values set in main: a1 = 15, a2 = 35, a3 = -39, a4 = 6.5.  */
void
f4 (void)
{
#pragma omp atomic
  a1 += 8.0;
#pragma omp atomic
  a2 *= 3.5;
#pragma omp atomic
  a3 -= a1 + a2;
#pragma omp atomic
  a4 /= 2.0;
}
|
int
main (void)
{
  f1 ();
  /* -8 / 3 == -2 and -1 / 3 == 0 (C division truncates toward zero);
     x4 >>= 3 on a negative value assumes an arithmetic right shift.  */
  if (x1 != -2 || x2 != 0 || x3 != 8 || x4 != -1 || x5 != 3)
    abort ();
  /* f2 must run after f1: it computes y6 -= x1 with x1 == -2.  */
  f2 ();
  if (y6 != 4 || y2 != 0 || y3 != 8 || y4 != -1 || y5 != 3)
    abort ();
  f3 ();
  /* Unsigned char arithmetic wraps mod 256 (e.g. 255 * 3 == 253).  */
  if (z1 != 0 || z2 != 8 || z3 != 0 || z4 != 253 || z5 != 0)
    abort ();
  a1 = 7;
  a2 = 10;
  a3 = 11;
  a4 = 13;
  f4 ();
  if (a1 != 15.0 || a2 != 35.0 || a3 != -39.0 || a4 != 6.5)
    abort ();
  return 0;
}
/libgomp.c/reduction-2.c
0,0 → 1,50
#include <omp.h> |
#include <stdlib.h> |
|
int
main (void)
{
  int i = 0, j = 0, k = ~0, l;
  double d = 1.0;
#pragma omp parallel num_threads(4)
  {
    /* One thread perturbs the shared originals before the reduction;
       the final combined values must include these contributions.  */
#pragma omp single
    {
      i = 16;
      k ^= (1 << 16);
      d += 32.0;
    }

    /* Inside the loop each private reduction copy starts at the
       operator's identity (0 for +, 1.0 for *, ~0 for &), not at the
       shared values — that is what the first check verifies.  */
#pragma omp for reduction(+:i) reduction(*:d) reduction(&:k)
    for (l = 0; l < 4; l++)
      {
        if (omp_get_num_threads () == 4 && (i != 0 || d != 1.0 || k != ~0))
#pragma omp atomic
          j |= 1;

        /* With 4 threads and schedule over 4 iterations, each thread
           writes in exactly one iteration.  */
        if (l == omp_get_thread_num ())
          {
            i = omp_get_thread_num ();
            d = i + 1;
            k = ~(1 << (2 * i));
          }
      }

    /* The combined results assume exactly 4 threads; skip otherwise.  */
    if (omp_get_num_threads () == 4)
      {
        if (i != (16 + 0 + 1 + 2 + 3))
#pragma omp atomic
          j |= 2;
        if (d != (33.0 * 1.0 * 2.0 * 3.0 * 4.0))
#pragma omp atomic
          j |= 4;
        if (k != (~0 ^ 0x55 ^ (1 << 16)))
#pragma omp atomic
          j |= 8;
      }
  }

  if (j)
    abort ();
  return 0;
}
/libgomp.c/reduction-4.c
0,0 → 1,36
#include <omp.h> |
#include <stdlib.h> |
|
int
main (void)
{
  int i = 0, j = 0, k = 0, l = 0;
  /* Each thread's private reduction copy starts at the operator's
     identity (0 for -, | and ^) — verified by the first check.  */
#pragma omp parallel num_threads(4) reduction(-:i) reduction(|:k) \
    reduction(^:l)
  {
    if (i != 0 || k != 0 || l != 0)
#pragma omp atomic
      j |= 1;

    /* The expected combined values assume exactly 4 threads; flag it
       and skip the final checks otherwise.  */
    if (omp_get_num_threads () != 4)
#pragma omp atomic
      j |= 2;

    i = omp_get_thread_num ();
    k = 1 << (2 * i);
    l = 0xea << (3 * i);
  }

  if (j & 1)
    abort ();
  if ((j & 2) == 0)
    {
      /* The `-' reduction combines by addition: 0 + 1 + 2 + 3.  */
      if (i != (0 + 1 + 2 + 3))
        abort ();
      if (k != 0x55)
        abort ();
      if (l != 0x1e93a)
        abort ();
    }
  return 0;
}