OpenCores
URL https://opencores.org/ocsvn/openrisc/openrisc/trunk

Subversion Repositories openrisc

Compare Revisions

  • This comparison shows the changes necessary to convert path
    /openrisc/trunk/gnu-old/gcc-4.2.2/libgomp/testsuite/libgomp.c/appendix-a
    from Rev 154 to Rev 816
    Reverse comparison

Rev 154 → Rev 816

/a.2.1.c
0,0 → 1,45
/* { dg-do run } */
 
#include <stdio.h>
#include <omp.h>
extern void abort (void);
/* Memory-model example: thread 0 repeatedly writes the shared x while
   thread 1 reads it (a deliberate data race); after the barrier every
   thread must observe the final value 5.  */
int
main ()
{
int bad, x;
x = 2;
bad = 0;
#pragma omp parallel num_threads(2) shared(x, bad)
{
if (omp_get_thread_num () == 0)
{
/* Writer thread: store 5 many times.  The volatile counter keeps
   the loop from being optimized away.  */
volatile int i;
for (i = 0; i < 100000000; i++)
x = 5;
}
else
{
/* Print 1: the following read of x has a race */
/* The racy read may legitimately see the old 2 or the new 5;
   anything else would indicate a broken implementation.  */
if (x != 2 && x != 5)
bad = 1;
}
/* The barrier implies a flush: afterwards both threads must see x == 5. */
#pragma omp barrier
if (omp_get_thread_num () == 0)
{
/* x must be 5 now. */
if (x != 5)
bad = 1;
}
else
{
/* x must be 5 now. */
if (x != 5)
bad = 1;
}
}

if (bad)
abort ();

return 0;
}
/a.21.1.c
0,0 → 1,25
/* { dg-do run } */
 
#include <stdio.h>
/* Print K inside an "ordered" region, so output appears in iteration
   order when invoked from an ordered worksharing loop (see a21).  */
void
work (int k)
{
#pragma omp ordered
printf (" %d\n", k);
}
 
/* Run work() for every index in [lb, ub) with the given stride.  The
   loop is parallel with dynamic scheduling, but the ordered clause
   still forces work()'s output into iteration order.  */
void
a21 (int lb, int ub, int stride)
{
int iter;
#pragma omp parallel for ordered schedule(dynamic)
for (iter = lb; iter < ub; iter += stride)
work (iter);
}
 
/* Drive the ordered loop over iterations 0, 5, 10, ..., 95.  */
int
main ()
{
a21 (0, 100, 5);
return 0;
}
/a.3.1.c
0,0 → 1,11
/* { dg-do run } */
 
#include <stdio.h>
/* Print a conformance message only when compiled with OpenMP support:
   a compliant implementation predefines the _OPENMP macro.  */
int
main ()
{
# ifdef _OPENMP
printf ("Compiled by an OpenMP-compliant implementation.\n");
# endif
return 0;
}
/a.4.1.c
0,0 → 1,38
/* { dg-do run } */
 
#include <omp.h>
extern void abort (void);
/* Fill IPOINTS consecutive elements of X, starting at index ISTART,
   with the sentinel value 123.456.  */
void
subdomain (float *x, int istart, int ipoints)
{
int idx;
for (idx = istart; idx < istart + ipoints; idx++)
x[idx] = 123.456;
}
 
/* Split the NPOINTS-element array X among the team's threads: each
   thread initializes its own contiguous partition via subdomain().  */
void
sub (float *x, int npoints)
{
/* All four locals are private: every thread computes its own slice. */
int iam, nt, ipoints, istart;
#pragma omp parallel default(shared) private(iam,nt,ipoints,istart)
{
iam = omp_get_thread_num ();
nt = omp_get_num_threads ();
ipoints = npoints / nt; /* size of partition */
istart = iam * ipoints; /* starting array index */
if (iam == nt - 1) /* last thread may do more */
ipoints = npoints - istart;
subdomain (x, istart, ipoints);
}
}
/* Fill a 10000-element array in parallel, then verify every element
   received the value 123.456 (checked against a small interval to
   allow for float rounding).  */
int
main ()
{
int i;
float array[10000];
sub (array, 10000);
for (i = 0; i < 10000; i++)
if (array[i] < 123.45 || array[i] > 123.46)
abort ();
return 0;
}
/a.40.1.c
0,0 → 1,48
/* { dg-do compile } */
 
#include <omp.h>
/* A pair of counters whose b field is protected by a nestable lock,
   so it can be re-acquired by code that already holds it.  */
typedef struct
{
int a, b;
omp_nest_lock_t lck;
} pair;
/* Work generators; never defined — this testcase is compile-only. */
int work1 ();
int work2 ();
int work3 ();
/* Add A to p->a.  Invoked only from incr_pair, which already holds
   the pair's nestable lock, so no locking is required here.  */
void
incr_a (pair * p, int a)
{
p->a = p->a + a;
}
 
/* Add B to p->b while holding the pair's nestable lock.  */
void
incr_b (pair * p, int b)
{
/* Called both from incr_pair and elsewhere, */
/* so need a nestable lock. */
omp_set_nest_lock (&p->lck);
p->b += b;
omp_unset_nest_lock (&p->lck);
}
 
/* Update both fields of the pair as one unit.  The nest lock is held
   across both calls; incr_b re-acquires it, which is legal precisely
   because the lock is nestable.  */
void
incr_pair (pair * p, int a, int b)
{
omp_set_nest_lock (&p->lck);
incr_a (p, a);
incr_b (p, b);
omp_unset_nest_lock (&p->lck);
}
 
/* Nestable-lock example: one section updates both fields through
   incr_pair while the other updates b alone; the shared nest lock
   keeps the concurrent updates consistent.  */
void
a40 (pair * p)
{
#pragma omp parallel sections
{
#pragma omp section
incr_pair (p, work1 (), work2 ());
#pragma omp section
incr_b (p, work3 ());
}
}
/a.5.1.c
0,0 → 1,13
/* { dg-do run } */
 
#include <omp.h>
/* With dynamic thread adjustment enabled the runtime may deliver
   fewer than the 10 requested threads; the test only checks that the
   request is accepted.  */
int
main ()
{
omp_set_dynamic (1);
#pragma omp parallel num_threads(10)
{
/* do work here */
}
return 0;
}
/a.15.1.c
0,0 → 1,44
/* { dg-do run } */
 
#include <stdio.h>
#include <omp.h> /* omp_get_thread_num, omp_get_num_threads, omp_get_nested */

/* Report the calling thread's id, the team size, the nested-parallelism
   setting and the argument N.
   Fix: <omp.h> was missing from this file, so the three OpenMP runtime
   calls below were implicitly declared (invalid since C99).  */
void
work (int n)
{
printf ("[%d of %d], nested = %d, n = %d\n", omp_get_thread_num (), omp_get_num_threads(), omp_get_nested (), n);
}
 
/* Two rounds of work separated by a barrier, which synchronizes all
   threads of the innermost enclosing team between the calls.  */
void
sub3 (int n)
{
work (n);
#pragma omp barrier
work (n);
}
 
/* Start a (possibly nested) parallel region whose threads all run
   sub3 on the shared value K.  */
void
sub2 (int k)
{
#pragma omp parallel shared(k)
sub3 (k);
}
 
/* Distribute iterations 0..n-1 across a parallel region; each
   iteration spawns a further nested region via sub2.  */
void
sub1 (int n)
{
int i;
#pragma omp parallel private(i) shared(n)
{
#pragma omp for
for (i = 0; i < n; i++)
sub2 (i);
}
}
/* Exercise the same helpers from three different nesting depths. */
int
main ()
{
sub1 (2);
sub2 (15);
sub3 (20);
return 0;
}
/a.16.1.c
0,0 → 1,47
/* { dg-do run } */
 
#include <stdio.h>
 
/* First per-iteration contribution: the iteration index as a float. */
float
work1 (int i)
{
float contribution = 1.0 * i;
return contribution;
}
 
/* Second per-iteration contribution: twice the iteration index. */
float
work2 (int i)
{
float doubled = 2.0 * i;
return doubled;
}
 
/* Parallel update of two arrays.  Several iterations may target the
   same x slot (index values repeat), so that accumulation is guarded
   by "atomic"; each y[i] is touched by exactly one iteration and
   needs no guard.  */
void
a16 (float *x, float *y, int *index, int n)
{
int i;
#pragma omp parallel for shared(x, y, index, n)
for (i = 0; i < n; i++)
{
#pragma omp atomic
x[index[i]] += work1 (i);
y[i] += work2 (i);
}
}
/* Drive a16 with 10000 iterations folding into 1000 x slots (ten
   iterations per slot), then print a sample of the results.  */
int
main ()
{
float x[1000];
float y[10000];
int index[10000];
int i;
for (i = 0; i < 10000; i++)
{
index[i] = i % 1000; /* ten iterations target each x slot */
y[i] = 0.0;
}
for (i = 0; i < 1000; i++)
x[i] = 0.0;
a16 (x, y, index, 10000);
for (i = 0; i < 10; i++)
printf ("x[%d] = %f, y[%d] = %f\n", i, x[i], i, y[i]);
return 0;
}
/a.26.1.c
0,0 → 1,17
/* { dg-do run } */
 
#include <stdio.h>
/* private(i): the region's i starts uninitialized.  firstprivate(j):
   each thread's j starts from the outer value 2.  After the region
   the outer i and j are unspecified, hence the comment on the printf. */
int
main ()
{
int i, j;
i = 1;
j = 2;
#pragma omp parallel private(i) firstprivate(j)
{
i = 3;
j = j + 2;
}
printf ("%d %d\n", i, j); /* i and j are undefined */
return 0;
}
/a.33.3.c
0,0 → 1,16
/* { dg-do compile } */
 
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/* Allocate and initialize one lock shared by the whole team: a single
   thread performs the allocation, and copyprivate broadcasts the
   resulting pointer into every thread's private lock_ptr.  */
omp_lock_t *
new_lock ()
{
omp_lock_t *lock_ptr;
#pragma omp single copyprivate(lock_ptr)
{
lock_ptr = (omp_lock_t *) malloc (sizeof (omp_lock_t));
omp_init_lock (lock_ptr);
}
return lock_ptr;
}
/a.18.1.c
0,0 → 1,67
/* { dg-do run } */
 
#include <omp.h>
#include <stdio.h>
 
extern void abort (void);
 
/* Fixed team size for the flush-based pipeline test below. */
#define NUMBER_OF_THREADS 4

/* synch[i] is raised to 1 once thread i has published work[i];
   result[i] combines thread i's value with its left neighbor's. */
int synch[NUMBER_OF_THREADS];
int work[NUMBER_OF_THREADS];
int result[NUMBER_OF_THREADS];
/* Simulated per-thread computation: double the thread index. */
int
fn1 (int i)
{
int doubled = 2 * i;
return doubled;
}
 
/* Combine two work values by summing them. */
int
fn2 (int a, int b)
{
int combined = b + a;
return combined;
}
 
/* Producer/consumer handshake built from flush: each thread publishes
   work[iam], raises its synch flag, then spins on its left neighbor's
   flag before reading the neighbor's work value.  */
int
main ()
{
int i, iam, neighbor;
omp_set_num_threads (NUMBER_OF_THREADS);
#pragma omp parallel private(iam,neighbor) shared(work,synch)
{
iam = omp_get_thread_num ();
synch[iam] = 0;
#pragma omp barrier
/*Do computation into my portion of work array */
work[iam] = fn1 (iam);
/* Announce that I am done with my work. The first flush
 * ensures that my work is made visible before synch.
 * The second flush ensures that synch is made visible.
 */
#pragma omp flush(work,synch)
synch[iam] = 1;
#pragma omp flush(synch)
/* Wait for neighbor. The first flush ensures that synch is read
 * from memory, rather than from the temporary view of memory.
 * The second flush ensures that work is read from memory, and
 * is done so after the while loop exits.
 */
neighbor = (iam > 0 ? iam : omp_get_num_threads ()) - 1;
while (synch[neighbor] == 0)
{
#pragma omp flush(synch)
}
#pragma omp flush(work,synch)
/* Read neighbor's values of work array */
result[iam] = fn2 (work[neighbor], work[iam]);
}
/* output result here */
/* Each result must equal 2*i + 2*neighbor (see fn1/fn2). */
for (i = 0; i < NUMBER_OF_THREADS; i++)
{
neighbor = (i > 0 ? i : NUMBER_OF_THREADS) - 1;
if (result[i] != i * 2 + neighbor * 2)
abort ();
}

return 0;
}
/a.36.1.c
0,0 → 1,31
/* { dg-do run } */
 
#include <omp.h>
#include <stdlib.h>
/* Placeholder: a real application would process thread IAM's
   IPOINTS-element share of X here; this test only checks team setup. */
void
do_by_16 (float *x, int iam, int ipoints)
{
}
 
/* Request exactly 16 threads with dynamic adjustment disabled, then
   partition X among them; aborts if the runtime delivered a
   different team size.  */
void
a36 (float *x, int npoints)
{
int iam, ipoints;
omp_set_dynamic (0); /* team size must be exactly as requested */
omp_set_num_threads (16);
#pragma omp parallel shared(x, npoints) private(iam, ipoints)
{
if (omp_get_num_threads () != 16)
abort ();
iam = omp_get_thread_num ();
ipoints = npoints / 16;
do_by_16 (x, iam, ipoints);
}
}
 
/* npoints (10) is smaller than the 16-thread team, so each thread's
   share is 0 points; only the thread-count logic is exercised.  */
int main()
{
float a[10];
a36 (a, 10);
return 0;
}
/a.19.1.c
0,0 → 1,55
/* { dg-do run } */
 
/* Shared global and a pointer to it, used to observe flush behavior. */
int x, *p = &x;
extern void abort (void);
/* Store 1 through Q, then flush all accessible shared data.  */
void
f1 (int *q)
{
*q = 1;
#pragma omp flush
/* x, p, and *q are flushed */
/* because they are shared and accessible */
/* q is not flushed because it is not shared. */
}
 
/* Store 2 through Q between two barriers; each barrier implies a
   flush of all accessible shared data.  */
void
f2 (int *q)
{
#pragma omp barrier
*q = 2;
#pragma omp barrier
/* a barrier implies a flush */
/* x, p, and *q are flushed */
/* because they are shared and accessible */
/* q is not flushed because it is not shared. */
}
 
/* Each of the two threads adds j (== 1 after f1) and then
   i + j + *p + n (== 1 + 2 + 1 + n after f2) into the sum reduction,
   so the region yields 2 * (5 + n); g(10) returns 30.  */
int
g (int n)
{
int i = 1, j, sum = 0;
*p = 1;
#pragma omp parallel reduction(+: sum) num_threads(2)
{
f1 (&j);
/* i, n and sum were not flushed */
/* because they were not accessible in f1 */
/* j was flushed because it was accessible */
sum += j;
f2 (&j);
/* i, n, and sum were not flushed */
/* because they were not accessible in f2 */
/* j was flushed because it was accessible */
sum += i + j + *p + n;
}
return sum;
}
 
/* g(10) must yield 2 * (5 + 10) == 30 with its two-thread region. */
int
main ()
{
int result = g (10);
if (result != 30)
abort ();
return 0;
}
/a.29.1.c
0,0 → 1,30
/* { dg-do run } */
 
#include <assert.h>
/* Global matrix, also passed (decayed via A[0]) as the flat array C. */
int A[2][2] = { 1, 2, 3, 4 };
/* firstprivate on array parameters: B and C decayed to pointers, so
   their "private copies" are pointer copies that still alias A; D and
   E are genuine arrays (E a VLA) and are copied by value.  */
void
f (int n, int B[n][n], int C[])
{
int D[2][2] = { 1, 2, 3, 4 };
int E[n][n];
assert (n >= 2);
E[1][1] = 4;
#pragma omp parallel firstprivate(B, C, D, E)
{
assert (sizeof (B) == sizeof (int (*)[n]));
assert (sizeof (C) == sizeof (int *));
assert (sizeof (D) == 4 * sizeof (int));
assert (sizeof (E) == n * n * sizeof (int));
/* Private B and C have values of original B and C. */
assert (&B[1][1] == &A[1][1]);
assert (&C[3] == &A[1][1]);
assert (D[1][1] == 4);
assert (E[1][1] == 4);
}
}
/* Pass A both as the n-by-n matrix and, decayed, as the flat array. */
int
main ()
{
f (2, A, A[0]);
return 0;
}
/a.39.1.c
0,0 → 1,38
/* { dg-do run } */
 
#include <stdio.h>
#include <omp.h>
/* Busy-work stand-in executed while the lock is unavailable. */
void
skip (int i)
{
}
 
/* Stand-in for the work done once the lock has been acquired. */
void
work (int i)
{
}
/* Simple lock routines: demonstrates blocking acquisition with
   omp_set_lock and non-blocking polling with omp_test_lock (which
   also acquires the lock when it succeeds).  */
int
main ()
{
omp_lock_t lck;
int id;
omp_init_lock (&lck);
#pragma omp parallel shared(lck) private(id)
{
id = omp_get_thread_num ();
omp_set_lock (&lck);
/* only one thread at a time can execute this printf */
printf ("My thread id is %d.\n", id);
omp_unset_lock (&lck);
while (!omp_test_lock (&lck))
{
skip (id); /* we do not yet have the lock,
so we must do something else */
}
work (id); /* we now have the lock
and can do the work */
omp_unset_lock (&lck);
}
omp_destroy_lock (&lck);
return 0;
}

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.