//==========================================================================
//
//        tm_posix.cxx
//
//        Basic timing test / scaffolding
//
//==========================================================================
// ####ECOSGPLCOPYRIGHTBEGIN####
// -------------------------------------------
// This file is part of eCos, the Embedded Configurable Operating System.
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Free Software Foundation, Inc.
//
// eCos is free software; you can redistribute it and/or modify it under
// the terms of the GNU General Public License as published by the Free
// Software Foundation; either version 2 or (at your option) any later
// version.
//
// eCos is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
// for more details.
//
// You should have received a copy of the GNU General Public License
// along with eCos; if not, write to the Free Software Foundation, Inc.,
// 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
//
// As a special exception, if other files instantiate templates or use
// macros or inline functions from this file, or you compile this file
// and link it with other works to produce a work based on this file,
// this file does not by itself cause the resulting work to be covered by
// the GNU General Public License. However the source code for this file
// must still be made available in accordance with section (3) of the GNU
// General Public License v2.
//
// This exception does not invalidate any other reasons why a work based
// on this file might be covered by the GNU General Public License.
// -------------------------------------------
// ####ECOSGPLCOPYRIGHTEND####
//==========================================================================
//#####DESCRIPTIONBEGIN####
//
// Author(s):     gthomas,nickg
// Contributors:  jlarmour
// Date:          1998-10-19
// Description:   Very simple kernel timing test
//####DESCRIPTIONEND####
//==========================================================================

#include <cyg/infra/testcase.h>
#include <cyg/infra/diag.h>
#include <pkgconf/posix.h>
#include <pkgconf/system.h>
#ifdef CYGPKG_KERNEL
#include <pkgconf/kernel.h>
#endif

#ifndef CYGPKG_POSIX_SIGNALS
#define NA_MSG "No POSIX signals"
#elif !defined(CYGPKG_POSIX_TIMERS)
#define NA_MSG "No POSIX timers"
#elif !defined(CYGPKG_POSIX_PTHREAD)
#define NA_MSG "POSIX threads not enabled"
#elif !defined(CYGFUN_KERNEL_API_C)
#define NA_MSG "Kernel C API not enabled"
#elif !defined(CYGSEM_KERNEL_SCHED_MLQUEUE)
#define NA_MSG "Kernel mlqueue scheduler not enabled"
#elif !defined(CYGVAR_KERNEL_COUNTERS_CLOCK)
#define NA_MSG "Kernel clock not enabled"
#elif CYGNUM_KERNEL_SCHED_PRIORITIES <= 12
#define NA_MSG "Kernel scheduler priorities <= 12"
#elif !defined(CYGPKG_POSIX_SEMAPHORES)
#define NA_MSG "POSIX semaphores not enabled"
#endif

//==========================================================================

#ifdef NA_MSG
extern "C" void
cyg_start(void)
{
    CYG_TEST_INIT();
    CYG_TEST_NA(NA_MSG);
}
#else

#include <pkgconf/kernel.h>
#include <pkgconf/hal.h>

#include <cyg/kernel/sched.hxx>
#include <cyg/kernel/thread.hxx>
#include <cyg/kernel/thread.inl>
#include <cyg/kernel/mutex.hxx>
#include <cyg/kernel/sema.hxx>
#include <cyg/kernel/sched.inl>
#include <cyg/kernel/clock.hxx>
#include <cyg/kernel/clock.inl>
#include <cyg/kernel/kapi.h>

#include <cyg/infra/testcase.h>

#include <cyg/kernel/test/stackmon.h>
#include CYGHWR_MEMORY_LAYOUT_H

// POSIX headers

#include <sys/types.h>
#include <pthread.h>
#include <semaphore.h>
#include <time.h>
#include <signal.h>
#include <errno.h>

//==========================================================================
// Define this to see the statistics with the first sample datum removed.
// This can expose the effects of caches on the speed of operations.

#undef STATS_WITHOUT_FIRST_SAMPLE

//==========================================================================

// Structure used to keep track of times
typedef struct fun_times {
    cyg_uint32 start;
    cyg_uint32 end;
} fun_times;

//==========================================================================

#define STACK_SIZE (PTHREAD_STACK_MIN*2)

// Defaults
#define NTEST_THREADS    16
#define NMUTEXES         32
#define NMBOXES          32
#define NSEMAPHORES      32
#define NTIMERS          32

#define NSAMPLES         32
#define NTHREAD_SWITCHES 128
#define NSCHEDS          128

#define NSAMPLES_SIM         2
#define NTEST_THREADS_SIM    2
#define NTHREAD_SWITCHES_SIM 4
#define NMUTEXES_SIM         2
#define NMBOXES_SIM          2
#define NSEMAPHORES_SIM      2
#define NSCHEDS_SIM          4
#define NTIMERS_SIM          2

//==========================================================================

static int nsamples;
static int ntest_threads;
static int nthread_switches;
#ifdef CYGPKG_POSIX_PTHREAD_MUTEX
static int nmutexes;
#endif
static int nmboxes;
static int nsemaphores;
static int nscheds;
static int ntimers;

static char stacks[NTEST_THREADS][STACK_SIZE];
static pthread_t threads[NTEST_THREADS];
static int overhead;
static sem_t synchro;
static fun_times thread_ft[NTEST_THREADS];

static fun_times test2_ft[NTHREAD_SWITCHES];
#ifdef CYGPKG_POSIX_PTHREAD_MUTEX
static pthread_mutex_t test_mutexes[NMUTEXES];
static fun_times mutex_ft[NMUTEXES];
static pthread_t mutex_test_thread_handle;
#endif
#if 0
static cyg_mbox test_mboxes[NMBOXES];
static cyg_handle_t test_mbox_handles[NMBOXES];
static fun_times mbox_ft[NMBOXES];
static cyg_thread mbox_test_thread;
static cyg_handle_t mbox_test_thread_handle;
#endif

static sem_t test_semaphores[NSEMAPHORES];
static fun_times semaphore_ft[NSEMAPHORES];
static pthread_t semaphore_test_thread_handle;

static fun_times sched_ft[NSCHEDS];

static timer_t timers[NTIMERS];
static fun_times timer_ft[NTIMERS];

static long rtc_resolution[] = CYGNUM_KERNEL_COUNTERS_RTC_RESOLUTION;
static long ns_per_system_clock;

#if defined(CYGVAR_KERNEL_COUNTERS_CLOCK_LATENCY)
// Data kept by kernel real time clock measuring clock interrupt latency
extern cyg_tick_count total_clock_latency, total_clock_interrupts;
extern cyg_int32 min_clock_latency, max_clock_latency;
extern bool measure_clock_latency;
#endif

#if defined(CYGVAR_KERNEL_COUNTERS_CLOCK_DSR_LATENCY)
extern cyg_tick_count total_clock_dsr_latency, total_clock_dsr_calls;
extern cyg_int32 min_clock_dsr_latency, max_clock_dsr_latency;
extern bool measure_clock_latency;
#endif

//==========================================================================

void run_sched_tests(void);
void run_thread_tests(void);
void run_thread_switch_test(void);
#ifdef CYGPKG_POSIX_PTHREAD_MUTEX
void run_mutex_tests(void);
void run_mutex_circuit_test(void);
#endif
void run_mbox_tests(void);
void run_mbox_circuit_test(void);
void run_semaphore_tests(void);
void run_semaphore_circuit_test(void);
void run_timer_tests(void);

//==========================================================================

#ifndef max
#define max(n,m) (m > n ? n : m)
#endif
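
// Note (added commentary): despite its name, the max() macro above evaluates
// to the *smaller* of its two arguments.  The "sanity" clamps in main(), such
// as ntest_threads = max(64, ntest_threads), appear to rely on exactly this
// behavior to keep the configurable counts within the statically sized
// arrays, so the definition is deliberately left unchanged.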

//==========================================================================
// Wait until a clock tick [real time clock] has passed.  This should keep it
// from happening again during a measurement, thus minimizing any fluctuations
void
wait_for_tick(void)
{
    cyg_tick_count_t tv0, tv1;
    tv0 = cyg_current_time();
    while (true) {
        tv1 = cyg_current_time();
        if (tv1 != tv0) break;
    }
}

//--------------------------------------------------------------------------
// Display a number of ticks as microseconds
// Note: for improved calculation significance, values are kept in ticks*1000
void
show_ticks_in_us(cyg_uint32 ticks)
{
    long long ns;
    ns = (ns_per_system_clock * (long long)ticks) / CYGNUM_KERNEL_COUNTERS_RTC_PERIOD;
    ns += 5;  // for rounding to .01us
    diag_printf("%5d.%02d", (int)(ns/1000), (int)((ns%1000)/10));
}
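
// Worked example (added commentary, assuming a 100 Hz system clock so that
// ns_per_system_clock == 1000000/100 == 10000): a half-tick measurement
// arrives here as ticks == CYGNUM_KERNEL_COUNTERS_RTC_PERIOD*1000/2, giving
//     ns = (10000 * RTC_PERIOD*500) / RTC_PERIOD = 5000000
// which prints as "5000.00" microseconds -- half of the 10 ms tick, as
// expected.  The *1000 scaling preserves two decimal places of precision
// without resorting to floating point.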

//--------------------------------------------------------------------------
//
// If the kernel is instrumented to measure clock interrupt latency, these
// measurements can be drastically perturbed by printing via "diag_printf()"
// since that code may run with interrupts disabled for long periods.
//
// In order to get accurate/reasonable latency figures _for the kernel
// primitive functions being tested_, the kernel's latency measurements
// are suspended while the printing actually takes place.
//
// The measurements are reenabled after the printing, thus allowing for
// fair measurements of the kernel primitives, which are not distorted
// by the printing mechanisms.

#if defined(CYGVAR_KERNEL_COUNTERS_CLOCK_LATENCY) && defined(HAL_CLOCK_LATENCY)
void
disable_clock_latency_measurement(void)
{
    wait_for_tick();
    measure_clock_latency = false;
}

void
enable_clock_latency_measurement(void)
{
    wait_for_tick();
    measure_clock_latency = true;
}

// Ensure that the measurements are reasonable (no startup anomalies)
void
reset_clock_latency_measurement(void)
{
  disable_clock_latency_measurement();
  total_clock_latency = 0;
  total_clock_interrupts = 0;
  min_clock_latency = 0x7FFFFFFF;
  max_clock_latency = 0;
#if defined(CYGVAR_KERNEL_COUNTERS_CLOCK_DSR_LATENCY)
  total_clock_dsr_latency = 0;
  total_clock_dsr_calls = 0;
  min_clock_dsr_latency = 0x7FFFFFFF;
  max_clock_dsr_latency = 0;
#endif
  enable_clock_latency_measurement();

}
#else
#define disable_clock_latency_measurement()
#define enable_clock_latency_measurement()
#define reset_clock_latency_measurement()
#endif

//--------------------------------------------------------------------------

void
show_times_hdr(void)
{
    disable_clock_latency_measurement();
    diag_printf("\n");
    diag_printf("                                 Confidence\n");
    diag_printf("     Ave     Min     Max     Var  Ave  Min  Function\n");
    diag_printf("  ======  ======  ======  ====== ========== ========\n");
    enable_clock_latency_measurement();
}

void
show_times_detail(fun_times ft[], int nsamples, char *title, bool ignore_first)
{
    int i, delta, min, max, con_ave, con_min, ave_dev;
    int start_sample, total_samples;
    cyg_int32 total, ave;

    if (ignore_first) {
        start_sample = 1;
        total_samples = nsamples-1;
    } else {
        start_sample = 0;
        total_samples = nsamples;
    }
    total = 0;
    min = 0x7FFFFFFF;
    max = 0;
    for (i = start_sample;  i < nsamples;  i++) {
        if (ft[i].end < ft[i].start) {
            // Clock wrapped around (timer tick)
            delta = (ft[i].end+CYGNUM_KERNEL_COUNTERS_RTC_PERIOD) - ft[i].start;
        } else {
            delta = ft[i].end - ft[i].start;
        }
        delta -= overhead;
        if (delta < 0) delta = 0;
        delta *= 1000;
        total += delta;
        if (delta < min) min = delta;
        if (delta > max) max = delta;
    }
    ave = total / total_samples;
    total = 0;
    ave_dev = 0;
    for (i = start_sample;  i < nsamples;  i++) {
        if (ft[i].end < ft[i].start) {
            // Clock wrapped around (timer tick)
            delta = (ft[i].end+CYGNUM_KERNEL_COUNTERS_RTC_PERIOD) - ft[i].start;
        } else {
            delta = ft[i].end - ft[i].start;
        }
        delta -= overhead;
        if (delta < 0) delta = 0;
        delta *= 1000;
        delta = delta - ave;
        if (delta < 0) delta = -delta;
        ave_dev += delta;
    }
    ave_dev /= total_samples;
    con_ave = 0;
    con_min = 0;
    for (i = start_sample;  i < nsamples;  i++) {
        if (ft[i].end < ft[i].start) {
            // Clock wrapped around (timer tick)
            delta = (ft[i].end+CYGNUM_KERNEL_COUNTERS_RTC_PERIOD) - ft[i].start;
        } else {
            delta = ft[i].end - ft[i].start;
        }
        delta -= overhead;
        if (delta < 0) delta = 0;
        delta *= 1000;
        if ((delta <= (ave+ave_dev)) && (delta >= (ave-ave_dev))) con_ave++;
        if ((delta <= (min+ave_dev)) && (delta >= (min-ave_dev))) con_min++;
    }
    con_ave = (con_ave * 100) / total_samples;
    con_min = (con_min * 100) / total_samples;
    show_ticks_in_us(ave);
    show_ticks_in_us(min);
    show_ticks_in_us(max);
    show_ticks_in_us(ave_dev);
    disable_clock_latency_measurement();
    diag_printf("  %3d%% %3d%%", con_ave, con_min);
    diag_printf(" %s\n", title);
    enable_clock_latency_measurement();
}
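
// Added commentary: the "Confidence" columns printed above are the
// percentage of samples falling within one average deviation (ave_dev) of
// the average and of the minimum, respectively.  High percentages mean the
// reported figure is representative of the whole run; low ones suggest the
// samples are widely scattered (cache effects, interrupts, etc.).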

void
show_times(fun_times ft[], int nsamples, char *title)
{
    show_times_detail(ft, nsamples, title, false);
#ifdef STATS_WITHOUT_FIRST_SAMPLE
    show_times_detail(ft, nsamples, "", true);
#endif
}

//--------------------------------------------------------------------------

void
show_test_parameters(void)
{
    disable_clock_latency_measurement();
    diag_printf("\nTesting parameters:\n");
    diag_printf("   Clock samples:         %5d\n", nsamples);
    diag_printf("   Threads:               %5d\n", ntest_threads);
    diag_printf("   Thread switches:       %5d\n", nthread_switches);
#ifdef CYGPKG_POSIX_PTHREAD_MUTEX
    diag_printf("   Mutexes:               %5d\n", nmutexes);
#endif
    diag_printf("   Mailboxes:             %5d\n", nmboxes);
    diag_printf("   Semaphores:            %5d\n", nsemaphores);
    diag_printf("   Scheduler operations:  %5d\n", nscheds);
    diag_printf("   Timers:                %5d\n", ntimers);
    diag_printf("\n");
    enable_clock_latency_measurement();
}

void
end_of_test_group(void)
{
    disable_clock_latency_measurement();
    diag_printf("\n");
    enable_clock_latency_measurement();
}

//--------------------------------------------------------------------------
// Compute a name for a thread

char *
thread_name(char *basename, int indx) {
    return "<<NULL>>";  // Not currently used
}

//--------------------------------------------------------------------------
// test0 - null test, just return

void *
test0(void *indx)
{
    return indx;
}

//--------------------------------------------------------------------------
// test3 - loop, yielding repeatedly and checking for cancellation

void *
test3(void *indx)
{
    for(;;)
    {
        sched_yield();
        pthread_testcancel();
    }

    return indx;
}

//--------------------------------------------------------------------------
// test1 - empty test, simply exit.  Last thread signals parent.

void *
test1( void *indx)
{
    if ((cyg_uint32)indx == (cyg_uint32)(ntest_threads-1)) {
        sem_post(&synchro);  // Signal that last thread is dying
    }
    return indx;
}

//--------------------------------------------------------------------------
// test2 - measure thread switch times

void *
test2(void *indx)
{
    int i;
    for (i = 0;  i < nthread_switches;  i++) {
        if ((int)indx == 0) {
            HAL_CLOCK_READ(&test2_ft[i].start);
        } else {
            HAL_CLOCK_READ(&test2_ft[i].end);
        }
        sched_yield();
    }
    if ((int)indx == 1) {
        sem_post(&synchro);
    }

    return indx;
}
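
// Added commentary: two instances of test2() run at the same priority under
// SCHED_RR.  On each pass, thread 0 stamps .start and yields; the scheduler
// then runs thread 1, which stamps .end and yields back.  Each sample in
// test2_ft therefore brackets exactly one context switch, and thread 1
// posts `synchro' once all nthread_switches samples have been collected.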
#ifdef CYGPKG_POSIX_PTHREAD_MUTEX
//--------------------------------------------------------------------------
// Full-circuit mutex unlock/lock test

void *
mutex_test(void * indx)
{
    int i;
    pthread_mutex_lock(&test_mutexes[0]);
    for (i = 0;  i < nmutexes;  i++) {
        sem_wait(&synchro);
        wait_for_tick(); // Wait until the next clock tick to minimize aberrations
        HAL_CLOCK_READ(&mutex_ft[i].start);
        pthread_mutex_unlock(&test_mutexes[0]);
        pthread_mutex_lock(&test_mutexes[0]);
        sem_post(&synchro);
    }
    return indx;
}
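
// Added commentary: mutex_test() runs at a lower priority than
// run_mutex_circuit_test(), which blocks in pthread_mutex_lock() on the
// mutex held here.  The .start stamp is taken just before the unlock that
// wakes the higher-priority locker, and that thread takes the matching
// .end stamp as soon as it owns the mutex, so each sample measures a full
// unlock -> wakeup -> lock handover.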

//--------------------------------------------------------------------------
// Full-circuit mbox put/get test

#if 0
void
mbox_test(cyg_uint32 indx)
{
    void *item;
    do {
        item = cyg_mbox_get(test_mbox_handles[0]);
        HAL_CLOCK_READ(&mbox_ft[(int)item].end);
        cyg_semaphore_post(&synchro);
    } while ((int)item != (nmboxes-1));
    cyg_thread_exit(0);
}
#endif
#endif
//--------------------------------------------------------------------------
// Full-circuit semaphore post/wait test

void *
semaphore_test(void * indx)
{
    int i;
    for (i = 0;  i < nsemaphores;  i++) {
        sem_wait(&test_semaphores[0]);
        HAL_CLOCK_READ(&semaphore_ft[i].end);
        sem_post(&synchro);
    }
    return indx;
}
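
// Added commentary: the matching .start stamps are taken in
// run_semaphore_circuit_test() immediately before it posts
// test_semaphores[0].  This thread runs at the higher priority, so it is
// dispatched as soon as the post is made; each sample therefore measures
// the post -> wakeup -> wait round trip for a semaphore.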

//--------------------------------------------------------------------------
//
// This set of tests is used to measure kernel primitives that deal with threads
//

void
run_thread_tests(void)
{

    int i;
    struct sched_param schedparam;
    pthread_attr_t attr;
    int policy;
    void *retval;

    // Set my priority higher than any I plan to create
    schedparam.sched_priority = 30;
    pthread_setschedparam( pthread_self(), SCHED_RR, &schedparam );

    // Initialize thread creation attributes

    pthread_attr_init( &attr );
    pthread_attr_setinheritsched( &attr, PTHREAD_EXPLICIT_SCHED );
    pthread_attr_setschedpolicy( &attr, SCHED_RR );
    schedparam.sched_priority = 10;
    pthread_attr_setschedparam( &attr, &schedparam );

    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0;  i < ntest_threads;  i++) {
        HAL_CLOCK_READ(&thread_ft[i].start);

        pthread_attr_setstackaddr( &attr, &stacks[i][STACK_SIZE] );
        pthread_attr_setstacksize( &attr, STACK_SIZE );
        pthread_create( &threads[i],
                        &attr,
                        test0,
                        (void *)i
                        );

        HAL_CLOCK_READ(&thread_ft[i].end);
    }
    show_times(thread_ft, ntest_threads, "Create thread");

    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0;  i < ntest_threads;  i++) {
        HAL_CLOCK_READ(&thread_ft[i].start);
        sched_yield();
        HAL_CLOCK_READ(&thread_ft[i].end);
    }
    show_times(thread_ft, ntest_threads, "Yield thread [all lower priority]");

    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0;  i < ntest_threads;  i++) {
        HAL_CLOCK_READ(&thread_ft[i].start);

        schedparam.sched_priority = 11;
        pthread_attr_setschedparam( &attr, &schedparam );
        pthread_setschedparam(threads[i], SCHED_RR, &schedparam);

        HAL_CLOCK_READ(&thread_ft[i].end);
    }
    show_times(thread_ft, ntest_threads, "Set priority");

    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0;  i < ntest_threads;  i++) {
        HAL_CLOCK_READ(&thread_ft[i].start);
        pthread_getschedparam( threads[i], &policy, &schedparam );
        HAL_CLOCK_READ(&thread_ft[i].end);
    }
    show_times(thread_ft, ntest_threads, "Get priority");

    cyg_thread_delay(1);        // Let the test threads run

    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0;  i < ntest_threads;  i++) {
        HAL_CLOCK_READ(&thread_ft[i].start);
        pthread_join(threads[i], &retval);
        HAL_CLOCK_READ(&thread_ft[i].end);
    }
    show_times(thread_ft, ntest_threads, "Join exited thread");

    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0;  i < ntest_threads;  i++) {
        HAL_CLOCK_READ(&thread_ft[i].start);
        sched_yield();
        HAL_CLOCK_READ(&thread_ft[i].end);
    }
    show_times(thread_ft, ntest_threads, "Yield [no other] thread");

    // Recreate the test set

    schedparam.sched_priority = 10;
    pthread_attr_setschedparam( &attr, &schedparam );

    for (i = 0;  i < ntest_threads;  i++) {
        pthread_attr_setstackaddr( &attr, &stacks[i][STACK_SIZE] );
        pthread_attr_setstacksize( &attr, STACK_SIZE );
        pthread_create( &threads[i],
                        &attr,
                        test3,
                        (void *)i
                        );
    }

    cyg_thread_delay(1);        // Let the test threads run

    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0;  i < ntest_threads;  i++) {
        HAL_CLOCK_READ(&thread_ft[i].start);
        pthread_cancel(threads[i]);
        HAL_CLOCK_READ(&thread_ft[i].end);
    }
    show_times(thread_ft, ntest_threads, "Cancel [running] thread");

    cyg_thread_delay(1);        // Let the test threads do their cancellations

    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0;  i < ntest_threads;  i++) {
        HAL_CLOCK_READ(&thread_ft[i].start);
        pthread_join(threads[i], &retval);
        HAL_CLOCK_READ(&thread_ft[i].end);
    }
    show_times(thread_ft, ntest_threads, "Join [cancelled] thread");

    // Set my priority lower than any I plan to create
    schedparam.sched_priority = 5;
    pthread_setschedparam( pthread_self(), SCHED_RR, &schedparam );

    // Set up the end-of-threads synchronizer
    sem_init(&synchro, 0, 0);

    schedparam.sched_priority = 10;
    pthread_attr_setschedparam( &attr, &schedparam );

    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0;  i < ntest_threads;  i++) {
        HAL_CLOCK_READ(&thread_ft[i].start);

        pthread_attr_setstackaddr( &attr, &stacks[i][STACK_SIZE] );
        pthread_attr_setstacksize( &attr, STACK_SIZE );
        pthread_create( &threads[i],
                        &attr,
                        test2,
                        (void *)i
                        );

        HAL_CLOCK_READ(&thread_ft[i].end);
    }
    show_times(thread_ft, ntest_threads, "Create [high priority] thread");

    sem_wait(&synchro);  // Wait for all threads to finish

    // Make sure they are all dead
    for (i = 0;  i < ntest_threads;  i++) {
        pthread_join(threads[i], &retval);
    }

    run_thread_switch_test();
    end_of_test_group();

}

//--------------------------------------------------------------------------

void
run_thread_switch_test(void)
{

    int i;
    struct sched_param schedparam;
    pthread_attr_t attr;
    void *retval;

    // Set my priority higher than any I plan to create
    schedparam.sched_priority = 30;
    pthread_setschedparam( pthread_self(), SCHED_RR, &schedparam );

    // Initialize thread creation attributes

    pthread_attr_init( &attr );
    pthread_attr_setinheritsched( &attr, PTHREAD_EXPLICIT_SCHED );
    pthread_attr_setschedpolicy( &attr, SCHED_RR );
    schedparam.sched_priority = 10;
    pthread_attr_setschedparam( &attr, &schedparam );

    // Set up the end-of-threads synchronizer

    sem_init(&synchro, 0, 0);

    // Set up for thread context switch

    for (i = 0;  i < 2;  i++) {
        pthread_attr_setstackaddr( &attr, &stacks[i][STACK_SIZE] );
        pthread_attr_setstacksize( &attr, STACK_SIZE );
        pthread_create( &threads[i],
                        &attr,
                        test2,
                        (void *)i
                        );
    }

    wait_for_tick(); // Wait until the next clock tick to minimize aberrations

    sem_wait(&synchro);

    show_times(test2_ft, nthread_switches, "Thread switch");

    // Clean up
    for (i = 0;  i < 2;  i++) {
        pthread_join(threads[i], &retval);
    }

}

//--------------------------------------------------------------------------
#ifdef CYGPKG_POSIX_PTHREAD_MUTEX
void
run_mutex_tests(void)
{

    int i;
    pthread_mutexattr_t attr;

    pthread_mutexattr_init( &attr );

    // Mutex primitives
    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0;  i < nmutexes;  i++) {
        HAL_CLOCK_READ(&mutex_ft[i].start);
        pthread_mutex_init(&test_mutexes[i], &attr);
        HAL_CLOCK_READ(&mutex_ft[i].end);
    }
    show_times(mutex_ft, nmutexes, "Init mutex");

    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0;  i < nmutexes;  i++) {
        HAL_CLOCK_READ(&mutex_ft[i].start);
        pthread_mutex_lock(&test_mutexes[i]);
        HAL_CLOCK_READ(&mutex_ft[i].end);
    }
    show_times(mutex_ft, nmutexes, "Lock [unlocked] mutex");

    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0;  i < nmutexes;  i++) {
        HAL_CLOCK_READ(&mutex_ft[i].start);
        pthread_mutex_unlock(&test_mutexes[i]);
        HAL_CLOCK_READ(&mutex_ft[i].end);
    }
    show_times(mutex_ft, nmutexes, "Unlock [locked] mutex");

    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0;  i < nmutexes;  i++) {
        HAL_CLOCK_READ(&mutex_ft[i].start);
        pthread_mutex_trylock(&test_mutexes[i]);
        HAL_CLOCK_READ(&mutex_ft[i].end);
    }
    show_times(mutex_ft, nmutexes, "Trylock [unlocked] mutex");

    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0;  i < nmutexes;  i++) {
        HAL_CLOCK_READ(&mutex_ft[i].start);
        pthread_mutex_trylock(&test_mutexes[i]);
        HAL_CLOCK_READ(&mutex_ft[i].end);
    }
    show_times(mutex_ft, nmutexes, "Trylock [locked] mutex");

    // Must unlock mutexes before destroying them.
    for (i = 0;  i < nmutexes;  i++) {
        pthread_mutex_unlock(&test_mutexes[i]);
    }

    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0;  i < nmutexes;  i++) {
        HAL_CLOCK_READ(&mutex_ft[i].start);
        pthread_mutex_destroy(&test_mutexes[i]);
        HAL_CLOCK_READ(&mutex_ft[i].end);
    }
    show_times(mutex_ft, nmutexes, "Destroy mutex");

    run_mutex_circuit_test();
    end_of_test_group();
}

//--------------------------------------------------------------------------

void
run_mutex_circuit_test(void)
{
    int i;
    pthread_mutexattr_t mattr;
    struct sched_param schedparam;
    pthread_attr_t attr;
    void *retval;

    // Set my priority lower than any I plan to create
    schedparam.sched_priority = 5;
    pthread_setschedparam( pthread_self(), SCHED_RR, &schedparam );

    // Initialize thread creation attributes

    pthread_attr_init( &attr );
    pthread_attr_setinheritsched( &attr, PTHREAD_EXPLICIT_SCHED );
    pthread_attr_setschedpolicy( &attr, SCHED_RR );
    schedparam.sched_priority = 10;
    pthread_attr_setschedparam( &attr, &schedparam );

    // Set up for full mutex unlock/lock test
    pthread_mutexattr_init( &mattr );
    pthread_mutex_init(&test_mutexes[0], &mattr);
    sem_init(&synchro, 0, 0);

    pthread_attr_setstackaddr( &attr, &stacks[0][STACK_SIZE] );
    pthread_attr_setstacksize( &attr, STACK_SIZE );
    pthread_create( &mutex_test_thread_handle,
                    &attr,
                    mutex_test,
                    (void *)0
        );

    // Need to raise priority so that this thread will block on the "lock"
    schedparam.sched_priority = 20;
    pthread_setschedparam( pthread_self(), SCHED_RR, &schedparam );

    for (i = 0;  i < nmutexes;  i++) {
        sem_post(&synchro);
        pthread_mutex_lock(&test_mutexes[0]);
        HAL_CLOCK_READ(&mutex_ft[i].end);
        pthread_mutex_unlock(&test_mutexes[0]);
        sem_wait(&synchro);
    }
    pthread_join(mutex_test_thread_handle, &retval);
    show_times(mutex_ft, nmutexes, "Unlock/Lock mutex");

}

#endif
//--------------------------------------------------------------------------
// Message queue tests

// Currently disabled, pending implementation of POSIX message queues

#if 0
void
run_mbox_tests(void)
{
    int i, cnt;
    void *item;
    // Mailbox primitives
    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0;  i < nmboxes;  i++) {
        HAL_CLOCK_READ(&mbox_ft[i].start);
        cyg_mbox_create(&test_mbox_handles[i], &test_mboxes[i]);
        HAL_CLOCK_READ(&mbox_ft[i].end);
    }
    show_times(mbox_ft, nmboxes, "Create mbox");

    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0;  i < nmboxes;  i++) {
        HAL_CLOCK_READ(&mbox_ft[i].start);
        cnt = cyg_mbox_peek(test_mbox_handles[i]);
        HAL_CLOCK_READ(&mbox_ft[i].end);
    }
    show_times(mbox_ft, nmboxes, "Peek [empty] mbox");

#ifdef CYGMFN_KERNEL_SYNCH_MBOXT_PUT_CAN_WAIT
    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0;  i < nmboxes;  i++) {
        HAL_CLOCK_READ(&mbox_ft[i].start);
        cyg_mbox_put(test_mbox_handles[i], (void *)i);
        HAL_CLOCK_READ(&mbox_ft[i].end);
    }
    show_times(mbox_ft, nmboxes, "Put [first] mbox");

    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0;  i < nmboxes;  i++) {
        HAL_CLOCK_READ(&mbox_ft[i].start);
        cnt = cyg_mbox_peek(test_mbox_handles[i]);
        HAL_CLOCK_READ(&mbox_ft[i].end);
    }
    show_times(mbox_ft, nmboxes, "Peek [1 msg] mbox");

    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0;  i < nmboxes;  i++) {
        HAL_CLOCK_READ(&mbox_ft[i].start);
        cyg_mbox_put(test_mbox_handles[i], (void *)i);
        HAL_CLOCK_READ(&mbox_ft[i].end);
    }
    show_times(mbox_ft, nmboxes, "Put [second] mbox");

    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0;  i < nmboxes;  i++) {
        HAL_CLOCK_READ(&mbox_ft[i].start);
        cnt = cyg_mbox_peek(test_mbox_handles[i]);
        HAL_CLOCK_READ(&mbox_ft[i].end);
    }
    show_times(mbox_ft, nmboxes, "Peek [2 msgs] mbox");

    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0;  i < nmboxes;  i++) {
        HAL_CLOCK_READ(&mbox_ft[i].start);
        item = cyg_mbox_get(test_mbox_handles[i]);
        HAL_CLOCK_READ(&mbox_ft[i].end);
    }
    show_times(mbox_ft, nmboxes, "Get [first] mbox");

    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0;  i < nmboxes;  i++) {
        HAL_CLOCK_READ(&mbox_ft[i].start);
        item = cyg_mbox_get(test_mbox_handles[i]);
        HAL_CLOCK_READ(&mbox_ft[i].end);
    }
    show_times(mbox_ft, nmboxes, "Get [second] mbox");
#endif // ifdef CYGMFN_KERNEL_SYNCH_MBOXT_PUT_CAN_WAIT

    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0;  i < nmboxes;  i++) {
        HAL_CLOCK_READ(&mbox_ft[i].start);
        cyg_mbox_tryput(test_mbox_handles[i], (void *)i);
        HAL_CLOCK_READ(&mbox_ft[i].end);
    }
    show_times(mbox_ft, nmboxes, "Tryput [first] mbox");

    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0;  i < nmboxes;  i++) {
        HAL_CLOCK_READ(&mbox_ft[i].start);
        item = cyg_mbox_peek_item(test_mbox_handles[i]);
        HAL_CLOCK_READ(&mbox_ft[i].end);
    }
    show_times(mbox_ft, nmboxes, "Peek item [non-empty] mbox");

    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0;  i < nmboxes;  i++) {
        HAL_CLOCK_READ(&mbox_ft[i].start);
        item = cyg_mbox_tryget(test_mbox_handles[i]);
        HAL_CLOCK_READ(&mbox_ft[i].end);
    }
    show_times(mbox_ft, nmboxes, "Tryget [non-empty] mbox");

    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0;  i < nmboxes;  i++) {
        HAL_CLOCK_READ(&mbox_ft[i].start);
        item = cyg_mbox_peek_item(test_mbox_handles[i]);
        HAL_CLOCK_READ(&mbox_ft[i].end);
    }
    show_times(mbox_ft, nmboxes, "Peek item [empty] mbox");

    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0;  i < nmboxes;  i++) {
        HAL_CLOCK_READ(&mbox_ft[i].start);
        item = cyg_mbox_tryget(test_mbox_handles[i]);
        HAL_CLOCK_READ(&mbox_ft[i].end);
    }
    show_times(mbox_ft, nmboxes, "Tryget [empty] mbox");

    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0;  i < nmboxes;  i++) {
        HAL_CLOCK_READ(&mbox_ft[i].start);
        cyg_mbox_waiting_to_get(test_mbox_handles[i]);
        HAL_CLOCK_READ(&mbox_ft[i].end);
    }
    show_times(mbox_ft, nmboxes, "Waiting to get mbox");

    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0;  i < nmboxes;  i++) {
        HAL_CLOCK_READ(&mbox_ft[i].start);
        cyg_mbox_waiting_to_put(test_mbox_handles[i]);
        HAL_CLOCK_READ(&mbox_ft[i].end);
    }
    show_times(mbox_ft, nmboxes, "Waiting to put mbox");

    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0;  i < nmboxes;  i++) {
        HAL_CLOCK_READ(&mbox_ft[i].start);
        cyg_mbox_delete(test_mbox_handles[i]);
        HAL_CLOCK_READ(&mbox_ft[i].end);
    }
    show_times(mbox_ft, nmboxes, "Delete mbox");

    run_mbox_circuit_test();
    end_of_test_group();
}

//--------------------------------------------------------------------------

void
run_mbox_circuit_test(void)
{
#ifdef CYGMFN_KERNEL_SYNCH_MBOXT_PUT_CAN_WAIT
    int i;
    // Set my priority lower than any I plan to create
    cyg_thread_set_priority(cyg_thread_self(), 3);
    // Set up for full mbox put/get test
    cyg_mbox_create(&test_mbox_handles[0], &test_mboxes[0]);
    cyg_semaphore_init(&synchro, 0);
    cyg_thread_create(2,              // Priority - just a number
                      mbox_test,           // entry
                      0,               // index
                      thread_name("thread", 0),     // Name
                      &stacks[0][0],   // Stack
                      STACK_SIZE,      // Size
                      &mbox_test_thread_handle,   // Handle
                      &mbox_test_thread    // Thread data structure
        );
    cyg_thread_resume(mbox_test_thread_handle);
    for (i = 0;  i < nmboxes;  i++) {
        wait_for_tick(); // Wait until the next clock tick to minimize aberrations
        HAL_CLOCK_READ(&mbox_ft[i].start);
        cyg_mbox_put(test_mbox_handles[0], (void *)i);
        cyg_semaphore_wait(&synchro);
    }
    cyg_thread_delete(mbox_test_thread_handle);
    show_times(mbox_ft, nmboxes, "Put/Get mbox");
#endif
}

#endif

//--------------------------------------------------------------------------

void
run_semaphore_tests(void)
{

    int i;
    int sem_val;

    // Semaphore primitives
    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0;  i < nsemaphores;  i++) {
        HAL_CLOCK_READ(&semaphore_ft[i].start);
        sem_init(&test_semaphores[i], 0, 0);
        HAL_CLOCK_READ(&semaphore_ft[i].end);
    }
    show_times(semaphore_ft, nsemaphores, "Init semaphore");

    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0;  i < nsemaphores;  i++) {
        HAL_CLOCK_READ(&semaphore_ft[i].start);
        sem_post(&test_semaphores[i]);
        HAL_CLOCK_READ(&semaphore_ft[i].end);
    }
    show_times(semaphore_ft, nsemaphores, "Post [0] semaphore");

    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0;  i < nsemaphores;  i++) {
        HAL_CLOCK_READ(&semaphore_ft[i].start);
        sem_wait(&test_semaphores[i]);
        HAL_CLOCK_READ(&semaphore_ft[i].end);
    }
    show_times(semaphore_ft, nsemaphores, "Wait [1] semaphore");

    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0;  i < nsemaphores;  i++) {
        HAL_CLOCK_READ(&semaphore_ft[i].start);
        sem_trywait(&test_semaphores[i]);
        HAL_CLOCK_READ(&semaphore_ft[i].end);
    }
    show_times(semaphore_ft, nsemaphores, "Trywait [0] semaphore");

    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0;  i < nsemaphores;  i++) {
        sem_post(&test_semaphores[i]);
        HAL_CLOCK_READ(&semaphore_ft[i].start);
        sem_trywait(&test_semaphores[i]);
        HAL_CLOCK_READ(&semaphore_ft[i].end);
    }
    show_times(semaphore_ft, nsemaphores, "Trywait [1] semaphore");

    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0;  i < nsemaphores;  i++) {
        HAL_CLOCK_READ(&semaphore_ft[i].start);
        sem_getvalue(&test_semaphores[i], &sem_val);
        HAL_CLOCK_READ(&semaphore_ft[i].end);
    }
    show_times(semaphore_ft, nsemaphores, "Get value of semaphore");

    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0;  i < nsemaphores;  i++) {
        HAL_CLOCK_READ(&semaphore_ft[i].start);
        sem_destroy(&test_semaphores[i]);
        HAL_CLOCK_READ(&semaphore_ft[i].end);
    }
    show_times(semaphore_ft, nsemaphores, "Destroy semaphore");

    run_semaphore_circuit_test();
    end_of_test_group();
}

//--------------------------------------------------------------------------

void
run_semaphore_circuit_test(void)
{

    int i;
    struct sched_param schedparam;
    pthread_attr_t attr;
    void *retval;

    // Set my priority lower than any I plan to create
    schedparam.sched_priority = 5;
    pthread_setschedparam( pthread_self(), SCHED_RR, &schedparam );

    // Initialize thread creation attributes

    pthread_attr_init( &attr );
    pthread_attr_setinheritsched( &attr, PTHREAD_EXPLICIT_SCHED );
    pthread_attr_setschedpolicy( &attr, SCHED_RR );
    schedparam.sched_priority = 10;
    pthread_attr_setschedparam( &attr, &schedparam );

    // Set up for full semaphore post/wait test
    sem_init(&test_semaphores[0], 0, 0);
    sem_init(&synchro, 0, 0);

    pthread_attr_setstackaddr( &attr, &stacks[0][STACK_SIZE] );
    pthread_attr_setstacksize( &attr, STACK_SIZE );
    pthread_create( &semaphore_test_thread_handle,
                    &attr,
                    semaphore_test,
                    (void *)0
        );

    for (i = 0;  i < nsemaphores;  i++) {
        wait_for_tick(); // Wait until the next clock tick to minimize aberrations
        HAL_CLOCK_READ(&semaphore_ft[i].start);
        sem_post(&test_semaphores[0]);
        sem_wait(&synchro);
    }
    pthread_join(semaphore_test_thread_handle, &retval);

    show_times(semaphore_ft, nsemaphores, "Post/Wait semaphore");

}

//--------------------------------------------------------------------------

// Timer callback function
void
sigrt0(int signo, siginfo_t *info, void *context)
{
    diag_printf("sigrt0 called\n");
    // empty call back
}

// Callback used to test determinacy
static volatile int timer_cnt;
void
sigrt1(int signo, siginfo_t *info, void *context)
{
    if (timer_cnt == nscheds) return;
    sched_ft[timer_cnt].start = 0;
    HAL_CLOCK_READ(&sched_ft[timer_cnt++].end);
    if (timer_cnt == nscheds) {
        sem_post(&synchro);
    }
}
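
// Added commentary: for the timer-latency figures, .start is forced to 0,
// so each recorded delta is simply the raw hardware-counter value at the
// moment the signal handler runs.  Since the periodic timer expires on a
// clock tick (when the counter restarts), that value approximates the
// latency from tick to handler; show_times() then subtracts the usual
// clock-read overhead as with every other measurement.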

static sem_t timer_sem;

static void
sigrt2(int signo, siginfo_t *info, void *context)
{
    if (timer_cnt == nscheds) {
        sem_post(&synchro);
        sem_post(&timer_sem);
    } else {
        sched_ft[timer_cnt].start = 0;
        sem_post(&timer_sem);
    }
}

// Null thread, used to keep scheduler busy
void *
timer_test(void * id)
{
    while (true) {
        cyg_thread_yield();
        pthread_testcancel();
    }

    return id;
}

// Thread that suspends itself at the first opportunity
void *
timer_test2(void *id)
{
    while (timer_cnt != nscheds) {
        HAL_CLOCK_READ(&sched_ft[timer_cnt++].end);
        sem_wait(&timer_sem);
    }
    return id;
}
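
// Added commentary: sigrt2() and timer_test2() cooperate to measure
// timer -> thread post latency.  The handler zeroes .start and posts
// timer_sem; timer_test2(), woken by that post, stamps .end with the raw
// counter value, so each sample spans the signal delivery plus the
// semaphore wakeup of the waiting thread.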

void
run_timer_tests(void)
{
    int res;
    int i;
    struct sigaction sa;
    struct sigevent sigev;
    struct itimerspec tp;

    // Install signal handlers
    sigemptyset( &sa.sa_mask );
    sa.sa_flags = SA_SIGINFO;

    sa.sa_sigaction = sigrt0;
    sigaction( SIGRTMIN, &sa, NULL );

    sa.sa_sigaction = sigrt1;
    sigaction( SIGRTMIN+1, &sa, NULL );

    sa.sa_sigaction = sigrt2;
    sigaction( SIGRTMIN+2, &sa, NULL );

    // Set up common bits of sigevent

    sigev.sigev_notify = SIGEV_SIGNAL;

    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0;  i < ntimers;  i++) {
        HAL_CLOCK_READ(&timer_ft[i].start);
        sigev.sigev_signo = SIGRTMIN;
        sigev.sigev_value.sival_ptr = (void*)(&timers[i]);
        res = timer_create( CLOCK_REALTIME, &sigev, &timers[i]);
        HAL_CLOCK_READ(&timer_ft[i].end);
        CYG_ASSERT( res == 0 , "timer_create() returned error");
    }
    show_times(timer_ft, ntimers, "Create timer");

    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    tp.it_value.tv_sec = 0;
    tp.it_value.tv_nsec = 0;
    tp.it_interval.tv_sec = 0;
    tp.it_interval.tv_nsec = 0;
    for (i = 0;  i < ntimers;  i++) {
        HAL_CLOCK_READ(&timer_ft[i].start);
        res = timer_settime( timers[i], 0, &tp, NULL );
        HAL_CLOCK_READ(&timer_ft[i].end);
        CYG_ASSERT( res == 0 , "timer_settime() returned error");
    }
    show_times(timer_ft, ntimers, "Initialize timer to zero");

    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    tp.it_value.tv_sec = 1;
    tp.it_value.tv_nsec = 250000000;
    tp.it_interval.tv_sec = 0;
    tp.it_interval.tv_nsec = 0;
    for (i = 0;  i < ntimers;  i++) {
        HAL_CLOCK_READ(&timer_ft[i].start);
        res = timer_settime( timers[i], 0, &tp, NULL );
        HAL_CLOCK_READ(&timer_ft[i].end);
        CYG_ASSERT( res == 0 , "timer_settime() returned error");
    }
    show_times(timer_ft, ntimers, "Initialize timer to 1.25 sec");

    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    tp.it_value.tv_sec = 0;
    tp.it_value.tv_nsec = 0;
    tp.it_interval.tv_sec = 0;
    tp.it_interval.tv_nsec = 0;
    for (i = 0;  i < ntimers;  i++) {
        HAL_CLOCK_READ(&timer_ft[i].start);
        res = timer_settime( timers[i], 0, &tp, NULL );
        HAL_CLOCK_READ(&timer_ft[i].end);
        CYG_ASSERT( res == 0 , "timer_settime() returned error");
    }
    show_times(timer_ft, ntimers, "Disable timer");

    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    for (i = 0;  i < ntimers;  i++) {
        HAL_CLOCK_READ(&timer_ft[i].start);
        res = timer_delete( timers[i] );
        HAL_CLOCK_READ(&timer_ft[i].end);
        CYG_ASSERT( res == 0 , "timer_delete() returned error");
    }
    show_times(timer_ft, ntimers, "Delete timer");

    sigev.sigev_signo = SIGRTMIN+1;
    sigev.sigev_value.sival_ptr = (void*)(&timers[i]);
    res = timer_create( CLOCK_REALTIME, &sigev, &timers[0]);
    CYG_ASSERT( res == 0 , "timer_create() returned error");
    tp.it_value.tv_sec = 0;
    tp.it_value.tv_nsec = 50000000;
    tp.it_interval.tv_sec = 0;
    tp.it_interval.tv_nsec = 50000000;
    timer_cnt = 0;
    res = timer_settime( timers[0], 0, &tp, NULL );
    CYG_ASSERT( res == 0 , "timer_settime() returned error");
    sem_init(&synchro, 0, 0);
    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    do
    { res = sem_wait(&synchro);
    } while( res == -1 && errno == EINTR );
    CYG_ASSERT( res == 0 , "sem_wait() returned error");
    tp.it_value.tv_sec = 0;
    tp.it_value.tv_nsec = 0;
    tp.it_interval.tv_sec = 0;
    tp.it_interval.tv_nsec = 0;
    res = timer_settime( timers[0], 0, &tp, NULL );
    CYG_ASSERT( res == 0 , "timer_settime() returned error");
    res = timer_delete( timers[0] );
    CYG_ASSERT( res == 0 , "timer_delete() returned error");
    show_times(sched_ft, nscheds, "Timer latency [0 threads]");

    struct sched_param schedparam;
    pthread_attr_t attr;
    void *retval;

    // Set my priority higher than any I plan to create
    schedparam.sched_priority = 20;
    pthread_setschedparam( pthread_self(), SCHED_RR, &schedparam );

    // Initialize thread creation attributes

    pthread_attr_init( &attr );
    pthread_attr_setinheritsched( &attr, PTHREAD_EXPLICIT_SCHED );
    pthread_attr_setschedpolicy( &attr, SCHED_RR );
    schedparam.sched_priority = 10;
    pthread_attr_setschedparam( &attr, &schedparam );

    for (i = 0;  i < 2;  i++) {
        pthread_attr_setstackaddr( &attr, &stacks[i][STACK_SIZE] );
        pthread_attr_setstacksize( &attr, STACK_SIZE );
        res = pthread_create( &threads[i],
                        &attr,
                        timer_test,
                        (void *)i
                        );
        CYG_ASSERT( res == 0 , "pthread_create() returned error");
    }

    wait_for_tick(); // Wait until the next clock tick to minimize aberrations

    sigev.sigev_signo = SIGRTMIN+1;
    sigev.sigev_value.sival_ptr = (void*)(&timers[i]);
    res = timer_create( CLOCK_REALTIME, &sigev, &timers[0]);
    CYG_ASSERT( res == 0 , "timer_create() returned error");
    tp.it_value.tv_sec = 0;
    tp.it_value.tv_nsec = 50000000;
    tp.it_interval.tv_sec = 0;
    tp.it_interval.tv_nsec = 50000000;
    timer_cnt = 0;
    res = timer_settime( timers[0], 0, &tp, NULL );
    CYG_ASSERT( res == 0 , "timer_settime() returned error");

    sem_init(&synchro, 0, 0);
    do
    { res = sem_wait(&synchro);
    } while( res == -1 && errno == EINTR );
    CYG_ASSERT( res == 0 , "sem_wait() returned error");
    res = timer_delete(timers[0]);
    CYG_ASSERT( res == 0 , "timer_delete() returned error");
    show_times(sched_ft, nscheds, "Timer latency [2 threads]");
    for (i = 0;  i < 2;  i++) {
        pthread_cancel(threads[i]);
        pthread_join(threads[i], &retval);
    }

    for (i = 0;  i < ntest_threads;  i++) {
        pthread_attr_setstackaddr( &attr, &stacks[i][STACK_SIZE] );
        pthread_attr_setstacksize( &attr, STACK_SIZE );
        res = pthread_create( &threads[i],
                        &attr,
                        timer_test,
                        (void *)i
                        );
        CYG_ASSERT( res == 0 , "pthread_create() returned error");
    }
    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    sigev.sigev_signo = SIGRTMIN+1;
    sigev.sigev_value.sival_ptr = (void*)(&timers[i]);
    res = timer_create( CLOCK_REALTIME, &sigev, &timers[0]);
    CYG_ASSERT( res == 0 , "timer_create() returned error");
    tp.it_value.tv_sec = 0;
    tp.it_value.tv_nsec = 50000000;
    tp.it_interval.tv_sec = 0;
    tp.it_interval.tv_nsec = 50000000;
    timer_cnt = 0;
    res = timer_settime( timers[0], 0, &tp, NULL );
    CYG_ASSERT( res == 0 , "timer_settime() returned error");

    sem_init(&synchro, 0, 0);
    do
    { res = sem_wait(&synchro);
    } while( res == -1 && errno == EINTR );
    CYG_ASSERT( res == 0 , "sem_wait() returned error");
    res = timer_delete(timers[0]);
    CYG_ASSERT( res == 0 , "timer_delete() returned error");
    show_times(sched_ft, nscheds, "Timer latency [many threads]");
    for (i = 0;  i < ntest_threads;  i++) {
        pthread_cancel(threads[i]);
        pthread_join(threads[i], &retval);
    }

    sem_init(&synchro, 0, 0);
    sem_init(&timer_sem, 0, 0);
    pthread_attr_setstackaddr( &attr, &stacks[0][STACK_SIZE] );
    pthread_attr_setstacksize( &attr, STACK_SIZE );
    res = pthread_create( &threads[0],
                          &attr,
                          timer_test2,
                          (void *)0
        );
    CYG_ASSERT( res == 0 , "pthread_create() returned error");

    wait_for_tick(); // Wait until the next clock tick to minimize aberrations
    sigev.sigev_signo = SIGRTMIN+2;
    sigev.sigev_value.sival_ptr = (void*)(threads[0]);
    res = timer_create( CLOCK_REALTIME, &sigev, &timers[0]);
    CYG_ASSERT( res == 0 , "timer_create() returned error");
    tp.it_value.tv_sec = 0;
    tp.it_value.tv_nsec = 50000000;
    tp.it_interval.tv_sec = 0;
    tp.it_interval.tv_nsec = 50000000;
    timer_cnt = 0;
    res = timer_settime( timers[0], 0, &tp, NULL );
    CYG_ASSERT( res == 0 , "timer_settime() returned error");

    do
    { res = sem_wait(&synchro);
    } while( res == -1 && errno == EINTR );
    CYG_ASSERT( res == 0 , "sem_wait() returned error");
    res = timer_delete(timers[0]);
    CYG_ASSERT( res == 0 , "timer_delete() returned error");
    show_times(sched_ft, nscheds, "Timer -> thread post latency");
    sem_post(&timer_sem);
//    pthread_cancel(threads[0]);
    pthread_join(threads[0], &retval);

    end_of_test_group();
}

//--------------------------------------------------------------------------

void
run_all_tests()
{
    int i;
    cyg_uint32 tv[nsamples], tv0, tv1;
//    cyg_uint32 min_stack, max_stack, total_stack, actual_stack, j;
    cyg_tick_count_t ticks, tick0, tick1;
#ifdef CYG_SCHEDULER_LOCK_TIMINGS
    cyg_uint32 lock_ave, lock_max;
#endif
#if defined(CYGVAR_KERNEL_COUNTERS_CLOCK_LATENCY) && defined(HAL_CLOCK_LATENCY)
    cyg_int32 clock_ave;
#endif

    disable_clock_latency_measurement();

//    cyg_test_dump_thread_stack_stats( "Startup, main stack", thread[0] );
    cyg_test_dump_interrupt_stack_stats( "Startup" );
    cyg_test_dump_idlethread_stack_stats( "Startup" );
    cyg_test_clear_interrupt_stack();

    diag_printf("\neCos Kernel Timings\n");
    diag_printf("Notes: all times are in microseconds (.000001) unless otherwise stated\n");
#ifdef STATS_WITHOUT_FIRST_SAMPLE
    diag_printf("       second line of results has first sample removed\n");
#endif

    cyg_thread_delay(2);  // Make sure the clock is actually running

    ns_per_system_clock = 1000000/rtc_resolution[1];

    for (i = 0;  i < nsamples;  i++) {
        HAL_CLOCK_READ(&tv[i]);
    }
    tv0 = 0;
    for (i = 1;  i < nsamples;  i++) {
        tv0 += tv[i] - tv[i-1];
    }
    end_of_test_group();

    overhead = tv0 / (nsamples-1);
    diag_printf("Reading the hardware clock takes %d 'ticks' overhead\n", overhead);
    diag_printf("... this value will be factored out of all other measurements\n");

    // Try and measure how long the clock interrupt handling takes
    for (i = 0;  i < nsamples;  i++) {
        tick0 = cyg_current_time();
        while (true) {
            tick1 = cyg_current_time();
            if (tick0 != tick1) break;
        }
        HAL_CLOCK_READ(&tv[i]);
    }
    tv1 = 0;
    for (i = 0;  i < nsamples;  i++) {
        tv1 += tv[i] * 1000;
    }
    tv1 = tv1 / nsamples;
    tv1 -= overhead;  // Adjust out the cost of getting the timer value
    diag_printf("Clock interrupt took");
    show_ticks_in_us(tv1);
    diag_printf(" microseconds (%d raw clock ticks)\n", tv1/1000);
    enable_clock_latency_measurement();

    ticks = cyg_current_time();

    show_test_parameters();
    show_times_hdr();

    reset_clock_latency_measurement();

    run_thread_tests();
#ifdef CYGPKG_POSIX_PTHREAD_MUTEX
    run_mutex_tests();
//    run_mbox_tests();
#endif
    run_semaphore_tests();
    run_timer_tests();

#ifdef CYG_SCHEDULER_LOCK_TIMINGS
    Cyg_Scheduler::get_lock_times(&lock_ave, &lock_max);
    diag_printf("\nMax lock:");
    show_ticks_in_us(lock_max);
    diag_printf(", Ave lock:");
    show_ticks_in_us(lock_ave);
    diag_printf("\n");
#endif

#if defined(CYGVAR_KERNEL_COUNTERS_CLOCK_LATENCY) && defined(HAL_CLOCK_LATENCY)
    // Display latency figures in same format as all other numbers
    disable_clock_latency_measurement();
    clock_ave = (total_clock_latency*1000) / total_clock_interrupts;
    show_ticks_in_us(clock_ave);
    show_ticks_in_us(min_clock_latency*1000);
    show_ticks_in_us(max_clock_latency*1000);
    show_ticks_in_us(0);
    diag_printf("            Clock/interrupt latency\n\n");
    enable_clock_latency_measurement();
#endif

#if defined(CYGVAR_KERNEL_COUNTERS_CLOCK_DSR_LATENCY)
    disable_clock_latency_measurement();
    clock_ave = (total_clock_dsr_latency*1000) / total_clock_dsr_calls;
    show_ticks_in_us(clock_ave);
    show_ticks_in_us(min_clock_dsr_latency*1000);
    show_ticks_in_us(max_clock_dsr_latency*1000);
    show_ticks_in_us(0);
    diag_printf("            Clock DSR latency\n\n");
    enable_clock_latency_measurement();
#endif

#if 0
    disable_clock_latency_measurement();
    min_stack = STACK_SIZE;
    max_stack = 0;
    total_stack = 0;
    for (i = 0;  i < (int)NTEST_THREADS;  i++) {
        for (j = 0;  j < STACK_SIZE;  j++) {
            if (stacks[i][j]) break;
        }
        actual_stack = STACK_SIZE-j;
        if (actual_stack < min_stack) min_stack = actual_stack;
        if (actual_stack > max_stack) max_stack = actual_stack;
        total_stack += actual_stack;
    }
    for (j = 0;  j < STACK_SIZE;  j++) {
        if (stacks[0][j]) break;
    }
    diag_printf("%5d   %5d   %5d  (main stack: %5d)  Thread stack used (%d total)\n",
                total_stack/NTEST_THREADS, min_stack, max_stack,
                STACK_SIZE - j, STACK_SIZE);
#endif

//    cyg_test_dump_thread_stack_stats( "All done, main stack", thread[0] );
    cyg_test_dump_interrupt_stack_stats( "All done" );
    cyg_test_dump_idlethread_stack_stats( "All done" );

    enable_clock_latency_measurement();

    ticks = cyg_current_time();
    diag_printf("\nTiming complete - %d ms total\n\n", (int)((ticks*ns_per_system_clock)/1000));

    CYG_TEST_PASS_FINISH("Basic timing OK");
}

int main( int argc, char **argv )
{
    CYG_TEST_INIT();

    if (cyg_test_is_simulator) {
        nsamples = NSAMPLES_SIM;
        ntest_threads = NTEST_THREADS_SIM;
        nthread_switches = NTHREAD_SWITCHES_SIM;
#ifdef CYGPKG_POSIX_PTHREAD_MUTEX
        nmutexes = NMUTEXES_SIM;
#endif
        nmboxes = NMBOXES_SIM;
        nsemaphores = NSEMAPHORES_SIM;
        nscheds = NSCHEDS_SIM;
        ntimers = NTIMERS_SIM;
    } else {
        nsamples = NSAMPLES;
        ntest_threads = NTEST_THREADS;
        nthread_switches = NTHREAD_SWITCHES;
#ifdef CYGPKG_POSIX_PTHREAD_MUTEX
        nmutexes = NMUTEXES;
#endif
        nmboxes = NMBOXES;
        nsemaphores = NSEMAPHORES;
        nscheds = NSCHEDS;
        ntimers = NTIMERS;
    }

    // Sanity: clamp the counts (see the note at the max() macro definition)
#ifdef WORKHORSE_TEST
    ntest_threads = max(512, ntest_threads);
#ifdef CYGPKG_POSIX_PTHREAD_MUTEX
    nmutexes = max(1024, nmutexes);
#endif
    nsemaphores = max(1024, nsemaphores);
    nmboxes = max(1024, nmboxes);
    ncounters = max(1024, ncounters);
    ntimers = max(1024, ntimers);
#else
    ntest_threads = max(64, ntest_threads);
#ifdef CYGPKG_POSIX_PTHREAD_MUTEX
    nmutexes = max(32, nmutexes);
#endif
    nsemaphores = max(32, nsemaphores);
    nmboxes = max(32, nmboxes);
    ntimers = max(32, ntimers);
#endif

    run_all_tests();

    return 0;
}

#endif // CYGFUN_KERNEL_API_C, etc.

// EOF tm_posix.cxx