OpenCores
URL https://opencores.org/ocsvn/openrisc_me/openrisc_me/trunk

Subversion Repositories openrisc_me

[/] [openrisc/] [trunk/] [rtos/] [ecos-2.0/] [packages/] [compat/] [posix/] [v2_0/] [tests/] [tm_basic.cxx] - Blame information for rev 307

Go to most recent revision | Details | Compare with Previous | View Log

Line No. Rev Author Line
1 27 unneback
//==========================================================================
2
//
3
//        tm_basic.cxx
4
//
5
//        Basic timing test / scaffolding
6
//
7
//==========================================================================
8
//####ECOSGPLCOPYRIGHTBEGIN####
9
// -------------------------------------------
10
// This file is part of eCos, the Embedded Configurable Operating System.
11
// Copyright (C) 1998, 1999, 2000, 2001, 2002 Red Hat, Inc.
12
// Copyright (C) 2002 Jonathan Larmour
13
//
14
// eCos is free software; you can redistribute it and/or modify it under
15
// the terms of the GNU General Public License as published by the Free
16
// Software Foundation; either version 2 or (at your option) any later version.
17
//
18
// eCos is distributed in the hope that it will be useful, but WITHOUT ANY
19
// WARRANTY; without even the implied warranty of MERCHANTABILITY or
20
// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
21
// for more details.
22
//
23
// You should have received a copy of the GNU General Public License along
24
// with eCos; if not, write to the Free Software Foundation, Inc.,
25
// 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
26
//
27
// As a special exception, if other files instantiate templates or use macros
28
// or inline functions from this file, or you compile this file and link it
29
// with other works to produce a work based on this file, this file does not
30
// by itself cause the resulting work to be covered by the GNU General Public
31
// License. However the source code for this file must still be made available
32
// in accordance with section (3) of the GNU General Public License.
33
//
34
// This exception does not invalidate any other reasons why a work based on
35
// this file might be covered by the GNU General Public License.
36
//
37
// Alternative licenses for eCos may be arranged by contacting Red Hat, Inc.
38
// at http://sources.redhat.com/ecos/ecos-license/
39
// -------------------------------------------
40
//####ECOSGPLCOPYRIGHTEND####
41
//==========================================================================
42
//#####DESCRIPTIONBEGIN####
43
//
44
// Author(s):     gthomas,nickg
45
// Contributors:  jlarmour
46
// Date:          1998-10-19
47
// Description:   Very simple kernel timing test
48
//####DESCRIPTIONEND####
49
//==========================================================================
50
 
51
 
52
#include <cyg/infra/testcase.h>
53
#include <cyg/infra/diag.h>
54
#include <pkgconf/posix.h>
55
#include <pkgconf/system.h>
56
#ifdef CYGPKG_KERNEL
57
#include <pkgconf/kernel.h>
58
#endif
59
 
60
// Determine whether the configuration can support this test at all.  If any
// prerequisite is missing, NA_MSG gives the reason reported via CYG_TEST_NA().
#ifndef CYGPKG_POSIX_SIGNALS
#define NA_MSG "No POSIX signals"
#elif !defined(CYGPKG_POSIX_TIMERS)
#define NA_MSG "No POSIX timers"
#elif !defined(CYGPKG_POSIX_PTHREAD)
#define NA_MSG "POSIX threads not enabled"
#elif !defined(CYGFUN_KERNEL_API_C)
#define NA_MSG "Kernel C API not enabled"
#elif !defined(CYGSEM_KERNEL_SCHED_MLQUEUE)
#define NA_MSG "Kernel mlqueue scheduler not enabled"
#elif !defined(CYGVAR_KERNEL_COUNTERS_CLOCK)
#define NA_MSG "Kernel clock not enabled"
#elif CYGNUM_KERNEL_SCHED_PRIORITIES <= 12
// The condition tests CYGNUM_KERNEL_SCHED_PRIORITIES, so the message must
// say "priorities" (was mistakenly "properties").
#define NA_MSG "Kernel scheduler priorities <= 12"
#endif
75
 
76
//==========================================================================
77
 
78
#ifdef NA_MSG
79
extern "C" void
80
cyg_start(void)
81
{
82
    CYG_TEST_INIT();
83
    CYG_TEST_NA(NA_MSG);
84
}
85
#else
86
 
87
#include <pkgconf/kernel.h>
88
#include <pkgconf/hal.h>
89
 
90
#include <cyg/kernel/sched.hxx>
91
#include <cyg/kernel/thread.hxx>
92
#include <cyg/kernel/thread.inl>
93
#include <cyg/kernel/mutex.hxx>
94
#include <cyg/kernel/sema.hxx>
95
#include <cyg/kernel/sched.inl>
96
#include <cyg/kernel/clock.hxx>
97
#include <cyg/kernel/clock.inl>
98
#include <cyg/kernel/kapi.h>
99
 
100
#include <cyg/infra/testcase.h>
101
 
102
#include <cyg/kernel/test/stackmon.h>
103
#include CYGHWR_MEMORY_LAYOUT_H
104
 
105
 
106
// POSIX headers
107
 
108
#include <sys/types.h>
109
#include <pthread.h>
110
#include <semaphore.h>
111
#include <time.h>
112
#include <signal.h>
113
#include <errno.h>
114
 
115
//==========================================================================
116
// Define this to see the statistics with the first sample datum removed.
117
// This can expose the effects of caches on the speed of operations.
118
 
119
#undef STATS_WITHOUT_FIRST_SAMPLE
120
 
121
//==========================================================================
122
 
123
// Structure used to keep track of times
124
typedef struct fun_times {
125
    cyg_uint32 start;
126
    cyg_uint32 end;
127
} fun_times;
128
 
129
//==========================================================================
130
 
131
#define STACK_SIZE (PTHREAD_STACK_MIN*2)
132
 
133
// Defaults
134
#define NTEST_THREADS    16
135
#define NMUTEXES         32
136
#define NMBOXES          32
137
#define NSEMAPHORES      32
138
#define NTIMERS          32
139
 
140
 
141
#define NSAMPLES         32
142
#define NTHREAD_SWITCHES 128
143
#define NSCHEDS          128
144
 
145
#define NSAMPLES_SIM         2
146
#define NTEST_THREADS_SIM    2
147
#define NTHREAD_SWITCHES_SIM 4
148
#define NMUTEXES_SIM         2
149
#define NMBOXES_SIM          2
150
#define NSEMAPHORES_SIM      2
151
#define NSCHEDS_SIM          4
152
#define NTIMERS_SIM          2
153
 
154
//==========================================================================
155
 
156
static int nsamples;
157
static int ntest_threads;
158
static int nthread_switches;
159
static int nmutexes;
160
static int nmboxes;
161
static int nsemaphores;
162
static int nscheds;
163
static int ntimers;
164
 
165
static char stacks[NTEST_THREADS][STACK_SIZE];
166
static pthread_t threads[NTEST_THREADS];
167
static int overhead;
168
static sem_t synchro;
169
static fun_times thread_ft[NTEST_THREADS];
170
 
171
static fun_times test2_ft[NTHREAD_SWITCHES];
172
 
173
static pthread_mutex_t test_mutexes[NMUTEXES];
174
static fun_times mutex_ft[NMUTEXES];
175
static pthread_t mutex_test_thread_handle;
176
 
177
#if 0
178
static cyg_mbox test_mboxes[NMBOXES];
179
static cyg_handle_t test_mbox_handles[NMBOXES];
180
static fun_times mbox_ft[NMBOXES];
181
static cyg_thread mbox_test_thread;
182
static cyg_handle_t mbox_test_thread_handle;
183
#endif
184
 
185
static sem_t test_semaphores[NSEMAPHORES];
186
static fun_times semaphore_ft[NSEMAPHORES];
187
static pthread_t semaphore_test_thread_handle;
188
 
189
static fun_times sched_ft[NSCHEDS];
190
 
191
static timer_t timers[NTIMERS];
192
static fun_times timer_ft[NTIMERS];
193
 
194
static long rtc_resolution[] = CYGNUM_KERNEL_COUNTERS_RTC_RESOLUTION;
195
static long ns_per_system_clock;
196
 
197
#if defined(CYGVAR_KERNEL_COUNTERS_CLOCK_LATENCY)
198
// Data kept by kernel real time clock measuring clock interrupt latency
199
extern cyg_tick_count total_clock_latency, total_clock_interrupts;
200
extern cyg_int32 min_clock_latency, max_clock_latency;
201
extern bool measure_clock_latency;
202
#endif
203
 
204
#if defined(CYGVAR_KERNEL_COUNTERS_CLOCK_DSR_LATENCY)
205
extern cyg_tick_count total_clock_dsr_latency, total_clock_dsr_calls;
206
extern cyg_int32 min_clock_dsr_latency, max_clock_dsr_latency;
207
extern bool measure_clock_latency;
208
#endif
209
 
210
//==========================================================================
211
 
212
void run_sched_tests(void);
213
void run_thread_tests(void);
214
void run_thread_switch_test(void);
215
void run_mutex_tests(void);
216
void run_mutex_circuit_test(void);
217
void run_mbox_tests(void);
218
void run_mbox_circuit_test(void);
219
void run_semaphore_tests(void);
220
void run_semaphore_circuit_test(void);
221
void run_timer_tests(void);
222
 
223
//==========================================================================
224
 
225
#ifndef max
226
#define max(n,m) (m > n ? n : m)
227
#endif
228
 
229
//==========================================================================
230
// Wait until a clock tick [real time clock] has passed.  This should keep it
231
// from happening again during a measurement, thus minimizing any fluctuations
232
void
233
wait_for_tick(void)
234
{
235
    cyg_tick_count_t tv0, tv1;
236
    tv0 = cyg_current_time();
237
    while (true) {
238
        tv1 = cyg_current_time();
239
        if (tv1 != tv0) break;
240
    }
241
}
242
 
243
//--------------------------------------------------------------------------
244
// Display a number of ticks as microseconds
245
// Note: for improved calculation significance, values are kept in ticks*1000
246
void
247
show_ticks_in_us(cyg_uint32 ticks)
248
{
249
    long long ns;
250
    ns = (ns_per_system_clock * (long long)ticks) / CYGNUM_KERNEL_COUNTERS_RTC_PERIOD;
251
    ns += 5;  // for rounding to .01us
252
    diag_printf("%5d.%02d", (int)(ns/1000), (int)((ns%1000)/10));
253
}
254
 
255
//--------------------------------------------------------------------------
256
//
257
// If the kernel is instrumented to measure clock interrupt latency, these
258
// measurements can be drastically perturbed by printing via "diag_printf()"
259
// since that code may run with interrupts disabled for long periods.
260
//
261
// In order to get accurate/reasonable latency figures _for the kernel 
262
// primitive functions being tested_, the kernel's latency measurements
263
// are suspended while the printing actually takes place.
264
//
265
// The measurements are reenabled after the printing, thus allowing for
266
// fair measurements of the kernel primitives, which are not distorted
267
// by the printing mechanisms.
268
 
269
#if defined(CYGVAR_KERNEL_COUNTERS_CLOCK_LATENCY) && defined(HAL_CLOCK_LATENCY)

// Pause the kernel's clock-latency bookkeeping (used around diag_printf,
// which may run with interrupts disabled for long stretches).
void
disable_clock_latency_measurement(void)
{
    wait_for_tick();
    measure_clock_latency = false;
}

// Resume the kernel's clock-latency bookkeeping.
void
enable_clock_latency_measurement(void)
{
    wait_for_tick();
    measure_clock_latency = true;
}

// Discard any latency samples gathered so far (e.g. startup anomalies)
// and restart the statistics from a clean slate.
void
reset_clock_latency_measurement(void)
{
    disable_clock_latency_measurement();
    total_clock_latency = 0;
    total_clock_interrupts = 0;
    min_clock_latency = 0x7FFFFFFF;
    max_clock_latency = 0;
#if defined(CYGVAR_KERNEL_COUNTERS_CLOCK_DSR_LATENCY)
    total_clock_dsr_latency = 0;
    total_clock_dsr_calls = 0;
    min_clock_dsr_latency = 0x7FFFFFFF;
    max_clock_dsr_latency = 0;
#endif
    enable_clock_latency_measurement();
}

#else
// Latency instrumentation not configured: compile the calls away.
#define disable_clock_latency_measurement()
#define enable_clock_latency_measurement()
#define reset_clock_latency_measurement()
#endif
307
 
308
//--------------------------------------------------------------------------
309
 
310
void
311
show_times_hdr(void)
312
{
313
    disable_clock_latency_measurement();
314
    diag_printf("\n");
315
    diag_printf("                                 Confidence\n");
316
    diag_printf("     Ave     Min     Max     Var  Ave  Min  Function\n");
317
    diag_printf("  ======  ======  ======  ====== ========== ========\n");
318
    enable_clock_latency_measurement();
319
}
320
 
321
void
322
show_times_detail(fun_times ft[], int nsamples, char *title, bool ignore_first)
323
{
324
    int i, delta, min, max, con_ave, con_min, ave_dev;
325
    int start_sample, total_samples;
326
    cyg_int32 total, ave;
327
 
328
    if (ignore_first) {
329
        start_sample = 1;
330
        total_samples = nsamples-1;
331
    } else {
332
        start_sample = 0;
333
        total_samples = nsamples;
334
    }
335
    total = 0;
336
    min = 0x7FFFFFFF;
337
    max = 0;
338
    for (i = start_sample;  i < nsamples;  i++) {
339
        if (ft[i].end < ft[i].start) {
340
            // Clock wrapped around (timer tick)
341
            delta = (ft[i].end+CYGNUM_KERNEL_COUNTERS_RTC_PERIOD) - ft[i].start;
342
        } else {
343
            delta = ft[i].end - ft[i].start;
344
        }
345
        delta -= overhead;
346
        if (delta < 0) delta = 0;
347
        delta *= 1000;
348
        total += delta;
349
        if (delta < min) min = delta;
350
        if (delta > max) max = delta;
351
    }
352
    ave = total / total_samples;
353
    total = 0;
354
    ave_dev = 0;
355
    for (i = start_sample;  i < nsamples;  i++) {
356
        if (ft[i].end < ft[i].start) {
357
            // Clock wrapped around (timer tick)
358
            delta = (ft[i].end+CYGNUM_KERNEL_COUNTERS_RTC_PERIOD) - ft[i].start;
359
        } else {
360
            delta = ft[i].end - ft[i].start;
361
        }
362
        delta -= overhead;
363
        if (delta < 0) delta = 0;
364
        delta *= 1000;
365
        delta = delta - ave;
366
        if (delta < 0) delta = -delta;
367
        ave_dev += delta;
368
    }
369
    ave_dev /= total_samples;
370
    con_ave = 0;
371
    con_min = 0;
372
    for (i = start_sample;  i < nsamples;  i++) {
373
        if (ft[i].end < ft[i].start) {
374
            // Clock wrapped around (timer tick)
375
            delta = (ft[i].end+CYGNUM_KERNEL_COUNTERS_RTC_PERIOD) - ft[i].start;
376
        } else {
377
            delta = ft[i].end - ft[i].start;
378
        }
379
        delta -= overhead;
380
        if (delta < 0) delta = 0;
381
        delta *= 1000;
382
        if ((delta <= (ave+ave_dev)) && (delta >= (ave-ave_dev))) con_ave++;
383
        if ((delta <= (min+ave_dev)) && (delta >= (min-ave_dev))) con_min++;
384
    }
385
    con_ave = (con_ave * 100) / total_samples;
386
    con_min = (con_min * 100) / total_samples;
387
    show_ticks_in_us(ave);
388
    show_ticks_in_us(min);
389
    show_ticks_in_us(max);
390
    show_ticks_in_us(ave_dev);
391
    disable_clock_latency_measurement();
392
    diag_printf("  %3d%% %3d%%", con_ave, con_min);
393
    diag_printf(" %s\n", title);
394
    enable_clock_latency_measurement();
395
}
396
 
397
void
398
show_times(fun_times ft[], int nsamples, char *title)
399
{
400
    show_times_detail(ft, nsamples, title, false);
401
#ifdef STATS_WITHOUT_FIRST_SAMPLE
402
    show_times_detail(ft, nsamples, "", true);
403
#endif
404
}
405
 
406
//--------------------------------------------------------------------------
407
 
408
void
409
show_test_parameters(void)
410
{
411
    disable_clock_latency_measurement();
412
    diag_printf("\nTesting parameters:\n");
413
    diag_printf("   Clock samples:         %5d\n", nsamples);
414
    diag_printf("   Threads:               %5d\n", ntest_threads);
415
    diag_printf("   Thread switches:       %5d\n", nthread_switches);
416
    diag_printf("   Mutexes:               %5d\n", nmutexes);
417
    diag_printf("   Mailboxes:             %5d\n", nmboxes);
418
    diag_printf("   Semaphores:            %5d\n", nsemaphores);
419
    diag_printf("   Scheduler operations:  %5d\n", nscheds);
420
    diag_printf("   Timers:                %5d\n", ntimers);
421
    diag_printf("\n");
422
    enable_clock_latency_measurement();
423
}
424
 
425
void
426
end_of_test_group(void)
427
{
428
    disable_clock_latency_measurement();
429
    diag_printf("\n");
430
    enable_clock_latency_measurement();
431
}
432
 
433
//--------------------------------------------------------------------------
// Compute a name for a thread

// Thread naming is not currently used by this test, so every call yields
// the same placeholder string regardless of the arguments.
char *
thread_name(char *basename, int indx) {
    return "<<NULL>>";  // Not currently used
}
440
 
441
//--------------------------------------------------------------------------
// test0 - null test: immediately returns its argument.  Used when only the
// thread create/join costs themselves are being measured.

void *
test0(void *indx)
{
    return indx;   // nothing to do
}
449
 
450
//--------------------------------------------------------------------------
451
// test3 - loop, yeilding repeatedly and checking for cancellation
452
 
453
void *
454
test3(void *indx)
455
{
456
    for(;;)
457
    {
458
        sched_yield();
459
        pthread_testcancel();
460
    }
461
 
462
    return indx;
463
}
464
 
465
//--------------------------------------------------------------------------
466
// test1 - empty test, simply exit.  Last thread signals parent.
467
 
468
void *
469
test1( void *indx)
470
{
471
    if ((cyg_uint32)indx == (cyg_uint32)(ntest_threads-1)) {
472
        sem_post(&synchro);  // Signal that last thread is dying
473
    }
474
    return indx;
475
}
476
 
477
//--------------------------------------------------------------------------
478
// test2 - measure thread switch times
479
 
480
void *
481
test2(void *indx)
482
{
483
    int i;
484
    for (i = 0;  i < nthread_switches;  i++) {
485
        if ((int)indx == 0) {
486
            HAL_CLOCK_READ(&test2_ft[i].start);
487
        } else {
488
            HAL_CLOCK_READ(&test2_ft[i].end);
489
        }
490
        sched_yield();
491
    }
492
    if ((int)indx == 1) {
493
        sem_post(&synchro);
494
    }
495
 
496
    return indx;
497
}
498
 
499
//--------------------------------------------------------------------------
500
// Full-circuit mutex unlock/lock test
501
 
502
void *
503
mutex_test(void * indx)
504
{
505
    int i;
506
    pthread_mutex_lock(&test_mutexes[0]);
507
    for (i = 0;  i < nmutexes;  i++) {
508
        sem_wait(&synchro);
509
        wait_for_tick(); // Wait until the next clock tick to minimize aberations
510
        HAL_CLOCK_READ(&mutex_ft[i].start);
511
        pthread_mutex_unlock(&test_mutexes[0]);
512
        pthread_mutex_lock(&test_mutexes[0]);
513
        sem_post(&synchro);
514
    }
515
    return indx;
516
}
517
 
518
//--------------------------------------------------------------------------
// Full-circuit mbox put/get test
// (Disabled pending an implementation of POSIX message queues.)

#if 0
void
mbox_test(cyg_uint32 indx)
{
    void *msg;
    do {
        msg = cyg_mbox_get(test_mbox_handles[0]);
        HAL_CLOCK_READ(&mbox_ft[(int)msg].end);
        cyg_semaphore_post(&synchro);
    } while ((int)msg != (nmboxes-1));   // last message carries nmboxes-1
    cyg_thread_exit(0);
}
#endif
534
 
535
//--------------------------------------------------------------------------
536
// Full-circuit semaphore post/wait test
537
 
538
void *
539
semaphore_test(void * indx)
540
{
541
    int i;
542
    for (i = 0;  i < nsemaphores;  i++) {
543
        sem_wait(&test_semaphores[0]);
544
        HAL_CLOCK_READ(&semaphore_ft[i].end);
545
        sem_post(&synchro);
546
    }
547
    return indx;
548
}
549
 
550
//--------------------------------------------------------------------------
551
//
552
// This set of tests is used to measure kernel primitives that deal with threads
553
//
554
 
555
void
556
run_thread_tests(void)
557
{
558
 
559
 
560
    int i;
561
    struct sched_param schedparam;
562
    pthread_attr_t attr;
563
    int policy;
564
    void *retval;
565
 
566
    // Set my priority higher than any I plan to create
567
    schedparam.sched_priority = 30;
568
    pthread_setschedparam( pthread_self(), SCHED_RR, &schedparam );
569
 
570
    // Initiaize thread creation attributes
571
 
572
    pthread_attr_init( &attr );
573
    pthread_attr_setinheritsched( &attr, PTHREAD_EXPLICIT_SCHED );
574
    pthread_attr_setschedpolicy( &attr, SCHED_RR );
575
    schedparam.sched_priority = 10;
576
    pthread_attr_setschedparam( &attr, &schedparam );
577
 
578
 
579
    wait_for_tick(); // Wait until the next clock tick to minimize aberations
580
    for (i = 0;  i < ntest_threads;  i++) {
581
        HAL_CLOCK_READ(&thread_ft[i].start);
582
 
583
        pthread_attr_setstackaddr( &attr, &stacks[i][STACK_SIZE] );
584
        pthread_attr_setstacksize( &attr, STACK_SIZE );
585
        pthread_create( &threads[i],
586
                        &attr,
587
                        test0,
588
                        (void *)i
589
                        );
590
 
591
        HAL_CLOCK_READ(&thread_ft[i].end);
592
    }
593
    show_times(thread_ft, ntest_threads, "Create thread");
594
 
595
    wait_for_tick(); // Wait until the next clock tick to minimize aberations
596
    for (i = 0;  i < ntest_threads;  i++) {
597
        HAL_CLOCK_READ(&thread_ft[i].start);
598
        sched_yield();
599
        HAL_CLOCK_READ(&thread_ft[i].end);
600
    }
601
    show_times(thread_ft, ntest_threads, "Yield thread [all lower priority]");
602
 
603
    wait_for_tick(); // Wait until the next clock tick to minimize aberations
604
    for (i = 0;  i < ntest_threads;  i++) {
605
        HAL_CLOCK_READ(&thread_ft[i].start);
606
 
607
        schedparam.sched_priority = 11;
608
        pthread_attr_setschedparam( &attr, &schedparam );
609
        pthread_setschedparam(threads[i], SCHED_RR, &schedparam);
610
 
611
        HAL_CLOCK_READ(&thread_ft[i].end);
612
    }
613
    show_times(thread_ft, ntest_threads, "Set priority");
614
 
615
    wait_for_tick(); // Wait until the next clock tick to minimize aberations
616
    for (i = 0;  i < ntest_threads;  i++) {
617
        HAL_CLOCK_READ(&thread_ft[i].start);
618
        pthread_getschedparam( threads[i], &policy, &schedparam );
619
        HAL_CLOCK_READ(&thread_ft[i].end);
620
    }
621
    show_times(thread_ft, ntest_threads, "Get priority");
622
 
623
    cyg_thread_delay(1);        // Let the test threads run
624
 
625
    wait_for_tick(); // Wait until the next clock tick to minimize aberations
626
    for (i = 0;  i < ntest_threads;  i++) {
627
        HAL_CLOCK_READ(&thread_ft[i].start);
628
        pthread_join(threads[i], &retval);
629
        HAL_CLOCK_READ(&thread_ft[i].end);
630
    }
631
    show_times(thread_ft, ntest_threads, "Join exited thread");
632
 
633
    wait_for_tick(); // Wait until the next clock tick to minimize aberations
634
    for (i = 0;  i < ntest_threads;  i++) {
635
        HAL_CLOCK_READ(&thread_ft[i].start);
636
        sched_yield();
637
        HAL_CLOCK_READ(&thread_ft[i].end);
638
    }
639
    show_times(thread_ft, ntest_threads, "Yield [no other] thread");
640
 
641
 
642
    // Recreate the test set
643
 
644
    schedparam.sched_priority = 10;
645
    pthread_attr_setschedparam( &attr, &schedparam );
646
 
647
    for (i = 0;  i < ntest_threads;  i++) {
648
        pthread_attr_setstackaddr( &attr, &stacks[i][STACK_SIZE] );
649
        pthread_attr_setstacksize( &attr, STACK_SIZE );
650
        pthread_create( &threads[i],
651
                        &attr,
652
                        test3,
653
                        (void *)i
654
                        );
655
    }
656
 
657
    cyg_thread_delay(1);        // Let the test threads run    
658
 
659
    wait_for_tick(); // Wait until the next clock tick to minimize aberations
660
    for (i = 0;  i < ntest_threads;  i++) {
661
        HAL_CLOCK_READ(&thread_ft[i].start);
662
        pthread_cancel(threads[i]);
663
        HAL_CLOCK_READ(&thread_ft[i].end);
664
    }
665
    show_times(thread_ft, ntest_threads, "Cancel [running] thread");
666
 
667
    cyg_thread_delay(1);        // Let the test threads do their cancellations
668
 
669
    wait_for_tick(); // Wait until the next clock tick to minimize aberations
670
    for (i = 0;  i < ntest_threads;  i++) {
671
        HAL_CLOCK_READ(&thread_ft[i].start);
672
        pthread_join(threads[i], &retval);
673
        HAL_CLOCK_READ(&thread_ft[i].end);
674
    }
675
    show_times(thread_ft, ntest_threads, "Join [cancelled] thread");
676
 
677
 
678
    // Set my priority lower than any I plan to create
679
    schedparam.sched_priority = 5;
680
    pthread_setschedparam( pthread_self(), SCHED_RR, &schedparam );
681
 
682
    // Set up the end-of-threads synchronizer
683
    sem_init(&synchro, 0, 0);
684
 
685
    schedparam.sched_priority = 10;
686
    pthread_attr_setschedparam( &attr, &schedparam );
687
 
688
    wait_for_tick(); // Wait until the next clock tick to minimize aberations
689
    for (i = 0;  i < ntest_threads;  i++) {
690
        HAL_CLOCK_READ(&thread_ft[i].start);
691
 
692
        pthread_attr_setstackaddr( &attr, &stacks[i][STACK_SIZE] );
693
        pthread_attr_setstacksize( &attr, STACK_SIZE );
694
        pthread_create( &threads[i],
695
                        &attr,
696
                        test2,
697
                        (void *)i
698
                        );
699
 
700
        HAL_CLOCK_READ(&thread_ft[i].end);
701
    }
702
    show_times(thread_ft, ntest_threads, "Create [high priority] thread");
703
 
704
    sem_wait(&synchro);  // Wait for all threads to finish
705
 
706
    // Make sure they are all dead
707
    for (i = 0;  i < ntest_threads;  i++) {
708
        pthread_join(threads[i], &retval);
709
    }
710
 
711
    run_thread_switch_test();
712
    end_of_test_group();
713
 
714
}
715
 
716
//--------------------------------------------------------------------------
717
 
718
void
719
run_thread_switch_test(void)
720
{
721
 
722
    int i;
723
    struct sched_param schedparam;
724
    pthread_attr_t attr;
725
    void *retval;
726
 
727
    // Set my priority higher than any I plan to create
728
    schedparam.sched_priority = 30;
729
    pthread_setschedparam( pthread_self(), SCHED_RR, &schedparam );
730
 
731
    // Initiaize thread creation attributes
732
 
733
    pthread_attr_init( &attr );
734
    pthread_attr_setinheritsched( &attr, PTHREAD_EXPLICIT_SCHED );
735
    pthread_attr_setschedpolicy( &attr, SCHED_RR );
736
    schedparam.sched_priority = 10;
737
    pthread_attr_setschedparam( &attr, &schedparam );
738
 
739
    // Set up the end-of-threads synchronizer
740
 
741
    sem_init(&synchro, 0, 0);
742
 
743
    // Set up for thread context switch 
744
 
745
    for (i = 0;  i < 2;  i++) {
746
        pthread_attr_setstackaddr( &attr, &stacks[i][STACK_SIZE] );
747
        pthread_attr_setstacksize( &attr, STACK_SIZE );
748
        pthread_create( &threads[i],
749
                        &attr,
750
                        test2,
751
                        (void *)i
752
                        );
753
    }
754
 
755
    wait_for_tick(); // Wait until the next clock tick to minimize aberations    
756
 
757
    sem_wait(&synchro);
758
 
759
    show_times(test2_ft, nthread_switches, "Thread switch");
760
 
761
    // Clean up
762
    for (i = 0;  i < 2;  i++) {
763
        pthread_join(threads[i], &retval);
764
    }
765
 
766
}
767
 
768
 
769
//--------------------------------------------------------------------------
770
 
771
void
772
run_mutex_tests(void)
773
{
774
 
775
    int i;
776
    pthread_mutexattr_t attr;
777
 
778
    pthread_mutexattr_init( &attr );
779
 
780
    // Mutex primitives
781
    wait_for_tick(); // Wait until the next clock tick to minimize aberations
782
    for (i = 0;  i < nmutexes;  i++) {
783
        HAL_CLOCK_READ(&mutex_ft[i].start);
784
        pthread_mutex_init(&test_mutexes[i], &attr);
785
        HAL_CLOCK_READ(&mutex_ft[i].end);
786
    }
787
    show_times(mutex_ft, nmutexes, "Init mutex");
788
 
789
 
790
    wait_for_tick(); // Wait until the next clock tick to minimize aberations
791
    for (i = 0;  i < nmutexes;  i++) {
792
        HAL_CLOCK_READ(&mutex_ft[i].start);
793
        pthread_mutex_lock(&test_mutexes[i]);
794
        HAL_CLOCK_READ(&mutex_ft[i].end);
795
    }
796
    show_times(mutex_ft, nmutexes, "Lock [unlocked] mutex");
797
 
798
    wait_for_tick(); // Wait until the next clock tick to minimize aberations
799
    for (i = 0;  i < nmutexes;  i++) {
800
        HAL_CLOCK_READ(&mutex_ft[i].start);
801
        pthread_mutex_unlock(&test_mutexes[i]);
802
        HAL_CLOCK_READ(&mutex_ft[i].end);
803
    }
804
    show_times(mutex_ft, nmutexes, "Unlock [locked] mutex");
805
 
806
    wait_for_tick(); // Wait until the next clock tick to minimize aberations
807
    for (i = 0;  i < nmutexes;  i++) {
808
        HAL_CLOCK_READ(&mutex_ft[i].start);
809
        pthread_mutex_trylock(&test_mutexes[i]);
810
        HAL_CLOCK_READ(&mutex_ft[i].end);
811
    }
812
    show_times(mutex_ft, nmutexes, "Trylock [unlocked] mutex");
813
 
814
    wait_for_tick(); // Wait until the next clock tick to minimize aberations
815
    for (i = 0;  i < nmutexes;  i++) {
816
        HAL_CLOCK_READ(&mutex_ft[i].start);
817
        pthread_mutex_trylock(&test_mutexes[i]);
818
        HAL_CLOCK_READ(&mutex_ft[i].end);
819
    }
820
    show_times(mutex_ft, nmutexes, "Trylock [locked] mutex");
821
 
822
    // Must unlock mutices before destroying them.
823
    for (i = 0;  i < nmutexes;  i++) {
824
        pthread_mutex_unlock(&test_mutexes[i]);
825
    }
826
 
827
    wait_for_tick(); // Wait until the next clock tick to minimize aberations
828
    for (i = 0;  i < nmutexes;  i++) {
829
        HAL_CLOCK_READ(&mutex_ft[i].start);
830
        pthread_mutex_destroy(&test_mutexes[i]);
831
        HAL_CLOCK_READ(&mutex_ft[i].end);
832
    }
833
    show_times(mutex_ft, nmutexes, "Destroy mutex");
834
 
835
 
836
    run_mutex_circuit_test();
837
    end_of_test_group();
838
}
839
 
840
//--------------------------------------------------------------------------
841
 
842
void
843
run_mutex_circuit_test(void)
844
{
845
    int i;
846
    pthread_mutexattr_t mattr;
847
    struct sched_param schedparam;
848
    pthread_attr_t attr;
849
    void *retval;
850
 
851
    // Set my priority lower than any I plan to create
852
    schedparam.sched_priority = 5;
853
    pthread_setschedparam( pthread_self(), SCHED_RR, &schedparam );
854
 
855
    // Initiaize thread creation attributes
856
 
857
    pthread_attr_init( &attr );
858
    pthread_attr_setinheritsched( &attr, PTHREAD_EXPLICIT_SCHED );
859
    pthread_attr_setschedpolicy( &attr, SCHED_RR );
860
    schedparam.sched_priority = 10;
861
    pthread_attr_setschedparam( &attr, &schedparam );
862
 
863
    // Set up for full mutex unlock/lock test
864
    pthread_mutexattr_init( &mattr );
865
    pthread_mutex_init(&test_mutexes[0], &mattr);
866
    sem_init(&synchro, 0, 0);
867
 
868
    pthread_attr_setstackaddr( &attr, &stacks[0][STACK_SIZE] );
869
    pthread_attr_setstacksize( &attr, STACK_SIZE );
870
    pthread_create( &mutex_test_thread_handle,
871
                    &attr,
872
                    mutex_test,
873
                    (void *)0
874
        );
875
 
876
    // Need to raise priority so that this thread will block on the "lock"
877
    schedparam.sched_priority = 20;
878
    pthread_setschedparam( pthread_self(), SCHED_RR, &schedparam );
879
 
880
    for (i = 0;  i < nmutexes;  i++) {
881
        sem_post(&synchro);
882
        pthread_mutex_lock(&test_mutexes[0]);
883
        HAL_CLOCK_READ(&mutex_ft[i].end);
884
        pthread_mutex_unlock(&test_mutexes[0]);
885
        sem_wait(&synchro);
886
    }
887
    pthread_join(mutex_test_thread_handle, &retval);
888
    show_times(mutex_ft, nmutexes, "Unlock/Lock mutex");
889
 
890
}
891
 
892
 
893
//--------------------------------------------------------------------------
894
// Message queue tests
895
 
896
// Currently disabled, pending implementation of POSIX message queues
897
 
898
#if 0
// Measure the cost of each basic mailbox primitive by timing nmboxes
// repetitions of every operation.  Per-iteration start/end timestamps are
// collected in mbox_ft[] and summarized by show_times().
// NOTE(review): this body uses the native eCos kernel C API (cyg_mbox_*),
// not POSIX message queues -- hence the whole group is compiled out with
// the "#if 0" above until POSIX queues exist.
void
run_mbox_tests(void)
{
    int i, cnt;
    void *item;
    // Mailbox primitives
    wait_for_tick(); // Wait until the next clock tick to minimize aberations
    for (i = 0;  i < nmboxes;  i++) {
        HAL_CLOCK_READ(&mbox_ft[i].start);
        cyg_mbox_create(&test_mbox_handles[i], &test_mboxes[i]);
        HAL_CLOCK_READ(&mbox_ft[i].end);
    }
    show_times(mbox_ft, nmboxes, "Create mbox");

    wait_for_tick(); // Wait until the next clock tick to minimize aberations
    for (i = 0;  i < nmboxes;  i++) {
        HAL_CLOCK_READ(&mbox_ft[i].start);
        cnt = cyg_mbox_peek(test_mbox_handles[i]);
        HAL_CLOCK_READ(&mbox_ft[i].end);
    }
    show_times(mbox_ft, nmboxes, "Peek [empty] mbox");

#ifdef CYGMFN_KERNEL_SYNCH_MBOXT_PUT_CAN_WAIT
    // cyg_mbox_put() may block when a mailbox is full, so the put/get
    // timings below are only available when "put can wait" is configured.
    wait_for_tick(); // Wait until the next clock tick to minimize aberations
    for (i = 0;  i < nmboxes;  i++) {
        HAL_CLOCK_READ(&mbox_ft[i].start);
        cyg_mbox_put(test_mbox_handles[i], (void *)i);
        HAL_CLOCK_READ(&mbox_ft[i].end);
    }
    show_times(mbox_ft, nmboxes, "Put [first] mbox");

    wait_for_tick(); // Wait until the next clock tick to minimize aberations
    for (i = 0;  i < nmboxes;  i++) {
        HAL_CLOCK_READ(&mbox_ft[i].start);
        cnt = cyg_mbox_peek(test_mbox_handles[i]);
        HAL_CLOCK_READ(&mbox_ft[i].end);
    }
    show_times(mbox_ft, nmboxes, "Peek [1 msg] mbox");

    wait_for_tick(); // Wait until the next clock tick to minimize aberations
    for (i = 0;  i < nmboxes;  i++) {
        HAL_CLOCK_READ(&mbox_ft[i].start);
        cyg_mbox_put(test_mbox_handles[i], (void *)i);
        HAL_CLOCK_READ(&mbox_ft[i].end);
    }
    show_times(mbox_ft, nmboxes, "Put [second] mbox");

    wait_for_tick(); // Wait until the next clock tick to minimize aberations
    for (i = 0;  i < nmboxes;  i++) {
        HAL_CLOCK_READ(&mbox_ft[i].start);
        cnt = cyg_mbox_peek(test_mbox_handles[i]);
        HAL_CLOCK_READ(&mbox_ft[i].end);
    }
    show_times(mbox_ft, nmboxes, "Peek [2 msgs] mbox");

    wait_for_tick(); // Wait until the next clock tick to minimize aberations
    for (i = 0;  i < nmboxes;  i++) {
        HAL_CLOCK_READ(&mbox_ft[i].start);
        item = cyg_mbox_get(test_mbox_handles[i]);
        HAL_CLOCK_READ(&mbox_ft[i].end);
    }
    show_times(mbox_ft, nmboxes, "Get [first] mbox");

    wait_for_tick(); // Wait until the next clock tick to minimize aberations
    for (i = 0;  i < nmboxes;  i++) {
        HAL_CLOCK_READ(&mbox_ft[i].start);
        item = cyg_mbox_get(test_mbox_handles[i]);
        HAL_CLOCK_READ(&mbox_ft[i].end);
    }
    show_times(mbox_ft, nmboxes, "Get [second] mbox");
#endif // ifdef CYGMFN_KERNEL_SYNCH_MBOXT_PUT_CAN_WAIT

    // Non-blocking variants: each mailbox now receives exactly one item
    // via tryput, which is then observed (peek_item) and drained (tryget),
    // leaving the mailboxes empty for the final empty-case timings.
    wait_for_tick(); // Wait until the next clock tick to minimize aberations
    for (i = 0;  i < nmboxes;  i++) {
        HAL_CLOCK_READ(&mbox_ft[i].start);
        cyg_mbox_tryput(test_mbox_handles[i], (void *)i);
        HAL_CLOCK_READ(&mbox_ft[i].end);
    }
    show_times(mbox_ft, nmboxes, "Tryput [first] mbox");

    wait_for_tick(); // Wait until the next clock tick to minimize aberations
    for (i = 0;  i < nmboxes;  i++) {
        HAL_CLOCK_READ(&mbox_ft[i].start);
        item = cyg_mbox_peek_item(test_mbox_handles[i]);
        HAL_CLOCK_READ(&mbox_ft[i].end);
    }
    show_times(mbox_ft, nmboxes, "Peek item [non-empty] mbox");

    wait_for_tick(); // Wait until the next clock tick to minimize aberations
    for (i = 0;  i < nmboxes;  i++) {
        HAL_CLOCK_READ(&mbox_ft[i].start);
        item = cyg_mbox_tryget(test_mbox_handles[i]);
        HAL_CLOCK_READ(&mbox_ft[i].end);
    }
    show_times(mbox_ft, nmboxes, "Tryget [non-empty] mbox");

    wait_for_tick(); // Wait until the next clock tick to minimize aberations
    for (i = 0;  i < nmboxes;  i++) {
        HAL_CLOCK_READ(&mbox_ft[i].start);
        item = cyg_mbox_peek_item(test_mbox_handles[i]);
        HAL_CLOCK_READ(&mbox_ft[i].end);
    }
    show_times(mbox_ft, nmboxes, "Peek item [empty] mbox");

    wait_for_tick(); // Wait until the next clock tick to minimize aberations
    for (i = 0;  i < nmboxes;  i++) {
        HAL_CLOCK_READ(&mbox_ft[i].start);
        item = cyg_mbox_tryget(test_mbox_handles[i]);
        HAL_CLOCK_READ(&mbox_ft[i].end);
    }
    show_times(mbox_ft, nmboxes, "Tryget [empty] mbox");

    wait_for_tick(); // Wait until the next clock tick to minimize aberations
    for (i = 0;  i < nmboxes;  i++) {
        HAL_CLOCK_READ(&mbox_ft[i].start);
        cyg_mbox_waiting_to_get(test_mbox_handles[i]);
        HAL_CLOCK_READ(&mbox_ft[i].end);
    }
    show_times(mbox_ft, nmboxes, "Waiting to get mbox");

    wait_for_tick(); // Wait until the next clock tick to minimize aberations
    for (i = 0;  i < nmboxes;  i++) {
        HAL_CLOCK_READ(&mbox_ft[i].start);
        cyg_mbox_waiting_to_put(test_mbox_handles[i]);
        HAL_CLOCK_READ(&mbox_ft[i].end);
    }
    show_times(mbox_ft, nmboxes, "Waiting to put mbox");

    wait_for_tick(); // Wait until the next clock tick to minimize aberations
    for (i = 0;  i < nmboxes;  i++) {
        HAL_CLOCK_READ(&mbox_ft[i].start);
        cyg_mbox_delete(test_mbox_handles[i]);
        HAL_CLOCK_READ(&mbox_ft[i].end);
    }
    show_times(mbox_ft, nmboxes, "Delete mbox");

    run_mbox_circuit_test();
    end_of_test_group();
}
1038
 
1039
//--------------------------------------------------------------------------
1040
 
1041
void
1042
run_mbox_circuit_test(void)
1043
{
1044
#ifdef CYGMFN_KERNEL_SYNCH_MBOXT_PUT_CAN_WAIT
1045
    int i;
1046
    // Set my priority lower than any I plan to create
1047
    cyg_thread_set_priority(cyg_thread_self(), 3);
1048
    // Set up for full mbox put/get test
1049
    cyg_mbox_create(&test_mbox_handles[0], &test_mboxes[0]);
1050
    cyg_semaphore_init(&synchro, 0);
1051
    cyg_thread_create(2,              // Priority - just a number
1052
                      mbox_test,           // entry
1053
                      0,               // index
1054
                      thread_name("thread", 0),     // Name
1055
                      &stacks[0][0],   // Stack
1056
                      STACK_SIZE,      // Size
1057
                      &mbox_test_thread_handle,   // Handle
1058
                      &mbox_test_thread    // Thread data structure
1059
        );
1060
    cyg_thread_resume(mbox_test_thread_handle);
1061
    for (i = 0;  i < nmboxes;  i++) {
1062
        wait_for_tick(); // Wait until the next clock tick to minimize aberations
1063
        HAL_CLOCK_READ(&mbox_ft[i].start);
1064
        cyg_mbox_put(test_mbox_handles[0], (void *)i);
1065
        cyg_semaphore_wait(&synchro);
1066
    }
1067
    cyg_thread_delete(mbox_test_thread_handle);
1068
    show_times(mbox_ft, nmboxes, "Put/Get mbox");
1069
#endif
1070
}
1071
 
1072
#endif
1073
 
1074
//--------------------------------------------------------------------------
1075
 
1076
void
1077
run_semaphore_tests(void)
1078
{
1079
 
1080
    int i;
1081
    int sem_val;
1082
 
1083
    // Semaphore primitives
1084
    wait_for_tick(); // Wait until the next clock tick to minimize aberations
1085
    for (i = 0;  i < nsemaphores;  i++) {
1086
        HAL_CLOCK_READ(&semaphore_ft[i].start);
1087
        sem_init(&test_semaphores[i], 0, 0);
1088
        HAL_CLOCK_READ(&semaphore_ft[i].end);
1089
    }
1090
    show_times(semaphore_ft, nsemaphores, "Init semaphore");
1091
 
1092
    wait_for_tick(); // Wait until the next clock tick to minimize aberations
1093
    for (i = 0;  i < nsemaphores;  i++) {
1094
        HAL_CLOCK_READ(&semaphore_ft[i].start);
1095
        sem_post(&test_semaphores[i]);
1096
        HAL_CLOCK_READ(&semaphore_ft[i].end);
1097
    }
1098
    show_times(semaphore_ft, nsemaphores, "Post [0] semaphore");
1099
 
1100
    wait_for_tick(); // Wait until the next clock tick to minimize aberations
1101
    for (i = 0;  i < nsemaphores;  i++) {
1102
        HAL_CLOCK_READ(&semaphore_ft[i].start);
1103
        sem_wait(&test_semaphores[i]);
1104
        HAL_CLOCK_READ(&semaphore_ft[i].end);
1105
    }
1106
    show_times(semaphore_ft, nsemaphores, "Wait [1] semaphore");
1107
 
1108
    wait_for_tick(); // Wait until the next clock tick to minimize aberations
1109
    for (i = 0;  i < nsemaphores;  i++) {
1110
        HAL_CLOCK_READ(&semaphore_ft[i].start);
1111
        sem_trywait(&test_semaphores[i]);
1112
        HAL_CLOCK_READ(&semaphore_ft[i].end);
1113
    }
1114
    show_times(semaphore_ft, nsemaphores, "Trywait [0] semaphore");
1115
 
1116
    wait_for_tick(); // Wait until the next clock tick to minimize aberations
1117
    for (i = 0;  i < nsemaphores;  i++) {
1118
        sem_post(&test_semaphores[i]);
1119
        HAL_CLOCK_READ(&semaphore_ft[i].start);
1120
        sem_trywait(&test_semaphores[i]);
1121
        HAL_CLOCK_READ(&semaphore_ft[i].end);
1122
    }
1123
    show_times(semaphore_ft, nsemaphores, "Trywait [1] semaphore");
1124
 
1125
    wait_for_tick(); // Wait until the next clock tick to minimize aberations
1126
    for (i = 0;  i < nsemaphores;  i++) {
1127
        HAL_CLOCK_READ(&semaphore_ft[i].start);
1128
        sem_getvalue(&test_semaphores[i], &sem_val);
1129
        HAL_CLOCK_READ(&semaphore_ft[i].end);
1130
    }
1131
    show_times(semaphore_ft, nsemaphores, "Get value of semaphore");
1132
 
1133
    wait_for_tick(); // Wait until the next clock tick to minimize aberations
1134
    for (i = 0;  i < nsemaphores;  i++) {
1135
        HAL_CLOCK_READ(&semaphore_ft[i].start);
1136
        sem_destroy(&test_semaphores[i]);
1137
        HAL_CLOCK_READ(&semaphore_ft[i].end);
1138
    }
1139
    show_times(semaphore_ft, nsemaphores, "Destroy semaphore");
1140
 
1141
    run_semaphore_circuit_test();
1142
    end_of_test_group();
1143
}
1144
 
1145
//--------------------------------------------------------------------------
1146
 
1147
void
1148
run_semaphore_circuit_test(void)
1149
{
1150
 
1151
    int i;
1152
    struct sched_param schedparam;
1153
    pthread_attr_t attr;
1154
    void *retval;
1155
 
1156
    // Set my priority lower than any I plan to create
1157
    schedparam.sched_priority = 5;
1158
    pthread_setschedparam( pthread_self(), SCHED_RR, &schedparam );
1159
 
1160
    // Initiaize thread creation attributes
1161
 
1162
    pthread_attr_init( &attr );
1163
    pthread_attr_setinheritsched( &attr, PTHREAD_EXPLICIT_SCHED );
1164
    pthread_attr_setschedpolicy( &attr, SCHED_RR );
1165
    schedparam.sched_priority = 10;
1166
    pthread_attr_setschedparam( &attr, &schedparam );
1167
 
1168
    // Set up for full semaphore post/wait test
1169
    sem_init(&test_semaphores[0], 0, 0);
1170
    sem_init(&synchro, 0, 0);
1171
 
1172
    pthread_attr_setstackaddr( &attr, &stacks[0][STACK_SIZE] );
1173
    pthread_attr_setstacksize( &attr, STACK_SIZE );
1174
    pthread_create( &semaphore_test_thread_handle,
1175
                    &attr,
1176
                    semaphore_test,
1177
                    (void *)0
1178
        );
1179
 
1180
 
1181
    for (i = 0;  i < nsemaphores;  i++) {
1182
        wait_for_tick(); // Wait until the next clock tick to minimize aberations
1183
        HAL_CLOCK_READ(&semaphore_ft[i].start);
1184
        sem_post(&test_semaphores[0]);
1185
        sem_wait(&synchro);
1186
    }
1187
    pthread_join(semaphore_test_thread_handle, &retval);
1188
 
1189
    show_times(semaphore_ft, nsemaphores, "Post/Wait semaphore");
1190
 
1191
 
1192
}
1193
 
1194
//--------------------------------------------------------------------------
1195
 
1196
// Timer callback function
1197
void
1198
sigrt0(int signo, siginfo_t *info, void *context)
1199
{
1200
    diag_printf("sigrt0 called\n");
1201
    // empty call back
1202
}
1203
 
1204
// Callback used to test determinancy
1205
static volatile int timer_cnt;
1206
void
1207
sigrt1(int signo, siginfo_t *info, void *context)
1208
{
1209
    if (timer_cnt == nscheds) return;
1210
    sched_ft[timer_cnt].start = 0;
1211
    HAL_CLOCK_READ(&sched_ft[timer_cnt++].end);
1212
    if (timer_cnt == nscheds) {
1213
        sem_post(&synchro);
1214
    }
1215
}
1216
 
1217
static sem_t timer_sem;
1218
 
1219
static void
1220
sigrt2(int signo, siginfo_t *info, void *context)
1221
{
1222
    if (timer_cnt == nscheds) {
1223
        sem_post(&synchro);
1224
        sem_post(&timer_sem);
1225
    } else {
1226
        sched_ft[timer_cnt].start = 0;
1227
        sem_post(&timer_sem);
1228
    }
1229
}
1230
 
1231
// Null thread, used to keep scheduler busy
1232
void *
1233
timer_test(void * id)
1234
{
1235
    while (true) {
1236
        cyg_thread_yield();
1237
        pthread_testcancel();
1238
    }
1239
 
1240
    return id;
1241
}
1242
 
1243
// Thread that suspends itself at the first opportunity
1244
void *
1245
timer_test2(void *id)
1246
{
1247
    while (timer_cnt != nscheds) {
1248
        HAL_CLOCK_READ(&sched_ft[timer_cnt++].end);
1249
        sem_wait(&timer_sem);
1250
    }
1251
    return id;
1252
}
1253
 
1254
void
1255
run_timer_tests(void)
1256
{
1257
    int res;
1258
    int i;
1259
    struct sigaction sa;
1260
    struct sigevent sigev;
1261
    struct itimerspec tp;
1262
 
1263
    // Install signal handlers
1264
    sigemptyset( &sa.sa_mask );
1265
    sa.sa_flags = SA_SIGINFO;
1266
 
1267
    sa.sa_sigaction = sigrt0;
1268
    sigaction( SIGRTMIN, &sa, NULL );
1269
 
1270
    sa.sa_sigaction = sigrt1;
1271
    sigaction( SIGRTMIN+1, &sa, NULL );
1272
 
1273
    sa.sa_sigaction = sigrt2;
1274
    sigaction( SIGRTMIN+2, &sa, NULL );
1275
 
1276
    // Set up common bits of sigevent
1277
 
1278
    sigev.sigev_notify = SIGEV_SIGNAL;
1279
 
1280
    wait_for_tick(); // Wait until the next clock tick to minimize aberations
1281
    for (i = 0;  i < ntimers;  i++) {
1282
        HAL_CLOCK_READ(&timer_ft[i].start);
1283
        sigev.sigev_signo = SIGRTMIN;
1284
        sigev.sigev_value.sival_ptr = (void*)(&timers[i]);
1285
        res = timer_create( CLOCK_REALTIME, &sigev, &timers[i]);
1286
        HAL_CLOCK_READ(&timer_ft[i].end);
1287
        CYG_ASSERT( res == 0 , "timer_create() returned error");
1288
    }
1289
    show_times(timer_ft, ntimers, "Create timer");
1290
 
1291
 
1292
    wait_for_tick(); // Wait until the next clock tick to minimize aberations
1293
    tp.it_value.tv_sec = 0;
1294
    tp.it_value.tv_nsec = 0;
1295
    tp.it_interval.tv_sec = 0;
1296
    tp.it_interval.tv_nsec = 0;
1297
    for (i = 0;  i < ntimers;  i++) {
1298
        HAL_CLOCK_READ(&timer_ft[i].start);
1299
        res = timer_settime( timers[i], 0, &tp, NULL );
1300
        HAL_CLOCK_READ(&timer_ft[i].end);
1301
        CYG_ASSERT( res == 0 , "timer_settime() returned error");
1302
    }
1303
    show_times(timer_ft, ntimers, "Initialize timer to zero");
1304
 
1305
    wait_for_tick(); // Wait until the next clock tick to minimize aberations
1306
    tp.it_value.tv_sec = 1;
1307
    tp.it_value.tv_nsec = 250000000;
1308
    tp.it_interval.tv_sec = 0;
1309
    tp.it_interval.tv_nsec = 0;
1310
    for (i = 0;  i < ntimers;  i++) {
1311
        HAL_CLOCK_READ(&timer_ft[i].start);
1312
        res = timer_settime( timers[i], 0, &tp, NULL );
1313
        HAL_CLOCK_READ(&timer_ft[i].end);
1314
        CYG_ASSERT( res == 0 , "timer_settime() returned error");
1315
    }
1316
    show_times(timer_ft, ntimers, "Initialize timer to 1.25 sec");
1317
 
1318
    wait_for_tick(); // Wait until the next clock tick to minimize aberations
1319
    tp.it_value.tv_sec = 0;
1320
    tp.it_value.tv_nsec = 0;
1321
    tp.it_interval.tv_sec = 0;
1322
    tp.it_interval.tv_nsec = 0;
1323
    for (i = 0;  i < ntimers;  i++) {
1324
        HAL_CLOCK_READ(&timer_ft[i].start);
1325
        res = timer_settime( timers[i], 0, &tp, NULL );
1326
        HAL_CLOCK_READ(&timer_ft[i].end);
1327
        CYG_ASSERT( res == 0 , "timer_settime() returned error");
1328
    }
1329
    show_times(timer_ft, ntimers, "Disable timer");
1330
 
1331
    wait_for_tick(); // Wait until the next clock tick to minimize aberations
1332
    for (i = 0;  i < ntimers;  i++) {
1333
        HAL_CLOCK_READ(&timer_ft[i].start);
1334
        res = timer_delete( timers[i] );
1335
        HAL_CLOCK_READ(&timer_ft[i].end);
1336
        CYG_ASSERT( res == 0 , "timer_settime() returned error");
1337
    }
1338
    show_times(timer_ft, ntimers, "Delete timer");
1339
 
1340
 
1341
 
1342
    sigev.sigev_signo = SIGRTMIN+1;
1343
    sigev.sigev_value.sival_ptr = (void*)(&timers[i]);
1344
    res = timer_create( CLOCK_REALTIME, &sigev, &timers[0]);
1345
    CYG_ASSERT( res == 0 , "timer_create() returned error");
1346
    tp.it_value.tv_sec = 0;
1347
    tp.it_value.tv_nsec = 50000000;
1348
    tp.it_interval.tv_sec = 0;
1349
    tp.it_interval.tv_nsec = 50000000;;
1350
    timer_cnt = 0;
1351
    res = timer_settime( timers[0], 0, &tp, NULL );
1352
    CYG_ASSERT( res == 0 , "timer_settime() returned error");
1353
    sem_init(&synchro, 0, 0);
1354
    wait_for_tick(); // Wait until the next clock tick to minimize aberations
1355
    do
1356
    { res = sem_wait(&synchro);
1357
    } while( res == -1 && errno == EINTR );
1358
    CYG_ASSERT( res == 0 , "sem_wait() returned error");
1359
    tp.it_value.tv_sec = 0;
1360
    tp.it_value.tv_nsec = 0;
1361
    tp.it_interval.tv_sec = 0;
1362
    tp.it_interval.tv_nsec = 0;
1363
    res = timer_settime( timers[0], 0, &tp, NULL );
1364
    CYG_ASSERT( res == 0 , "timer_settime() returned error");
1365
    res = timer_delete( timers[0] );
1366
    CYG_ASSERT( res == 0 , "timer_delete() returned error");
1367
    show_times(sched_ft, nscheds, "Timer latency [0 threads]");
1368
 
1369
 
1370
 
1371
 
1372
    struct sched_param schedparam;
1373
    pthread_attr_t attr;
1374
    void *retval;
1375
 
1376
    // Set my priority higher than any I plan to create
1377
    schedparam.sched_priority = 20;
1378
    pthread_setschedparam( pthread_self(), SCHED_RR, &schedparam );
1379
 
1380
 
1381
    // Initiaize thread creation attributes
1382
 
1383
    pthread_attr_init( &attr );
1384
    pthread_attr_setinheritsched( &attr, PTHREAD_EXPLICIT_SCHED );
1385
    pthread_attr_setschedpolicy( &attr, SCHED_RR );
1386
    schedparam.sched_priority = 10;
1387
    pthread_attr_setschedparam( &attr, &schedparam );
1388
 
1389
    for (i = 0;  i < 2;  i++) {
1390
        pthread_attr_setstackaddr( &attr, &stacks[i][STACK_SIZE] );
1391
        pthread_attr_setstacksize( &attr, STACK_SIZE );
1392
        res = pthread_create( &threads[i],
1393
                        &attr,
1394
                        timer_test,
1395
                        (void *)i
1396
                        );
1397
        CYG_ASSERT( res == 0 , "pthread_create() returned error");
1398
    }
1399
 
1400
    wait_for_tick(); // Wait until the next clock tick to minimize aberations
1401
 
1402
    sigev.sigev_signo = SIGRTMIN+1;
1403
    sigev.sigev_value.sival_ptr = (void*)(&timers[i]);
1404
    res = timer_create( CLOCK_REALTIME, &sigev, &timers[0]);
1405
    CYG_ASSERT( res == 0 , "timer_create() returned error");
1406
    tp.it_value.tv_sec = 0;
1407
    tp.it_value.tv_nsec = 50000000;
1408
    tp.it_interval.tv_sec = 0;
1409
    tp.it_interval.tv_nsec = 50000000;;
1410
    timer_cnt = 0;
1411
    res = timer_settime( timers[0], 0, &tp, NULL );
1412
    CYG_ASSERT( res == 0 , "timer_settime() returned error");
1413
 
1414
    sem_init(&synchro, 0, 0);
1415
    do
1416
    { res = sem_wait(&synchro);
1417
    } while( res == -1 && errno == EINTR );
1418
    CYG_ASSERT( res == 0 , "sem_wait() returned error");
1419
    res = timer_delete(timers[0]);
1420
    CYG_ASSERT( res == 0 , "timerdelete() returned error");
1421
    show_times(sched_ft, nscheds, "Timer latency [2 threads]");
1422
    for (i = 0;  i < 2;  i++) {
1423
        pthread_cancel(threads[i]);
1424
        pthread_join(threads[i], &retval);
1425
    }
1426
 
1427
 
1428
 
1429
    for (i = 0;  i < ntest_threads;  i++) {
1430
        pthread_attr_setstackaddr( &attr, &stacks[i][STACK_SIZE] );
1431
        pthread_attr_setstacksize( &attr, STACK_SIZE );
1432
        res = pthread_create( &threads[i],
1433
                        &attr,
1434
                        timer_test,
1435
                        (void *)i
1436
                        );
1437
        CYG_ASSERT( res == 0 , "pthread_create() returned error");
1438
    }
1439
    wait_for_tick(); // Wait until the next clock tick to minimize aberations
1440
    sigev.sigev_signo = SIGRTMIN+1;
1441
    sigev.sigev_value.sival_ptr = (void*)(&timers[i]);
1442
    res = timer_create( CLOCK_REALTIME, &sigev, &timers[0]);
1443
    CYG_ASSERT( res == 0 , "timer_create() returned error");
1444
    tp.it_value.tv_sec = 0;
1445
    tp.it_value.tv_nsec = 50000000;
1446
    tp.it_interval.tv_sec = 0;
1447
    tp.it_interval.tv_nsec = 50000000;;
1448
    timer_cnt = 0;
1449
    res = timer_settime( timers[0], 0, &tp, NULL );
1450
    CYG_ASSERT( res == 0 , "timer_settime() returned error");
1451
 
1452
    sem_init(&synchro, 0, 0);
1453
    do
1454
    { res = sem_wait(&synchro);
1455
    } while( res == -1 && errno == EINTR );
1456
    CYG_ASSERT( res == 0 , "sem_wait() returned error");
1457
    res = timer_delete(timers[0]);
1458
    CYG_ASSERT( res == 0 , "timerdelete() returned error");
1459
    show_times(sched_ft, nscheds, "Timer latency [many threads]");
1460
    for (i = 0;  i < ntest_threads;  i++) {
1461
        pthread_cancel(threads[i]);
1462
        pthread_join(threads[i], &retval);
1463
    }
1464
 
1465
    sem_init(&synchro, 0, 0);
1466
    sem_init(&timer_sem, 0, 0);
1467
    pthread_attr_setstackaddr( &attr, &stacks[0][STACK_SIZE] );
1468
    pthread_attr_setstacksize( &attr, STACK_SIZE );
1469
    res = pthread_create( &threads[0],
1470
                          &attr,
1471
                          timer_test2,
1472
                          (void *)0
1473
        );
1474
    CYG_ASSERT( res == 0 , "pthread_create() returned error");
1475
 
1476
    wait_for_tick(); // Wait until the next clock tick to minimize aberations
1477
    sigev.sigev_signo = SIGRTMIN+2;
1478
    sigev.sigev_value.sival_ptr = (void*)(threads[0]);
1479
    res = timer_create( CLOCK_REALTIME, &sigev, &timers[0]);
1480
    CYG_ASSERT( res == 0 , "timer_create() returned error");
1481
    tp.it_value.tv_sec = 0;
1482
    tp.it_value.tv_nsec = 50000000;
1483
    tp.it_interval.tv_sec = 0;
1484
    tp.it_interval.tv_nsec = 50000000;;
1485
    timer_cnt = 0;
1486
    res = timer_settime( timers[0], 0, &tp, NULL );
1487
    CYG_ASSERT( res == 0 , "timer_settime() returned error");
1488
 
1489
    do
1490
    { res = sem_wait(&synchro);
1491
    } while( res == -1 && errno == EINTR );
1492
    CYG_ASSERT( res == 0 , "sem_wait() returned error");
1493
    res = timer_delete(timers[0]);
1494
    CYG_ASSERT( res == 0 , "timerdelete() returned error");
1495
    show_times(sched_ft, nscheds, "Timer -> thread post latency");
1496
    sem_post(&timer_sem);
1497
//    pthread_cancel(threads[0]);
1498
    pthread_join(threads[0], &retval);
1499
 
1500
 
1501
    end_of_test_group();
1502
}
1503
 
1504
 
1505
//--------------------------------------------------------------------------
1506
 
1507
void
1508
run_all_tests()
1509
{
1510
    int i;
1511
    cyg_uint32 tv[nsamples], tv0, tv1;
1512
//    cyg_uint32 min_stack, max_stack, total_stack, actual_stack, j;
1513
    cyg_tick_count_t ticks, tick0, tick1;
1514
#ifdef CYG_SCHEDULER_LOCK_TIMINGS
1515
    cyg_uint32 lock_ave, lock_max;
1516
#endif
1517
#if defined(CYGVAR_KERNEL_COUNTERS_CLOCK_LATENCY) && defined(HAL_CLOCK_LATENCY)
1518
    cyg_int32 clock_ave;
1519
#endif
1520
 
1521
    disable_clock_latency_measurement();
1522
 
1523
//    cyg_test_dump_thread_stack_stats( "Startup, main stack", thread[0] );
1524
    cyg_test_dump_interrupt_stack_stats( "Startup" );
1525
    cyg_test_dump_idlethread_stack_stats( "Startup" );
1526
    cyg_test_clear_interrupt_stack();
1527
 
1528
    diag_printf("\neCos Kernel Timings\n");
1529
    diag_printf("Notes: all times are in microseconds (.000001) unless otherwise stated\n");
1530
#ifdef STATS_WITHOUT_FIRST_SAMPLE
1531
    diag_printf("       second line of results have first sample removed\n");
1532
#endif
1533
 
1534
    cyg_thread_delay(2);  // Make sure the clock is actually running
1535
 
1536
    ns_per_system_clock = 1000000/rtc_resolution[1];
1537
 
1538
    for (i = 0;  i < nsamples;  i++) {
1539
        HAL_CLOCK_READ(&tv[i]);
1540
    }
1541
    tv0 = 0;
1542
    for (i = 1;  i < nsamples;  i++) {
1543
        tv0 += tv[i] - tv[i-1];
1544
    }
1545
    end_of_test_group();
1546
 
1547
    overhead = tv0 / (nsamples-1);
1548
    diag_printf("Reading the hardware clock takes %d 'ticks' overhead\n", overhead);
1549
    diag_printf("... this value will be factored out of all other measurements\n");
1550
 
1551
    // Try and measure how long the clock interrupt handling takes
1552
    for (i = 0;  i < nsamples;  i++) {
1553
        tick0 = cyg_current_time();
1554
        while (true) {
1555
            tick1 = cyg_current_time();
1556
            if (tick0 != tick1) break;
1557
        }
1558
        HAL_CLOCK_READ(&tv[i]);
1559
    }
1560
    tv1 = 0;
1561
    for (i = 0;  i < nsamples;  i++) {
1562
        tv1 += tv[i] * 1000;
1563
    }
1564
    tv1 = tv1 / nsamples;
1565
    tv1 -= overhead;  // Adjust out the cost of getting the timer value
1566
    diag_printf("Clock interrupt took");
1567
    show_ticks_in_us(tv1);
1568
    diag_printf(" microseconds (%d raw clock ticks)\n", tv1/1000);
1569
    enable_clock_latency_measurement();
1570
 
1571
    ticks = cyg_current_time();
1572
 
1573
    show_test_parameters();
1574
    show_times_hdr();
1575
 
1576
    reset_clock_latency_measurement();
1577
 
1578
    run_thread_tests();
1579
    run_mutex_tests();
1580
//    run_mbox_tests();
1581
    run_semaphore_tests();
1582
    run_timer_tests();
1583
 
1584
#ifdef CYG_SCHEDULER_LOCK_TIMINGS
1585
    Cyg_Scheduler::get_lock_times(&lock_ave, &lock_max);
1586
    diag_printf("\nMax lock:");
1587
    show_ticks_in_us(lock_max);
1588
    diag_printf(", Ave lock:");
1589
    show_ticks_in_us(lock_ave);
1590
    diag_printf("\n");
1591
#endif
1592
 
1593
#if defined(CYGVAR_KERNEL_COUNTERS_CLOCK_LATENCY) && defined(HAL_CLOCK_LATENCY)
1594
    // Display latency figures in same format as all other numbers
1595
    disable_clock_latency_measurement();
1596
    clock_ave = (total_clock_latency*1000) / total_clock_interrupts;
1597
    show_ticks_in_us(clock_ave);
1598
    show_ticks_in_us(min_clock_latency*1000);
1599
    show_ticks_in_us(max_clock_latency*1000);
1600
    show_ticks_in_us(0);
1601
    diag_printf("            Clock/interrupt latency\n\n");
1602
    enable_clock_latency_measurement();
1603
#endif
1604
 
1605
#if defined(CYGVAR_KERNEL_COUNTERS_CLOCK_DSR_LATENCY)
1606
    disable_clock_latency_measurement();
1607
    clock_ave = (total_clock_dsr_latency*1000) / total_clock_dsr_calls;
1608
    show_ticks_in_us(clock_ave);
1609
    show_ticks_in_us(min_clock_dsr_latency*1000);
1610
    show_ticks_in_us(max_clock_dsr_latency*1000);
1611
    show_ticks_in_us(0);
1612
    diag_printf("            Clock DSR latency\n\n");
1613
    enable_clock_latency_measurement();
1614
#endif
1615
 
1616
#if 0    
1617
    disable_clock_latency_measurement();
1618
    min_stack = STACK_SIZE;
1619
    max_stack = 0;
1620
    total_stack = 0;
1621
    for (i = 0;  i < (int)NTEST_THREADS;  i++) {
1622
        for (j = 0;  j < STACK_SIZE;  j++) {
1623
            if (stacks[i][j]) break;
1624
        }
1625
        actual_stack = STACK_SIZE-j;
1626
        if (actual_stack < min_stack) min_stack = actual_stack;
1627
        if (actual_stack > max_stack) max_stack = actual_stack;
1628
        total_stack += actual_stack;
1629
    }
1630
    for (j = 0;  j < STACKSIZE;  j++) {
1631
        if (((char *)stack[0])[j]) break;
1632
    }
1633
    diag_printf("%5d   %5d   %5d  (main stack: %5d)  Thread stack used (%d total)\n",
1634
                total_stack/NTEST_THREADS, min_stack, max_stack,
1635
                STACKSIZE - j, STACK_SIZE);
1636
#endif
1637
 
1638
//    cyg_test_dump_thread_stack_stats( "All done, main stack", thread[0] );
1639
    cyg_test_dump_interrupt_stack_stats( "All done" );
1640
    cyg_test_dump_idlethread_stack_stats( "All done" );
1641
 
1642
    enable_clock_latency_measurement();
1643
 
1644
    ticks = cyg_current_time();
1645
    diag_printf("\nTiming complete - %d ms total\n\n", (int)((ticks*ns_per_system_clock)/1000));
1646
 
1647
    CYG_TEST_PASS_FINISH("Basic timing OK");
1648
}
1649
 
1650
int main( int argc, char **argv )
1651
{
1652
    CYG_TEST_INIT();
1653
 
1654
    if (cyg_test_is_simulator) {
1655
        nsamples = NSAMPLES_SIM;
1656
        ntest_threads = NTEST_THREADS_SIM;
1657
        nthread_switches = NTHREAD_SWITCHES_SIM;
1658
        nmutexes = NMUTEXES_SIM;
1659
        nmboxes = NMBOXES_SIM;
1660
        nsemaphores = NSEMAPHORES_SIM;
1661
        nscheds = NSCHEDS_SIM;
1662
        ntimers = NTIMERS_SIM;
1663
    } else {
1664
        nsamples = NSAMPLES;
1665
        ntest_threads = NTEST_THREADS;
1666
        nthread_switches = NTHREAD_SWITCHES;
1667
        nmutexes = NMUTEXES;
1668
        nmboxes = NMBOXES;
1669
        nsemaphores = NSEMAPHORES;
1670
        nscheds = NSCHEDS;
1671
        ntimers = NTIMERS;
1672
    }
1673
 
1674
    // Sanity
1675
#ifdef WORKHORSE_TEST
1676
    ntest_threads = max(512, ntest_threads);
1677
    nmutexes = max(1024, nmutexes);
1678
    nsemaphores = max(1024, nsemaphores);
1679
    nmboxes = max(1024, nmboxes);
1680
    ncounters = max(1024, ncounters);
1681
    ntimers = max(1024, ntimers);
1682
#else
1683
    ntest_threads = max(64, ntest_threads);
1684
    nmutexes = max(32, nmutexes);
1685
    nsemaphores = max(32, nsemaphores);
1686
    nmboxes = max(32, nmboxes);
1687
    ntimers = max(32, ntimers);
1688
#endif
1689
 
1690
    run_all_tests();
1691
 
1692
}
1693
 
1694
#endif // CYGFUN_KERNEL_API_C, etc.
1695
 
1696
// EOF tm_basic.cxx

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.