/*  cpu.h
 *
 *  This include file contains information pertaining to the IDT 4650
 *  processor.
 *
 *  Author:     Craig Lebakken <craigl@transition.com>
 *
 *  COPYRIGHT (c) 1996 by Transition Networks Inc.
 *
 *  To anyone who acknowledges that this file is provided "AS IS"
 *  without any express or implied warranty:
 *      permission to use, copy, modify, and distribute this file
 *      for any purpose is hereby granted without fee, provided that
 *      the above copyright notice and this notice appears in all
 *      copies, and that the name of Transition Networks not be used in
 *      advertising or publicity pertaining to distribution of the
 *      software without specific, written prior permission.
 *      Transition Networks makes no representations about the suitability
 *      of this software for any purpose.
 *
 *  Derived from source copyrighted as follows:
 *
 *  COPYRIGHT (c) 1989-1999.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.OARcorp.com/rtems/license.html.
 *
 *  cpu.h,v 1.11 2002/04/03 14:05:25 joel Exp
 */
/* @(#)cpu.h       08/29/96     1.7 */

#ifndef __CPU_h
#define __CPU_h

#ifdef __cplusplus
extern "C" {
#endif

#include <rtems/score/mips64orion.h>       /* pick up machine definitions */
#ifndef ASM
#include <rtems/score/types.h>
#endif

extern int mips_disable_interrupts( void );
extern void mips_enable_interrupts( int _level );
extern int mips_disable_global_interrupts( void );
extern void mips_enable_global_interrupts( void );
extern void mips_fatal_error ( int error );

/* conditional compilation parameters */

/*
 *  Should the calls to _Thread_Enable_dispatch be inlined?
 *
 *  If TRUE, then they are inlined.
 *  If FALSE, then a subroutine call is made.
 *
 *  Basically this is an example of the classic trade-off of size
 *  versus speed.  Inlining the call (TRUE) typically increases the
 *  size of RTEMS while speeding up the enabling of dispatching.
 *  [NOTE: In general, the _Thread_Dispatch_disable_level will
 *  only be 0 or 1 unless you are in an interrupt handler and that
 *  interrupt handler invokes the executive.]  When not inlined
 *  something calls _Thread_Enable_dispatch which in turn calls
 *  _Thread_Dispatch.  If the enable dispatch is inlined, then
 *  one subroutine call is avoided entirely.
 */

#define CPU_INLINE_ENABLE_DISPATCH       TRUE

/*
 *  Should the body of the search loops in _Thread_queue_Enqueue_priority
 *  be unrolled one time?  If unrolled, each iteration of the loop examines
 *  two "nodes" on the chain being searched.  Otherwise, only one node
 *  is examined per iteration.
 *
 *  If TRUE, then the loops are unrolled.
 *  If FALSE, then the loops are not unrolled.
 *
 *  The primary factor in making this decision is the cost of disabling
 *  and enabling interrupts (_ISR_Flash) versus the cost of the rest of the
 *  body of the loop.  On some CPUs, the flash is more expensive than
 *  one iteration of the loop body.  In this case, it might be desirable
 *  to unroll the loop.  It is important to note that on some CPUs, this
 *  code is the longest interrupt disable period in RTEMS.  So it is
 *  necessary to strike a balance when setting this parameter.
 */

#define CPU_UNROLL_ENQUEUE_PRIORITY      TRUE

/*
 *  Does RTEMS manage a dedicated interrupt stack in software?
 *
 *  If TRUE, then a stack is allocated in _ISR_Handler_initialization.
 *  If FALSE, nothing is done.
 *
 *  If the CPU supports a dedicated interrupt stack in hardware,
 *  then it is generally the responsibility of the BSP to allocate it
 *  and set it up.
 *
 *  If the CPU does not support a dedicated interrupt stack, then
 *  the porter has two options: (1) execute interrupts on the
 *  stack of the interrupted task, and (2) have RTEMS manage a dedicated
 *  interrupt stack.
 *
 *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
 *
 *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
 *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
 *  possible that both are FALSE for a particular CPU, although it
 *  is unclear what that would imply about the interrupt processing
 *  procedure on that CPU.
 */

#define CPU_HAS_SOFTWARE_INTERRUPT_STACK FALSE

/*
 *  Does this CPU have hardware support for a dedicated interrupt stack?
 *
 *  If TRUE, then it must be installed during initialization.
 *  If FALSE, then no installation is performed.
 *
 *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
 *
 *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
 *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
 *  possible that both are FALSE for a particular CPU, although it
 *  is unclear what that would imply about the interrupt processing
 *  procedure on that CPU.
 */

#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE

/*
 *  Does RTEMS allocate a dedicated interrupt stack in the Interrupt Manager?
 *
 *  If TRUE, then the memory is allocated during initialization.
 *  If FALSE, then it is done in a CPU dependent manner.
 *
 *  This should be TRUE if CPU_HAS_SOFTWARE_INTERRUPT_STACK is TRUE
 *  or CPU_INSTALL_HARDWARE_INTERRUPT_STACK is TRUE.
 */

#define CPU_ALLOCATE_INTERRUPT_STACK FALSE

/*
 *  Does RTEMS invoke the user's ISR with the vector number and
 *  a pointer to the saved interrupt frame (1) or just the vector
 *  number (0)?
 */

#define CPU_ISR_PASSES_FRAME_POINTER 0

/*
 *  Does the CPU have hardware floating point?
 *
 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is supported.
 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is ignored.
 *
 *  If there is a FP coprocessor such as the i387 or mc68881, then
 *  the answer is TRUE.
 *
 *  The macro name "MIPS64ORION_HAS_FPU" should be made CPU specific.
 *  It indicates whether or not this CPU model has FP support.  For
 *  example, it would be possible to have an i386_nofp CPU model
 *  which sets this to false to indicate that you have an i386 without
 *  an i387 and wish to leave floating point support out of RTEMS.
 */

#if ( MIPS64ORION_HAS_FPU == 1 )
#define CPU_HARDWARE_FP     TRUE
#else
#define CPU_HARDWARE_FP     FALSE
#endif
#define CPU_SOFTWARE_FP     FALSE

/*
 *  Are all tasks RTEMS_FLOATING_POINT tasks implicitly?
 *
 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is assumed.
 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is followed.
 *
 *  So far, the only CPU in which this option has been used is the
 *  HP PA-RISC.  The HP C compiler and gcc both implicitly use the
 *  floating point registers to perform integer multiplies.  If
 *  a function which you would not expect to use the FP unit DOES,
 *  then one cannot easily predict which tasks will use the FP hardware.
 *  In this case, this option should be TRUE.
 *
 *  If CPU_HARDWARE_FP is FALSE, then this should be FALSE as well.
 */

#define CPU_ALL_TASKS_ARE_FP    FALSE

/*
 *  Should the IDLE task have a floating point context?
 *
 *  If TRUE, then the IDLE task is created as an RTEMS_FLOATING_POINT task
 *  and it has a floating point context which is switched in and out.
 *  If FALSE, then the IDLE task does not have a floating point context.
 *
 *  Setting this to TRUE negatively impacts the time required to preempt
 *  the IDLE task from an interrupt because the floating point context
 *  must be saved as part of the preemption.
 */

#define CPU_IDLE_TASK_IS_FP      FALSE

/*
 *  Should the saving of the floating point registers be deferred
 *  until a context switch is made to another different floating point
 *  task?
 *
 *  If TRUE, then the floating point context will not be stored until
 *  necessary.  It will remain in the floating point registers and not
 *  disturbed until another floating point task is switched to.
 *
 *  If FALSE, then the floating point context is saved when a floating
 *  point task is switched out and restored when the next floating point
 *  task is restored.  The state of the floating point registers between
 *  those two operations is not specified.
 *
 *  If the floating point context does NOT have to be saved as part of
 *  interrupt dispatching, then it should be safe to set this to TRUE.
 *
 *  Setting this flag to TRUE results in using a different algorithm
 *  for deciding when to save and restore the floating point context.
 *  The deferred FP switch algorithm minimizes the number of times
 *  the FP context is saved and restored.  The FP context is not saved
 *  until a context switch is made to another, different FP task.
 *  Thus in a system with only one FP task, the FP context will never
 *  be saved or restored.
 */

#define CPU_USE_DEFERRED_FP_SWITCH       TRUE

/*
 *  Does this port provide a CPU dependent IDLE task implementation?
 *
 *  If TRUE, then the routine _CPU_Internal_threads_Idle_thread_body
 *  must be provided and is the default IDLE thread body instead of
 *  _Internal_threads_Idle_thread_body.
 *
 *  If FALSE, then use the generic IDLE thread body if the BSP does
 *  not provide one.
 *
 *  This is intended to allow for supporting processors which have
 *  a low power or idle mode.  When the IDLE thread is executed, then
 *  the CPU can be powered down.
 *
 *  The order of precedence for selecting the IDLE thread body is:
 *
 *    1.  BSP provided
 *    2.  CPU dependent (if provided)
 *    3.  generic (if no BSP and no CPU dependent)
 */

/* we can use the low power wait instruction for the IDLE thread */
#define CPU_PROVIDES_IDLE_THREAD_BODY    TRUE

/*
 *  Does the stack grow up (toward higher addresses) or down
 *  (toward lower addresses)?
 *
 *  If TRUE, then the stack grows upward.
 *  If FALSE, then the stack grows toward smaller addresses.
 */

/* our stack grows down */
#define CPU_STACK_GROWS_UP               FALSE

/*
 *  The following is the variable attribute used to force alignment
 *  of critical RTEMS structures.  On some processors it may make
 *  sense to have these aligned on tighter boundaries than
 *  the minimum requirements of the compiler in order to have as
 *  much of the critical data area as possible in a cache line.
 *
 *  The placement of this macro in the declaration of the variables
 *  is based on the syntactic requirements of the GNU C
 *  "__attribute__" extension.  For example with GNU C, use
 *  the following to force a structure to a 32 byte boundary.
 *
 *      __attribute__ ((aligned (32)))
 *
 *  NOTE:  Currently only the Priority Bit Map table uses this feature.
 *         To benefit from using this, the data must be heavily
 *         used so it will stay in the cache and used frequently enough
 *         in the executive to justify turning this on.
 */

/* our cache line size is 16 bytes */
#if __GNUC__
#define CPU_STRUCTURE_ALIGNMENT __attribute__ ((aligned (16)))
#else
#define CPU_STRUCTURE_ALIGNMENT
#endif
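
/*
 *  Illustrative usage only (the variable name is hypothetical): the
 *  attribute macro is appended to the declaration that should be
 *  aligned, e.g.
 *
 *      SCORE_EXTERN unsigned32 _Example_bit_map[ 4 ] CPU_STRUCTURE_ALIGNMENT;
 *
 *  With GNU C this expands to __attribute__ ((aligned (16))); with other
 *  compilers it expands to nothing and the default alignment applies.
 */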

/*
 *  Define what is required to specify how the network to host conversion
 *  routines are handled.
 */

#define CPU_HAS_OWN_HOST_TO_NETWORK_ROUTINES     FALSE
#define CPU_BIG_ENDIAN                           TRUE
#define CPU_LITTLE_ENDIAN                        FALSE

/*
 *  The following defines the number of bits actually used in the
 *  interrupt field of the task mode.  How those bits map to the
 *  CPU interrupt levels is defined by the routine _CPU_ISR_Set_level().
 */

#define CPU_MODES_INTERRUPT_MASK   0x00000001

/*
 *  Processor defined structures
 *
 *  Example structures include the descriptor tables from the i386
 *  and the processor control structure on the i960ca.
 */

/* may need to put some structures here.  */

/*
 * Contexts
 *
 *  Generally there are 2 types of context to save.
 *     1. Interrupt registers to save
 *     2. Task level registers to save
 *
 *  This means we have the following 3 context items:
 *     1. task level context stuff::  Context_Control
 *     2. floating point task stuff:: Context_Control_fp
 *     3. special interrupt level context :: Context_Control_interrupt
 *
 *  On some processors, it is cost-effective to save only the callee
 *  preserved registers during a task context switch.  This means
 *  that the ISR code needs to save those registers which do not
 *  persist across function calls.  It is not mandatory to make this
 *  distinction between the caller/callee saved registers for the
 *  purpose of minimizing context saved during task switch and on interrupts.
 *  If the cost of saving extra registers is minimal, simplicity is the
 *  choice.  Save the same context on interrupt entry as for tasks in
 *  this case.
 *
 *  Additionally, if gdb is to be made aware of RTEMS tasks for this CPU, then
 *  care should be used in designing the context area.
 *
 *  On some CPUs with hardware floating point support, the Context_Control_fp
 *  structure will not be used or it simply consists of an array of a
 *  fixed number of bytes.   This is done when the floating point context
 *  is dumped by a "FP save context" type instruction and the format
 *  is not really defined by the CPU.  In this case, there is no need
 *  to figure out the exact format -- only the size.  Of course, although
 *  this is enough information for RTEMS, it is probably not enough for
 *  a debugger such as gdb.  But that is another problem.
 */

/* WARNING: If this structure is modified, the constants in cpu.h must be updated. */
typedef struct {
    unsigned64 s0;
    unsigned64 s1;
    unsigned64 s2;
    unsigned64 s3;
    unsigned64 s4;
    unsigned64 s5;
    unsigned64 s6;
    unsigned64 s7;
    unsigned64 sp;
    unsigned64 fp;
    unsigned64 ra;
    unsigned64 c0_sr;
    unsigned64 c0_epc;
} Context_Control;

/* WARNING: If this structure is modified, the constants in cpu.h must be updated. */
typedef struct {
    unsigned32      fp0;
    unsigned32      fp1;
    unsigned32      fp2;
    unsigned32      fp3;
    unsigned32      fp4;
    unsigned32      fp5;
    unsigned32      fp6;
    unsigned32      fp7;
    unsigned32      fp8;
    unsigned32      fp9;
    unsigned32      fp10;
    unsigned32      fp11;
    unsigned32      fp12;
    unsigned32      fp13;
    unsigned32      fp14;
    unsigned32      fp15;
    unsigned32      fp16;
    unsigned32      fp17;
    unsigned32      fp18;
    unsigned32      fp19;
    unsigned32      fp20;
    unsigned32      fp21;
    unsigned32      fp22;
    unsigned32      fp23;
    unsigned32      fp24;
    unsigned32      fp25;
    unsigned32      fp26;
    unsigned32      fp27;
    unsigned32      fp28;
    unsigned32      fp29;
    unsigned32      fp30;
    unsigned32      fp31;
} Context_Control_fp;

typedef struct {
    unsigned32 special_interrupt_register;
} CPU_Interrupt_frame;


/*
 *  The following table contains the information required to configure
 *  the mips processor specific parameters.
 */

typedef struct {
  void       (*pretasking_hook)( void );
  void       (*predriver_hook)( void );
  void       (*postdriver_hook)( void );
  void       (*idle_task)( void );
  boolean      do_zero_of_workspace;
  unsigned32   idle_task_stack_size;
  unsigned32   interrupt_stack_size;
  unsigned32   extra_mpci_receive_server_stack;
  void *     (*stack_allocate_hook)( unsigned32 );
  void       (*stack_free_hook)( void* );
  /* end of fields required on all CPUs */

  unsigned32   clicks_per_microsecond;
}   rtems_cpu_table;

/*
 *  Macros to access required entries in the CPU Table are in
 *  the file rtems/system.h.
 */

/*
 *  Macros to access MIPS64ORION specific additions to the CPU Table
 */

#define rtems_cpu_configuration_get_clicks_per_microsecond() \
   (_CPU_Table.clicks_per_microsecond)

/*
 *  This variable is optional.  It is used on CPUs on which it is difficult
 *  to generate an "uninitialized" FP context.  It is filled in by
 *  _CPU_Initialize and copied into the task's FP context area during
 *  _CPU_Context_Initialize.
 */

SCORE_EXTERN Context_Control_fp  _CPU_Null_fp_context;

/*
 *  On some CPUs, RTEMS supports a software managed interrupt stack.
 *  This stack is allocated by the Interrupt Manager and the switch
 *  is performed in _ISR_Handler.  These variables contain pointers
 *  to the lowest and highest addresses in the chunk of memory allocated
 *  for the interrupt stack.  Since it is unknown whether the stack
 *  grows up or down (in general), this gives the CPU dependent
 *  code the option of picking the version it wants to use.
 *
 *  NOTE: These two variables are required if the macro
 *        CPU_HAS_SOFTWARE_INTERRUPT_STACK is defined as TRUE.
 */

SCORE_EXTERN void               *_CPU_Interrupt_stack_low;
SCORE_EXTERN void               *_CPU_Interrupt_stack_high;

/*
 *  With some compilation systems, it is difficult if not impossible to
 *  call a high-level language routine from assembly language.  This
 *  is especially true of commercial Ada compilers and name-mangling
 *  C++ ones.  This variable can be optionally defined by the CPU porter
 *  and contains the address of the routine _Thread_Dispatch.  This
 *  can make it easier to invoke that routine at the end of the interrupt
 *  sequence (if a dispatch is necessary).
 */

SCORE_EXTERN void           (*_CPU_Thread_dispatch_pointer)();

/*
 *  Nothing prevents the porter from declaring more CPU specific variables.
 */

/* XXX: if needed, put more variables here */

/*
 *  The size of the floating point context area.  On some CPUs this
 *  will not be a "sizeof" because the format of the floating point
 *  area is not defined -- only the size is.  This is usually on
 *  CPUs with a "floating point save context" instruction.
 */

#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )

/*
 *  Amount of extra stack (above minimum stack size) required by
 *  system initialization thread.  Remember that in a multiprocessor
 *  system the system initialization thread becomes the MP server thread.
 */

#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0

/*
 *  This defines the number of entries in the ISR_Vector_table managed
 *  by RTEMS.
 */

#define CPU_INTERRUPT_NUMBER_OF_VECTORS      8
#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER  (CPU_INTERRUPT_NUMBER_OF_VECTORS - 1)

/*
 *  This is defined if the port has a special way to report the ISR nesting
 *  level.  Most ports maintain the variable _ISR_Nest_level.
 */

#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE

/*
 *  Should be large enough to run all RTEMS tests.  This ensures
 *  that a "reasonably" small application should not have any problems.
 */

#define CPU_STACK_MINIMUM_SIZE          (2048*sizeof(unsigned32))

/*
 *  CPU's worst alignment requirement for data types on a byte boundary.  This
 *  alignment does not take into account the requirements for the stack.
 */

#define CPU_ALIGNMENT              8

/*
 *  This number corresponds to the byte alignment requirement for the
 *  heap handler.  This alignment requirement may be stricter than that
 *  for the data types alignment specified by CPU_ALIGNMENT.  It is
 *  common for the heap to follow the same alignment requirement as
 *  CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict enough for the heap,
 *  then this should be set to CPU_ALIGNMENT.
 *
 *  NOTE:  This does not have to be a power of 2.  It does have to
 *         be greater than or equal to CPU_ALIGNMENT.
 */

#define CPU_HEAP_ALIGNMENT         CPU_ALIGNMENT

/*
 *  This number corresponds to the byte alignment requirement for memory
 *  buffers allocated by the partition manager.  This alignment requirement
 *  may be stricter than that for the data types alignment specified by
 *  CPU_ALIGNMENT.  It is common for the partition to follow the same
 *  alignment requirement as CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict
 *  enough for the partition, then this should be set to CPU_ALIGNMENT.
 *
 *  NOTE:  This does not have to be a power of 2.  It does have to
 *         be greater than or equal to CPU_ALIGNMENT.
 */

#define CPU_PARTITION_ALIGNMENT    CPU_ALIGNMENT

/*
 *  This number corresponds to the byte alignment requirement for the
 *  stack.  This alignment requirement may be stricter than that for the
 *  data types alignment specified by CPU_ALIGNMENT.  If the CPU_ALIGNMENT
 *  is strict enough for the stack, then this should be set to 0.
 *
 *  NOTE:  This must be a power of 2, either 0 or greater than CPU_ALIGNMENT.
 */

#define CPU_STACK_ALIGNMENT        CPU_ALIGNMENT

/*
 * ISR handler macros
 */

/*
 *  Support routine to initialize the RTEMS vector table after it is allocated.
 */

#define _CPU_Initialize_vectors()

/*
 *  Disable all interrupts for an RTEMS critical section.  The previous
 *  level is returned in _level.
 */

#define _CPU_ISR_Disable( _int_level ) \
  do{ \
        _int_level = mips_disable_interrupts(); \
  }while(0)

/*
 *  Enable interrupts to the previous level (returned by _CPU_ISR_Disable).
 *  This indicates the end of an RTEMS critical section.  The parameter
 *  _level is not modified.
 */

#define _CPU_ISR_Enable( _level )  \
  do{ \
        mips_enable_interrupts(_level); \
  }while(0)

/*
 *  This temporarily restores interrupts to _level before immediately
 *  disabling them again.  This is used to divide long RTEMS critical
 *  sections into two or more parts.  The parameter _level is not
 *  modified.
 */

#define _CPU_ISR_Flash( _xlevel ) \
  do{ \
        int _scratch; \
        _CPU_ISR_Enable( _xlevel ); \
        _CPU_ISR_Disable( _scratch ); \
  }while(0)
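
/*
 *  Illustrative sketch only (not part of the port): how the three ISR
 *  macros above are typically combined to form an RTEMS critical
 *  section, with _CPU_ISR_Flash briefly re-enabling interrupts in the
 *  middle of a long section.  The variable name and the work done are
 *  hypothetical.
 */
#if 0
  {
    int _level;

    _CPU_ISR_Disable( _level );      /* enter critical section            */
    /* ... first part of the critical work ... */
    _CPU_ISR_Flash( _level );        /* let pending interrupts in briefly */
    /* ... second part of the critical work ... */
    _CPU_ISR_Enable( _level );       /* leave critical section            */
  }
#endif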

/*
 *  Map interrupt level in task mode onto the hardware that the CPU
 *  actually provides.  Currently, interrupt levels which do not
 *  map onto the CPU in a generic fashion are undefined.  Someday,
 *  it would be nice if these were "mapped" by the application
 *  via a callout.  For example, m68k has 8 levels 0 - 7, levels
 *  8 - 255 would be available for bsp/application specific meaning.
 *  This could be used to manage a programmable interrupt controller
 *  via the rtems_task_mode directive.
 */
extern void _CPU_ISR_Set_level( unsigned32 _new_level );

unsigned32 _CPU_ISR_Get_level( void );

/* end of ISR handler macros */

/* Context handler macros */

/*
 *  Initialize the context to a state suitable for starting a
 *  task after a context restore operation.  Generally, this
 *  involves:
 *
 *     - setting a starting address
 *     - preparing the stack
 *     - preparing the stack and frame pointers
 *     - setting the proper interrupt level in the context
 *     - initializing the floating point context
 *
 *  This routine generally does not set any unnecessary register
 *  in the context.  The state of the "general data" registers is
 *  undefined at task start time.
 *
 *  NOTE: The _is_fp parameter is TRUE if the thread is to be a floating
 *        point thread.  This is typically only used on CPUs where the
 *        FPU may be easily disabled by software such as on the SPARC
 *        where the PSR contains an enable FPU bit.
 */

#define _CPU_Context_Initialize( _the_context, _stack_base, _size, \
                                 _isr, _entry_point, _is_fp ) \
  { \
        unsigned32 _stack_tmp = (unsigned32)(_stack_base) + (_size) - CPU_STACK_ALIGNMENT; \
        _stack_tmp &= ~(CPU_STACK_ALIGNMENT - 1); \
        (_the_context)->sp = _stack_tmp; \
        (_the_context)->fp = _stack_tmp; \
        (_the_context)->ra = (unsigned64)_entry_point; \
        (_the_context)->c0_sr = 0; \
  }
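
/*
 *  For example (illustrative values only): with _stack_base == 0x80001002,
 *  _size == 0x1000 and CPU_STACK_ALIGNMENT == 8, the macro above computes
 *  0x80001002 + 0x1000 - 8 == 0x80001FFA, masks it with ~7, and therefore
 *  starts sp and fp at 0x80001FF8 -- just below the top of the stack and
 *  aligned to an 8 byte boundary.
 */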

/*
 *  This routine is responsible for somehow restarting the currently
 *  executing task.  If you are lucky, then all that is necessary
 *  is restoring the context.  Otherwise, there will need to be
 *  a special assembly routine which does something special in this
 *  case.  Context_Restore should work most of the time.  It will
 *  not work if restarting self conflicts with the stack frame
 *  assumptions of restoring a context.
 */

#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) );

/*
 *  The purpose of this macro is to allow the initial pointer into
 *  a floating point context area (used to save the floating point
 *  context) to be at an arbitrary place in the floating point
 *  context area.
 *
 *  This is necessary because some FP units are designed to have
 *  their context saved as a stack which grows into lower addresses.
 *  Other FP units can be saved by simply moving registers into offsets
 *  from the base of the context area.  Finally some FP units provide
 *  a "dump context" instruction which could fill in from high to low
 *  or low to high based on the whim of the CPU designers.
 */

#define _CPU_Context_Fp_start( _base, _offset ) \
   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )

/*
 *  This routine initializes the FP context area passed to it.
 *  There are a few standard ways in which to initialize the
 *  floating point context.  The code included for this macro assumes
 *  that this is a CPU in which an "initial" FP context was saved into
 *  _CPU_Null_fp_context and it simply copies it to the destination
 *  context passed to it.
 *
 *  Other models include (1) not doing anything, and (2) putting
 *  a "null FP status word" in the correct place in the FP context.
 */

#define _CPU_Context_Initialize_fp( _destination ) \
  { \
   *((Context_Control_fp *) *((void **) _destination)) = _CPU_Null_fp_context; \
  }

/* end of Context handler macros */

/* Fatal Error manager macros */

/*
 *  This routine copies _error into a known place -- typically a stack
 *  location or a register, optionally disables interrupts, and
 *  halts/stops the CPU.
 */

#define _CPU_Fatal_halt( _error ) \
  { \
    mips_disable_global_interrupts(); \
    mips_fatal_error(_error); \
  }

/* end of Fatal Error manager macros */

/* Bitfield handler macros */

/*
 *  This routine sets _output to the bit number of the first bit
 *  set in _value.  _value is of CPU dependent type Priority_Bit_map_control.
 *  This type may be either 16 or 32 bits wide although only the 16
 *  least significant bits will be used.
 *
 *  There are a number of variables in using a "find first bit" type
 *  instruction.
 *
 *    (1) What happens when run on a value of zero?
 *    (2) Bits may be numbered from MSB to LSB or vice-versa.
 *    (3) The numbering may be zero or one based.
 *    (4) The "find first bit" instruction may search from MSB or LSB.
 *
 *  RTEMS guarantees that (1) will never happen so it is not a concern.
 *  (2),(3), (4) are handled by the macros _CPU_Priority_mask() and
 *  _CPU_Priority_bits_index().  These three form a set of routines
 *  which must logically operate together.  Bits in the _value are
 *  set and cleared based on masks built by _CPU_Priority_mask().
 *  The basic major and minor values calculated by _Priority_Major()
 *  and _Priority_Minor() are "massaged" by _CPU_Priority_bits_index()
 *  to properly range between the values returned by the "find first bit"
 *  instruction.  This makes it possible for _Priority_Get_highest() to
 *  calculate the major and directly index into the minor table.
 *  This mapping is necessary to ensure that 0 (a high priority major/minor)
 *  is the first bit found.
 *
 *  This entire "find first bit" and mapping process depends heavily
 *  on the manner in which a priority is broken into a major and minor
 *  components with the major being the 4 MSB of a priority and minor
 *  the 4 LSB.  Thus (0 << 4) + 0 corresponds to priority 0 -- the highest
 *  priority.  And (15 << 4) + 14 corresponds to priority 254 -- the next
 *  to the lowest priority.
 *
 *  If your CPU does not have a "find first bit" instruction, then
 *  there are ways to make do without it.  Here are a handful of ways
 *  to implement this in software:
 *
 *    - a series of 16 bit test instructions
 *    - a "binary search using if's"
 *    - _number = 0
 *      if _value > 0x00ff
 *        _value >>= 8
 *        _number = 8;
 *
 *      if _value > 0x000f
 *        _value >>= 4
 *        _number += 4
 *
 *      _number += bit_set_table[ _value ]
 *
 *    where bit_set_table[ 16 ] has values which indicate the first
 *      bit set.  (An illustrative C sketch of this approach follows
 *      the generic bitfield defines below.)
 */

#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
#define CPU_USE_GENERIC_BITFIELD_DATA TRUE

#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)

#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
  { \
    (_output) = 0;   /* do something to prevent warnings */ \
  }

#endif
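
/*
 *  Illustrative sketch only (this port uses the generic bitfield code,
 *  so nothing below is compiled): one way to implement the software
 *  "find first bit" search outlined in the comment above.  The function
 *  and table names are hypothetical.  It returns the zero based index,
 *  counted from the least significant bit, of the most significant bit
 *  set in a non-zero 16 bit value.
 */
#if 0
static inline unsigned int _CPU_Find_first_bit_example( unsigned int _value )
{
  /* index of the most significant set bit within a 4 bit value;
     entry 0 is unused because RTEMS never searches a zero value   */
  static const unsigned char _Msb_of_nibble[ 16 ] =
    { 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3 };
  unsigned int _number = 0;

  if ( _value > 0x00ff ) {   /* answer lies in the upper byte   */
    _value >>= 8;
    _number = 8;
  }
  if ( _value > 0x000f ) {   /* answer lies in the upper nibble */
    _value >>= 4;
    _number += 4;
  }
  return _number + _Msb_of_nibble[ _value ];
}
#endif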

/* end of Bitfield handler macros */

/*
 *  This routine builds the mask which corresponds to the bit fields
 *  as searched by _CPU_Bitfield_Find_first_bit().  See the discussion
 *  for that routine.
 */

#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)

#define _CPU_Priority_Mask( _bit_number ) \
  ( 1 << (_bit_number) )

#endif

/*
 *  This routine translates the bit numbers returned by
 *  _CPU_Bitfield_Find_first_bit() into something suitable for use as
 *  a major or minor component of a priority.  See the discussion
 *  for that routine.
 */

#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)

#define _CPU_Priority_bits_index( _priority ) \
  (_priority)

#endif

/* end of Priority handler macros */

/* functions */

/*
 *  _CPU_Initialize
 *
 *  This routine performs CPU dependent initialization.
 */

void _CPU_Initialize(
  rtems_cpu_table  *cpu_table,
  void      (*thread_dispatch)
);

/*
 *  _CPU_ISR_install_raw_handler
 *
 *  This routine installs a "raw" interrupt handler directly into the
 *  processor's vector table.
 */

void _CPU_ISR_install_raw_handler(
  unsigned32  vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_ISR_install_vector
 *
 *  This routine installs an interrupt vector.
 */

void _CPU_ISR_install_vector(
  unsigned32  vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_Install_interrupt_stack
 *
 *  This routine installs the hardware interrupt stack pointer.
 *
 *  NOTE:  It need only be provided if CPU_HAS_HARDWARE_INTERRUPT_STACK
 *         is TRUE.
 */

void _CPU_Install_interrupt_stack( void );

/*
 *  _CPU_Thread_Idle_body
 *
 *  This routine is the CPU dependent IDLE thread body.
 *
 *  NOTE:  It need only be provided if CPU_PROVIDES_IDLE_THREAD_BODY
 *         is TRUE.
 */

void _CPU_Thread_Idle_body( void );

/*
 *  _CPU_Context_switch
 *
 *  This routine switches from the run context to the heir context.
 */

void _CPU_Context_switch(
  Context_Control  *run,
  Context_Control  *heir
);

/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
 *
 *  NOTE: May be unnecessary to reload some registers.
 */

void _CPU_Context_restore(
  Context_Control *new_context
);

/*
 *  _CPU_Context_save_fp
 *
 *  This routine saves the floating point context passed to it.
 */

void _CPU_Context_save_fp(
  void **fp_context_ptr
);

/*
 *  _CPU_Context_restore_fp
 *
 *  This routine restores the floating point context passed to it.
 */

void _CPU_Context_restore_fp(
  void **fp_context_ptr
);

/*  The following routine swaps the endian format of an unsigned int.
 *  It must be static because it is referenced indirectly.
 *
 *  This version will work on any processor, but if there is a better
 *  way for your CPU PLEASE use it.  The most common way to do this is to:
 *
 *     swap least significant two bytes with 16-bit rotate
 *     swap upper and lower 16-bits
 *     swap most significant two bytes with 16-bit rotate
 *
 *  Some CPUs have special instructions which swap a 32-bit quantity in
 *  a single instruction (e.g. i486).  It is probably best to avoid
 *  an "endian swapping control bit" in the CPU.  One good reason is
 *  that interrupts would probably have to be disabled to ensure that
 *  an interrupt does not try to access the same "chunk" with the wrong
 *  endianness.  Another good reason is that on some CPUs, the endian bit
 *  changes the endianness of ALL fetches -- both code and data -- so the
 *  code will be fetched incorrectly.
 */

static inline unsigned int CPU_swap_u32(
  unsigned int value
)
{
  unsigned32 byte1, byte2, byte3, byte4, swapped;

  byte4 = (value >> 24) & 0xff;
  byte3 = (value >> 16) & 0xff;
  byte2 = (value >> 8)  & 0xff;
  byte1 =  value        & 0xff;

  swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
  return( swapped );
}

#define CPU_swap_u16( value ) \
  (((value&0xff) << 8) | ((value >> 8)&0xff))
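
/*
 *  For example (illustrative values only): CPU_swap_u32( 0x12345678 )
 *  yields 0x78563412 and CPU_swap_u16( 0x1234 ) yields 0x3412.
 */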

/*
 *  Miscellaneous prototypes
 *
 *  NOTE:  The names should have mips64orion in them.
 */

void disable_int( unsigned32 mask );
void enable_int( unsigned32 mask );

#ifdef __cplusplus
}
#endif

#endif
