OpenCores
URL https://opencores.org/ocsvn/openrisc/openrisc/trunk

Subversion Repositories openrisc

[/] [openrisc/] [trunk/] [rtos/] [rtems/] [c/] [src/] [exec/] [score/] [cpu/] [mips64orion/] [rtems/] [score/] [cpu.h] - Blame information for rev 30

Go to most recent revision | Details | Compare with Previous | View Log

Line No. Rev Author Line
1 30 unneback
/*  cpu.h
2
 *
3
 *  This include file contains information pertaining to the IDT 4650
4
 *  processor.
5
 *
6
 *  Author:     Craig Lebakken <craigl@transition.com>
7
 *
8
 *  COPYRIGHT (c) 1996 by Transition Networks Inc.
9
 *
10
 *  To anyone who acknowledges that this file is provided "AS IS"
11
 *  without any express or implied warranty:
12
 *      permission to use, copy, modify, and distribute this file
13
 *      for any purpose is hereby granted without fee, provided that
14
 *      the above copyright notice and this notice appears in all
15
 *      copies, and that the name of Transition Networks not be used in
16
 *      advertising or publicity pertaining to distribution of the
17
 *      software without specific, written prior permission.
18
 *      Transition Networks makes no representations about the suitability
19
 *      of this software for any purpose.
20
 *
21
 *  Derived from c/src/exec/score/cpu/no_cpu/cpu.h:
22
 *
23
 *  COPYRIGHT (c) 1989-1999.
24
 *  On-Line Applications Research Corporation (OAR).
25
 *
26
 *  The license and distribution terms for this file may be
27
 *  found in the file LICENSE in this distribution or at
28
 *  http://www.OARcorp.com/rtems/license.html.
29
 *
30
 *  $Id: cpu.h,v 1.2 2001-09-27 11:59:28 chris Exp $
31
 */
32
/* @(#)cpu.h       08/29/96     1.7 */
33
 
34
#ifndef __CPU_h
35
#define __CPU_h
36
 
37
#ifdef __cplusplus
38
extern "C" {
39
#endif
40
 
41
#include <rtems/score/mips64orion.h>       /* pick up machine definitions */
42
#ifndef ASM
43
#include <rtems/score/mipstypes.h>
44
#endif
45
 
46
extern int mips_disable_interrupts( void );
47
extern void mips_enable_interrupts( int _level );
48
extern int mips_disable_global_interrupts( void );
49
extern void mips_enable_global_interrupts( void );
50
extern void mips_fatal_error ( int error );
51
 
52
/* conditional compilation parameters */
53
 
54
/*
55
 *  Should the calls to _Thread_Enable_dispatch be inlined?
56
 *
57
 *  If TRUE, then they are inlined.
58
 *  If FALSE, then a subroutine call is made.
59
 *
60
 *  Basically this is an example of the classic trade-off of size
61
 *  versus speed.  Inlining the call (TRUE) typically increases the
62
 *  size of RTEMS while speeding up the enabling of dispatching.
63
 *  [NOTE: In general, the _Thread_Dispatch_disable_level will
64
 *  only be 0 or 1 unless you are in an interrupt handler and that
65
 *  interrupt handler invokes the executive.]  When not inlined
66
 *  something calls _Thread_Enable_dispatch which in turns calls
67
 *  _Thread_Dispatch.  If the enable dispatch is inlined, then
68
 *  one subroutine call is avoided entirely.
69
 */
70
 
71
#define CPU_INLINE_ENABLE_DISPATCH       TRUE
72
 
73
/*
74
 *  Should the body of the search loops in _Thread_queue_Enqueue_priority
75
 *  be unrolled one time?  In unrolled each iteration of the loop examines
76
 *  two "nodes" on the chain being searched.  Otherwise, only one node
77
 *  is examined per iteration.
78
 *
79
 *  If TRUE, then the loops are unrolled.
80
 *  If FALSE, then the loops are not unrolled.
81
 *
82
 *  The primary factor in making this decision is the cost of disabling
83
 *  and enabling interrupts (_ISR_Flash) versus the cost of rest of the
84
 *  body of the loop.  On some CPUs, the flash is more expensive than
85
 *  one iteration of the loop body.  In this case, it might be desirable
86
 *  to unroll the loop.  It is important to note that on some CPUs, this
87
 *  code is the longest interrupt disable period in RTEMS.  So it is
88
 *  necessary to strike a balance when setting this parameter.
89
 */
90
 
91
#define CPU_UNROLL_ENQUEUE_PRIORITY      TRUE
92
 
93
/*
94
 *  Does RTEMS manage a dedicated interrupt stack in software?
95
 *
96
 *  If TRUE, then a stack is allocated in _ISR_Handler_initialization.
97
 *  If FALSE, nothing is done.
98
 *
99
 *  If the CPU supports a dedicated interrupt stack in hardware,
100
 *  then it is generally the responsibility of the BSP to allocate it
101
 *  and set it up.
102
 *
103
 *  If the CPU does not support a dedicated interrupt stack, then
104
 *  the porter has two options: (1) execute interrupts on the
105
 *  stack of the interrupted task, and (2) have RTEMS manage a dedicated
106
 *  interrupt stack.
107
 *
108
 *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
109
 *
110
 *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
111
 *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
112
 *  possible that both are FALSE for a particular CPU.  Although it
113
 *  is unclear what that would imply about the interrupt processing
114
 *  procedure on that CPU.
115
 */
116
 
117
#define CPU_HAS_SOFTWARE_INTERRUPT_STACK FALSE
118
 
119
/*
120
 *  Does this CPU have hardware support for a dedicated interrupt stack?
121
 *
122
 *  If TRUE, then it must be installed during initialization.
123
 *  If FALSE, then no installation is performed.
124
 *
125
 *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
126
 *
127
 *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
128
 *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
129
 *  possible that both are FALSE for a particular CPU.  Although it
130
 *  is unclear what that would imply about the interrupt processing
131
 *  procedure on that CPU.
132
 */
133
 
134
#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE
135
 
136
/*
137
 *  Does RTEMS allocate a dedicated interrupt stack in the Interrupt Manager?
138
 *
139
 *  If TRUE, then the memory is allocated during initialization.
140
 *  If FALSE, then the memory is not allocated by RTEMS.
141
 *
142
 *  This should be TRUE if CPU_HAS_SOFTWARE_INTERRUPT_STACK is TRUE
143
 *  or CPU_INSTALL_HARDWARE_INTERRUPT_STACK is TRUE.
144
 */
145
 
146
#define CPU_ALLOCATE_INTERRUPT_STACK FALSE
147
 
148
/*
149
 *  Does the RTEMS invoke the user's ISR with the vector number and
150
 *  a pointer to the saved interrupt frame (1) or just the vector
151
 *  number (0)?
152
 */
153
 
154
#define CPU_ISR_PASSES_FRAME_POINTER 0
155
 
156
/*
157
 *  Does the CPU have hardware floating point?
158
 *
159
 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is supported.
160
 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is ignored.
161
 *
162
 *  If there is a FP coprocessor such as the i387 or mc68881, then
163
 *  the answer is TRUE.
164
 *
165
 *  The macro name "MIPS64ORION_HAS_FPU" should be made CPU specific.
166
 *  It indicates whether or not this CPU model has FP support.  For
167
 *  example, it would be possible to have an i386_nofp CPU model
168
 *  which set this to false to indicate that you have an i386 without
169
 *  an i387 and wish to leave floating point support out of RTEMS.
170
 */
171
 
172
#if ( MIPS64ORION_HAS_FPU == 1 )
173
#define CPU_HARDWARE_FP     TRUE
174
#else
175
#define CPU_HARDWARE_FP     FALSE
176
#endif
177
 
178
/*
179
 *  Are all tasks RTEMS_FLOATING_POINT tasks implicitly?
180
 *
181
 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is assumed.
182
 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is followed.
183
 *
184
 *  So far, the only CPU in which this option has been used is the
185
 *  HP PA-RISC.  The HP C compiler and gcc both implicitly use the
186
 *  floating point registers to perform integer multiplies.  If
187
 *  a function which you would not think utilize the FP unit DOES,
188
 *  then one can not easily predict which tasks will use the FP hardware.
189
 *  In this case, this option should be TRUE.
190
 *
191
 *  If CPU_HARDWARE_FP is FALSE, then this should be FALSE as well.
192
 */
193
 
194
#define CPU_ALL_TASKS_ARE_FP    FALSE
195
 
196
/*
197
 *  Should the IDLE task have a floating point context?
198
 *
199
 *  If TRUE, then the IDLE task is created as a RTEMS_FLOATING_POINT task
200
 *  and it has a floating point context which is switched in and out.
201
 *  If FALSE, then the IDLE task does not have a floating point context.
202
 *
203
 *  Setting this to TRUE negatively impacts the time required to preempt
204
 *  the IDLE task from an interrupt because the floating point context
205
 *  must be saved as part of the preemption.
206
 */
207
 
208
#define CPU_IDLE_TASK_IS_FP      FALSE
209
 
210
/*
211
 *  Should the saving of the floating point registers be deferred
212
 *  until a context switch is made to another different floating point
213
 *  task?
214
 *
215
 *  If TRUE, then the floating point context will not be stored until
216
 *  necessary.  It will remain in the floating point registers and not
217
 *  disturbed until another floating point task is switched to.
218
 *
219
 *  If FALSE, then the floating point context is saved when a floating
220
 *  point task is switched out and restored when the next floating point
221
 *  task is restored.  The state of the floating point registers between
222
 *  those two operations is not specified.
223
 *
224
 *  If the floating point context does NOT have to be saved as part of
225
 *  interrupt dispatching, then it should be safe to set this to TRUE.
226
 *
227
 *  Setting this flag to TRUE results in using a different algorithm
228
 *  for deciding when to save and restore the floating point context.
229
 *  The deferred FP switch algorithm minimizes the number of times
230
 *  the FP context is saved and restored.  The FP context is not saved
231
 *  until a context switch is made to another, different FP task.
232
 *  Thus in a system with only one FP task, the FP context will never
233
 *  be saved or restored.
234
 */
235
 
236
#define CPU_USE_DEFERRED_FP_SWITCH       TRUE
237
 
238
/*
239
 *  Does this port provide a CPU dependent IDLE task implementation?
240
 *
241
 *  If TRUE, then the routine _CPU_Internal_threads_Idle_thread_body
242
 *  must be provided and is the default IDLE thread body instead of
243
 *  _Internal_threads_Idle_thread_body.
244
 *
245
 *  If FALSE, then use the generic IDLE thread body if the BSP does
246
 *  not provide one.
247
 *
248
 *  This is intended to allow for supporting processors which have
249
 *  a low power or idle mode.  When the IDLE thread is executed, then
250
 *  the CPU can be powered down.
251
 *
252
 *  The order of precedence for selecting the IDLE thread body is:
253
 *
254
 *    1.  BSP provided
255
 *    2.  CPU dependent (if provided)
256
 *    3.  generic (if no BSP and no CPU dependent)
257
 */
258
 
259
/* we can use the low power wait instruction for the IDLE thread */
260
#define CPU_PROVIDES_IDLE_THREAD_BODY    TRUE 
261
 
262
/*
263
 *  Does the stack grow up (toward higher addresses) or down
264
 *  (toward lower addresses)?
265
 *
266
 *  If TRUE, then the stack grows upward.
267
 *  If FALSE, then the stack grows toward smaller addresses.
268
 */
269
 
270
/* our stack grows down */
271
#define CPU_STACK_GROWS_UP               FALSE
272
 
273
/*
274
 *  The following is the variable attribute used to force alignment
275
 *  of critical RTEMS structures.  On some processors it may make
276
 *  sense to have these aligned on tighter boundaries than
277
 *  the minimum requirements of the compiler in order to have as
278
 *  much of the critical data area as possible in a cache line.
279
 *
280
 *  The placement of this macro in the declaration of the variables
281
 *  is based on the syntactic requirements of the GNU C
282
 *  "__attribute__" extension.  For example with GNU C, use
283
 *  the following to force a structures to a 32 byte boundary.
284
 *
285
 *      __attribute__ ((aligned (32)))
286
 *
287
 *  NOTE:  Currently only the Priority Bit Map table uses this feature.
288
 *         To benefit from using this, the data must be heavily
289
 *         used so it will stay in the cache and used frequently enough
290
 *         in the executive to justify turning this on.
291
 */
292
 
293
/* our cache line size is 16 bytes */
294
#if __GNUC__
295
#define CPU_STRUCTURE_ALIGNMENT __attribute__ ((aligned (16)))
296
#else
297
#define CPU_STRUCTURE_ALIGNMENT 
298
#endif
299
 
300
/*
301
 *  Define what is required to specify how the network to host conversion
302
 *  routines are handled.
303
 */
304
 
305
#define CPU_HAS_OWN_HOST_TO_NETWORK_ROUTINES     FALSE
306
#define CPU_BIG_ENDIAN                           TRUE
307
#define CPU_LITTLE_ENDIAN                        FALSE
308
 
309
/*
310
 *  The following defines the number of bits actually used in the
311
 *  interrupt field of the task mode.  How those bits map to the
312
 *  CPU interrupt levels is defined by the routine _CPU_ISR_Set_level().
313
 */
314
 
315
#define CPU_MODES_INTERRUPT_MASK   0x00000001
316
 
317
/*
318
 *  Processor defined structures
319
 *
320
 *  Examples structures include the descriptor tables from the i386
321
 *  and the processor control structure on the i960ca.
322
 */
323
 
324
/* may need to put some structures here.  */
325
 
326
/*
327
 * Contexts
328
 *
329
 *  Generally there are 2 types of context to save.
330
 *     1. Interrupt registers to save
331
 *     2. Task level registers to save
332
 *
333
 *  This means we have the following 3 context items:
334
 *     1. task level context stuff::  Context_Control
335
 *     2. floating point task stuff:: Context_Control_fp
336
 *     3. special interrupt level context :: Context_Control_interrupt
337
 *
338
 *  On some processors, it is cost-effective to save only the callee
339
 *  preserved registers during a task context switch.  This means
340
 *  that the ISR code needs to save those registers which do not
341
 *  persist across function calls.  It is not mandatory to make this
342
 *  distinction between the caller/callee saved registers for the
343
 *  purpose of minimizing context saved during task switch and on interrupts.
344
 *  If the cost of saving extra registers is minimal, simplicity is the
345
 *  choice.  Save the same context on interrupt entry as for tasks in
346
 *  this case.
347
 *
348
 *  Additionally, if gdb is to be made aware of RTEMS tasks for this CPU, then
349
 *  care should be used in designing the context area.
350
 *
351
 *  On some CPUs with hardware floating point support, the Context_Control_fp
352
 *  structure will not be used or it simply consist of an array of a
353
 *  fixed number of bytes.   This is done when the floating point context
354
 *  is dumped by a "FP save context" type instruction and the format
355
 *  is not really defined by the CPU.  In this case, there is no need
356
 *  to figure out the exact format -- only the size.  Of course, although
357
 *  this is enough information for RTEMS, it is probably not enough for
358
 *  a debugger such as gdb.  But that is another problem.
359
 */
360
 
361
/* WARNING: If this structure is modified, the constants in cpu.h must be updated. */
/*
 *  Integer task context saved/restored on a task switch.  Only the
 *  MIPS callee-saved registers (s0-s7), the stack/frame pointers,
 *  the return address, and the CP0 status/EPC are kept -- the
 *  caller-saved registers need not survive a context switch.
 */
typedef struct {
    unsigned64 s0;      /* callee-saved register s0 */
    unsigned64 s1;      /* callee-saved register s1 */
    unsigned64 s2;      /* callee-saved register s2 */
    unsigned64 s3;      /* callee-saved register s3 */
    unsigned64 s4;      /* callee-saved register s4 */
    unsigned64 s5;      /* callee-saved register s5 */
    unsigned64 s6;      /* callee-saved register s6 */
    unsigned64 s7;      /* callee-saved register s7 */
    unsigned64 sp;      /* stack pointer */
    unsigned64 fp;      /* frame pointer */
    unsigned64 ra;      /* return address */
    unsigned64 c0_sr;   /* CP0 status register */
    unsigned64 c0_epc;  /* CP0 exception program counter */
} Context_Control;
377
 
378
/* WARNING: If this structure is modified, the constants in cpu.h must be updated. */
/*
 *  Floating point task context: the 32 FPU data registers fp0-fp31.
 *  Saved/restored only for RTEMS_FLOATING_POINT tasks (deferred, per
 *  CPU_USE_DEFERRED_FP_SWITCH).
 *  NOTE(review): fields are unsigned32 while the integer context uses
 *  unsigned64 -- confirm this matches the FP save/restore assembly.
 */
typedef struct {
    unsigned32      fp0;
    unsigned32      fp1;
    unsigned32      fp2;
    unsigned32      fp3;
    unsigned32      fp4;
    unsigned32      fp5;
    unsigned32      fp6;
    unsigned32      fp7;
    unsigned32      fp8;
    unsigned32      fp9;
    unsigned32      fp10;
    unsigned32      fp11;
    unsigned32      fp12;
    unsigned32      fp13;
    unsigned32      fp14;
    unsigned32      fp15;
    unsigned32      fp16;
    unsigned32      fp17;
    unsigned32      fp18;
    unsigned32      fp19;
    unsigned32      fp20;
    unsigned32      fp21;
    unsigned32      fp22;
    unsigned32      fp23;
    unsigned32      fp24;
    unsigned32      fp25;
    unsigned32      fp26;
    unsigned32      fp27;
    unsigned32      fp28;
    unsigned32      fp29;
    unsigned32      fp30;
    unsigned32      fp31;
} Context_Control_fp;
413
 
414
/*
 *  Context preserved across interrupt processing in addition to the
 *  integer context.  NOTE(review): only one opaque register is listed
 *  here -- confirm against the ISR entry/exit assembly code.
 */
typedef struct {
    unsigned32 special_interrupt_register;
} CPU_Interrupt_frame;
417
 
418
 
419
/*
420
 *  The following table contains the information required to configure
421
 *  the mips processor specific parameters.
422
 */
423
 
424
typedef struct {
  void       (*pretasking_hook)( void );   /* BSP hook run before tasking starts */
  void       (*predriver_hook)( void );    /* BSP hook run before driver init */
  void       (*postdriver_hook)( void );   /* BSP hook run after driver init */
  void       (*idle_task)( void );         /* BSP-supplied IDLE body, if any */
  boolean      do_zero_of_workspace;       /* zero the RTEMS workspace at boot? */
  unsigned32   idle_task_stack_size;       /* stack size for the IDLE task */
  unsigned32   interrupt_stack_size;       /* size of the interrupt stack */
  unsigned32   extra_mpci_receive_server_stack; /* extra stack for MP server */
  void *     (*stack_allocate_hook)( unsigned32 ); /* custom stack allocator */
  void       (*stack_free_hook)( void* );          /* custom stack deallocator */
  /* end of fields required on all CPUs */
 
  unsigned32   clicks_per_microsecond;     /* timer clicks per microsecond
                                              (mips64orion-specific) */
}   rtems_cpu_table;
439
 
440
/*
441
 *  Macros to access required entries in the CPU Table are in
442
 *  the file rtems/system.h.
443
 */
444
 
445
/*
446
 *  Macros to access MIPS64ORION specific additions to the CPU Table
447
 */
448
 
449
/*  Accessor for the port-specific clicks_per_microsecond field of the
 *  CPU Table (filled in by the BSP's configuration table).
 */
#define rtems_cpu_configuration_get_clicks_per_microsecond() \
   (_CPU_Table.clicks_per_microsecond)
451
 
452
/*
453
 *  This variable is optional.  It is used on CPUs on which it is difficult
454
 *  to generate an "uninitialized" FP context.  It is filled in by
455
 *  _CPU_Initialize and copied into the task's FP context area during
456
 *  _CPU_Context_Initialize.
457
 */
458
 
459
SCORE_EXTERN Context_Control_fp  _CPU_Null_fp_context;
460
 
461
/*
462
 *  On some CPUs, RTEMS supports a software managed interrupt stack.
463
 *  This stack is allocated by the Interrupt Manager and the switch
464
 *  is performed in _ISR_Handler.  These variables contain pointers
465
 *  to the lowest and highest addresses in the chunk of memory allocated
466
 *  for the interrupt stack.  Since it is unknown whether the stack
467
 *  grows up or down (in general), this give the CPU dependent
468
 *  code the option of picking the version it wants to use.
469
 *
470
 *  NOTE: These two variables are required if the macro
471
 *        CPU_HAS_SOFTWARE_INTERRUPT_STACK is defined as TRUE.
472
 */
473
 
474
SCORE_EXTERN void               *_CPU_Interrupt_stack_low;
475
SCORE_EXTERN void               *_CPU_Interrupt_stack_high;
476
 
477
/*
478
 *  With some compilation systems, it is difficult if not impossible to
479
 *  call a high-level language routine from assembly language.  This
480
 *  is especially true of commercial Ada compilers and name mangling
481
 *  C++ ones.  This variable can be optionally defined by the CPU porter
482
 *  and contains the address of the routine _Thread_Dispatch.  This
483
 *  can make it easier to invoke that routine at the end of the interrupt
484
 *  sequence (if a dispatch is necessary).
485
 */
486
 
487
SCORE_EXTERN void           (*_CPU_Thread_dispatch_pointer)();
488
 
489
/*
490
 *  Nothing prevents the porter from declaring more CPU specific variables.
491
 */
492
 
493
/* XXX: if needed, put more variables here */
494
 
495
/*
496
 *  The size of the floating point context area.  On some CPUs this
497
 *  will not be a "sizeof" because the format of the floating point
498
 *  area is not defined -- only the size is.  This is usually on
499
 *  CPUs with a "floating point save context" instruction.
500
 */
501
 
502
#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )
503
 
504
/*
505
 *  Amount of extra stack (above minimum stack size) required by
506
 *  system initialization thread.  Remember that in a multiprocessor
507
 *  system the system initialization thread becomes the MP server thread.
508
 */
509
 
510
#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0
511
 
512
/*
513
 *  This defines the number of entries in the ISR_Vector_table managed
514
 *  by RTEMS.
515
 */
516
 
517
#define CPU_INTERRUPT_NUMBER_OF_VECTORS      8
518
#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER  (CPU_INTERRUPT_NUMBER_OF_VECTORS - 1)
519
 
520
/*
521
 *  Should be large enough to run all RTEMS tests.  This ensures
522
 *  that a "reasonable" small application should not have any problems.
523
 */
524
 
525
#define CPU_STACK_MINIMUM_SIZE          (2048*sizeof(unsigned32))
526
 
527
/*
528
 *  CPU's worst alignment requirement for data types on a byte boundary.  This
529
 *  alignment does not take into account the requirements for the stack.
530
 */
531
 
532
#define CPU_ALIGNMENT              8
533
 
534
/*
535
 *  This number corresponds to the byte alignment requirement for the
536
 *  heap handler.  This alignment requirement may be stricter than that
537
 *  for the data types alignment specified by CPU_ALIGNMENT.  It is
538
 *  common for the heap to follow the same alignment requirement as
539
 *  CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict enough for the heap,
540
 *  then this should be set to CPU_ALIGNMENT.
541
 *
542
 *  NOTE:  This does not have to be a power of 2.  It does have to
543
 *         be greater than or equal to CPU_ALIGNMENT.
544
 */
545
 
546
#define CPU_HEAP_ALIGNMENT         CPU_ALIGNMENT
547
 
548
/*
549
 *  This number corresponds to the byte alignment requirement for memory
550
 *  buffers allocated by the partition manager.  This alignment requirement
551
 *  may be stricter than that for the data types alignment specified by
552
 *  CPU_ALIGNMENT.  It is common for the partition to follow the same
553
 *  alignment requirement as CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict
554
 *  enough for the partition, then this should be set to CPU_ALIGNMENT.
555
 *
556
 *  NOTE:  This does not have to be a power of 2.  It does have to
557
 *         be greater than or equal to CPU_ALIGNMENT.
558
 */
559
 
560
#define CPU_PARTITION_ALIGNMENT    CPU_ALIGNMENT
561
 
562
/*
563
 *  This number corresponds to the byte alignment requirement for the
564
 *  stack.  This alignment requirement may be stricter than that for the
565
 *  data types alignment specified by CPU_ALIGNMENT.  If the CPU_ALIGNMENT
566
 *  is strict enough for the stack, then this should be set to 0.
567
 *
568
 *  NOTE:  This must be a power of 2 either 0 or greater than CPU_ALIGNMENT.
569
 */
570
 
571
#define CPU_STACK_ALIGNMENT        CPU_ALIGNMENT
572
 
573
/* ISR handler macros */
574
 
575
/*
576
 *  Disable all interrupts for an RTEMS critical section.  The previous
577
 *  level is returned in _level.
578
 */
579
 
580
/*
 *  Disable all maskable interrupts, returning the previous interrupt
 *  level through _int_level (opens an RTEMS critical section).
 *  The argument is parenthesized in the expansion so that expression
 *  arguments cannot change the meaning via operator precedence.
 */
#define _CPU_ISR_Disable( _int_level ) \
  do { \
    (_int_level) = mips_disable_interrupts(); \
  } while (0)
584
 
585
/*
586
 *  Enable interrupts to the previous level (returned by _CPU_ISR_Disable).
587
 *  This indicates the end of an RTEMS critical section.  The parameter
588
 *  _level is not modified.
589
 */
590
 
591
/*
 *  Restore interrupts to the level previously returned by
 *  _CPU_ISR_Disable (closes an RTEMS critical section).  _level is
 *  not modified; it is parenthesized in the expansion for safety
 *  against expression arguments.
 */
#define _CPU_ISR_Enable( _level )  \
  do { \
    mips_enable_interrupts( (_level) ); \
  } while (0)
595
 
596
/*
597
 *  This temporarily restores the interrupt to _level before immediately
598
 *  disabling them again.  This is used to divide long RTEMS critical
599
 *  sections into two or more parts.  The parameter _level is not
600
 * modified.
601
 */
602
 
603
/*
 *  Briefly re-enable interrupts at _xlevel, then disable them again,
 *  discarding the level returned by the second disable.  Used to split
 *  a long RTEMS critical section so pending interrupts can be serviced.
 */
#define _CPU_ISR_Flash( _xlevel ) \
  do{ \
        int _scratch; \
        _CPU_ISR_Enable( _xlevel ); \
        _CPU_ISR_Disable( _scratch ); \
  }while(0)
609
 
610
/*
611
 *  Map interrupt level in task mode onto the hardware that the CPU
612
 *  actually provides.  Currently, interrupt levels which do not
613
 *  map onto the CPU in a generic fashion are undefined.  Someday,
614
 *  it would be nice if these were "mapped" by the application
615
 *  via a callout.  For example, m68k has 8 levels 0 - 7, levels
616
 *  8 - 255 would be available for bsp/application specific meaning.
617
 *  This could be used to manage a programmable interrupt controller
618
 *  via the rtems_task_mode directive.
619
 */
620
extern void _CPU_ISR_Set_level( unsigned32 _new_level );
621
 
622
unsigned32 _CPU_ISR_Get_level( void );
623
 
624
/* end of ISR handler macros */
625
 
626
/* Context handler macros */
627
 
628
/*
629
 *  Initialize the context to a state suitable for starting a
630
 *  task after a context restore operation.  Generally, this
631
 *  involves:
632
 *
633
 *     - setting a starting address
634
 *     - preparing the stack
635
 *     - preparing the stack and frame pointers
636
 *     - setting the proper interrupt level in the context
637
 *     - initializing the floating point context
638
 *
639
 *  This routine generally does not set any unnecessary register
640
 *  in the context.  The state of the "general data" registers is
641
 *  undefined at task start time.
642
 *
643
 *  NOTE: This is_fp parameter is TRUE if the thread is to be a floating
644
 *        point thread.  This is typically only used on CPUs where the
645
 *        FPU may be easily disabled by software such as on the SPARC
646
 *        where the PSR contains an enable FPU bit.
647
 */
648
 
649
/*
 *  Initialize _the_context so a subsequent context restore starts the
 *  task:
 *    - sp and fp point at the CPU_STACK_ALIGNMENT-aligned top of the
 *      task stack (_stack_base + _size, rounded down)
 *    - ra holds the entry point, so the restore "returns" into the task
 *    - c0_sr starts as 0
 *  _isr and _is_fp are accepted for the generic RTEMS API but unused
 *  by this port.  Fixes vs. the original: _entry_point is parenthesized
 *  (the bare cast broke on expression arguments) and the body is wrapped
 *  in do/while(0) so the macro is safe inside if/else constructs.
 */
#define _CPU_Context_Initialize( _the_context, _stack_base, _size, \
                                 _isr, _entry_point, _is_fp ) \
  do { \
        unsigned32 _stack_tmp = \
          (unsigned32)(_stack_base) + (_size) - CPU_STACK_ALIGNMENT; \
        _stack_tmp &= ~(CPU_STACK_ALIGNMENT - 1); \
        (_the_context)->sp = _stack_tmp; \
        (_the_context)->fp = _stack_tmp; \
        (_the_context)->ra = (unsigned64)(_entry_point); \
        (_the_context)->c0_sr = 0; \
  } while (0)
659
 
660
/*
661
 *  This routine is responsible for somehow restarting the currently
662
 *  executing task.  If you are lucky, then all that is necessary
663
 *  is restoring the context.  Otherwise, there will need to be
664
 *  a special assembly routine which does something special in this
665
 *  case.  Context_Restore should work most of the time.  It will
666
 *  not work if restarting self conflicts with the stack frame
667
 *  assumptions of restoring a context.
668
 */
669
 
670
/*
 *  Restart the currently executing task by restoring its context.
 *  The trailing semicolon is supplied by the caller (the original
 *  macro baked one into the expansion, which breaks use inside
 *  if/else constructs).
 */
#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) )
672
 
673
/*
674
 *  The purpose of this macro is to allow the initial pointer into
675
 *  A floating point context area (used to save the floating point
676
 *  context) to be at an arbitrary place in the floating point
677
 *  context area.
678
 *
679
 *  This is necessary because some FP units are designed to have
680
 *  their context saved as a stack which grows into lower addresses.
681
 *  Other FP units can be saved by simply moving registers into offsets
682
 *  from the base of the context area.  Finally some FP units provide
683
 *  a "dump context" instruction which could fill in from high to low
684
 *  or low to high based on the whim of the CPU designers.
685
 */
686
 
687
/*
 *  Return the initial save pointer into an FP context area: this port
 *  simply uses _base advanced by _offset bytes.
 */
#define _CPU_Context_Fp_start( _base, _offset ) \
   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )
689
 
690
/*
691
 *  This routine initializes the FP context area passed to it to.
692
 *  There are a few standard ways in which to initialize the
693
 *  floating point context.  The code included for this macro assumes
694
 *  that this is a CPU in which a "initial" FP context was saved into
695
 *  _CPU_Null_fp_context and it simply copies it to the destination
696
 *  context passed to it.
697
 *
698
 *  Other models include (1) not doing anything, and (2) putting
699
 *  a "null FP status word" in the correct place in the FP context.
700
 */
701
 
702
/*
 *  Initialize the FP context area pointed to (indirectly) by
 *  _destination by structure-copying the "initial" FP context that
 *  _CPU_Initialize captured into _CPU_Null_fp_context.
 *  Fixes vs. the original: _destination is parenthesized in the
 *  expansion and the body is wrapped in do/while(0) so the macro is
 *  safe inside if/else constructs.
 */
#define _CPU_Context_Initialize_fp( _destination ) \
  do { \
    *((Context_Control_fp *) *((void **) (_destination))) = \
      _CPU_Null_fp_context; \
  } while (0)
706
 
707
/* end of Context handler macros */
708
 
709
/* Fatal Error manager macros */
710
 
711
/*
712
 *  This routine copies _error into a known place -- typically a stack
713
 *  location or a register, optionally disables interrupts, and
714
 *  halts/stops the CPU.
715
 */
716
 
717
/*
 *  Halt the system on a fatal error: globally disable interrupts,
 *  then hand _error to the port's fatal error routine.  Wrapped in
 *  do/while(0) (safe in if/else bodies) and the argument is
 *  parenthesized for safety against expression arguments.
 */
#define _CPU_Fatal_halt( _error ) \
  do { \
    mips_disable_global_interrupts(); \
    mips_fatal_error( (_error) ); \
  } while (0)
722
 
723
/* end of Fatal Error manager macros */
724
 
725
/* Bitfield handler macros */
726
 
727
/*
728
 *  This routine sets _output to the bit number of the first bit
729
 *  set in _value.  _value is of CPU dependent type Priority_Bit_map_control.
730
 *  This type may be either 16 or 32 bits wide although only the 16
731
 *  least significant bits will be used.
732
 *
733
 *  There are a number of variables in using a "find first bit" type
734
 *  instruction.
735
 *
736
 *    (1) What happens when run on a value of zero?
737
 *    (2) Bits may be numbered from MSB to LSB or vice-versa.
738
 *    (3) The numbering may be zero or one based.
739
 *    (4) The "find first bit" instruction may search from MSB or LSB.
740
 *
741
 *  RTEMS guarantees that (1) will never happen so it is not a concern.
742
 *  (2),(3), (4) are handled by the macros _CPU_Priority_mask() and
743
 *  _CPU_Priority_bits_index().  These three form a set of routines
744
 *  which must logically operate together.  Bits in the _value are
745
 *  set and cleared based on masks built by _CPU_Priority_mask().
746
 *  The basic major and minor values calculated by _Priority_Major()
747
 *  and _Priority_Minor() are "massaged" by _CPU_Priority_bits_index()
748
 *  to properly range between the values returned by the "find first bit"
749
 *  instruction.  This makes it possible for _Priority_Get_highest() to
750
 *  calculate the major and directly index into the minor table.
751
 *  This mapping is necessary to ensure that 0 (a high priority major/minor)
752
 *  is the first bit found.
753
 *
754
 *  This entire "find first bit" and mapping process depends heavily
755
 *  on the manner in which a priority is broken into a major and minor
756
 *  components with the major being the 4 MSB of a priority and minor
757
 *  the 4 LSB.  Thus (0 << 4) + 0 corresponds to priority 0 -- the highest
758
 *  priority.  And (15 << 4) + 14 corresponds to priority 254 -- the next
759
 *  to the lowest priority.
760
 *
761
 *  If your CPU does not have a "find first bit" instruction, then
762
 *  there are ways to make do without it.  Here are a handful of ways
763
 *  to implement this in software:
764
 *
765
 *    - a series of 16 bit test instructions
766
 *    - a "binary search using if's"
767
 *    - _number = 0
768
 *      if _value > 0x00ff
769
 *        _value >>=8
770
 *        _number = 8;
771
 *
772
 *      if _value > 0x0000f
773
 *        _value >>= 8
774
 *        _number += 4
775
 *
776
 *      _number += bit_set_table[ _value ]
777
 *
778
 *    where bit_set_table[ 16 ] has values which indicate the first
779
 *      bit set
780
 */
781
 
782
#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
783
#define CPU_USE_GENERIC_BITFIELD_DATA TRUE
784
 
785
#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)
786
 
787
#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
788
  { \
789
    (_output) = 0;   /* do something to prevent warnings */ \
790
  }
791
 
792
#endif
793
 
794
/* end of Bitfield handler macros */
795
 
796
/*
 *  Build the bit mask corresponding to a bit number produced by
 *  _CPU_Bitfield_Find_first_bit().  See the discussion for that
 *  routine.  Compiled only when the generic bitfield code is disabled.
 */

#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)

#define _CPU_Priority_Mask( _bit_number ) \
  ( 1 << (_bit_number) )

#endif
/*
 *  Translate a bit number returned by _CPU_Bitfield_Find_first_bit()
 *  into something suitable for use as a major or minor component of a
 *  priority.  See the discussion for that routine.  Compiled only when
 *  the generic bitfield code is disabled; here it is the identity map.
 */

#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)

#define _CPU_Priority_bits_index( _priority ) \
  (_priority)

#endif

/* end of Priority handler macros */

/* functions */
/*
828
 *  _CPU_Initialize
829
 *
830
 *  This routine performs CPU dependent initialization.
831
 */
832
 
833
void _CPU_Initialize(
834
  rtems_cpu_table  *cpu_table,
835
  void      (*thread_dispatch)
836
);
837
 
838
/*
839
 *  _CPU_ISR_install_raw_handler
840
 *
841
 *  This routine installs a "raw" interrupt handler directly into the
842
 *  processor's vector table.
843
 */
844
 
845
void _CPU_ISR_install_raw_handler(
846
  unsigned32  vector,
847
  proc_ptr    new_handler,
848
  proc_ptr   *old_handler
849
);
850
 
851
/*
852
 *  _CPU_ISR_install_vector
853
 *
854
 *  This routine installs an interrupt vector.
855
 */
856
 
857
void _CPU_ISR_install_vector(
858
  unsigned32  vector,
859
  proc_ptr    new_handler,
860
  proc_ptr   *old_handler
861
);
862
 
863
/*
 *  _CPU_Install_interrupt_stack
 *
 *  Install the hardware interrupt stack pointer.
 *
 *  NOTE:  Required only when CPU_HAS_HARDWARE_INTERRUPT_STACK is TRUE.
 */

void _CPU_Install_interrupt_stack( void );
/*
 *  _CPU_Thread_Idle_body
 *
 *  This routine is the CPU dependent IDLE thread body.
 *
 *  NOTE:  Required only when CPU_PROVIDES_IDLE_THREAD_BODY is TRUE.
 *         The heading previously read "_CPU_Internal_threads_Idle_thread_body",
 *         an older name that did not match this prototype.
 */

void _CPU_Thread_Idle_body( void );
/*
886
 *  _CPU_Context_switch
887
 *
888
 *  This routine switches from the run context to the heir context.
889
 */
890
 
891
void _CPU_Context_switch(
892
  Context_Control  *run,
893
  Context_Control  *heir
894
);
895
 
896
/*
897
 *  _CPU_Context_restore
898
 *
899
 *  This routine is generally used only to restart self in an
900
 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
901
 *
902
 *  NOTE: May be unnecessary to reload some registers.
903
 */
904
 
905
void _CPU_Context_restore(
906
  Context_Control *new_context
907
);
908
 
909
/*
 *  _CPU_Context_save_fp
 *
 *  Save the floating point context passed to it.
 *
 *  fp_context_ptr - indirect pointer to the FP context area
 */

void _CPU_Context_save_fp(
  void **fp_context_ptr
);
/*
 *  _CPU_Context_restore_fp
 *
 *  Restore the floating point context passed to it.
 *
 *  fp_context_ptr - indirect pointer to the FP context area
 */

void _CPU_Context_restore_fp(
  void **fp_context_ptr
);
/*  The following routine swaps the endian format of an unsigned int.
 *  It must be static because it is referenced indirectly.
 *
 *  This version will work on any processor, but if there is a better
 *  way for your CPU PLEASE use it.  The most common way to do this is to:
 *
 *     swap least significant two bytes with 16-bit rotate
 *     swap upper and lower 16-bits
 *     swap most significant two bytes with 16-bit rotate
 *
 *  Some CPUs have special instructions which swap a 32-bit quantity in
 *  a single instruction (e.g. i486).  It is probably best to avoid
 *  an "endian swapping control bit" in the CPU.  One good reason is
 *  that interrupts would probably have to be disabled to insure that
 *  an interrupt does not try to access the same "chunk" with the wrong
 *  endian.  Another good reason is that on some CPUs, the endian bit
 *  changes the endianness for ALL fetches -- both code and data -- so
 *  the code will be fetched incorrectly.
 */

static inline unsigned int CPU_swap_u32(
  unsigned int value
)
{
  /*
   *  Plain "unsigned int" is used for the temporaries so the routine
   *  is self-contained and consistent with its prototype (the previous
   *  version used the project typedef unsigned32 for locals only).
   */
  unsigned int byte1, byte2, byte3, byte4, swapped;

  byte4 = (value >> 24) & 0xff;   /* most significant byte */
  byte3 = (value >> 16) & 0xff;
  byte2 = (value >> 8)  & 0xff;
  byte1 =  value        & 0xff;   /* least significant byte */

  swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
  return( swapped );
}
/*
 *  Swap the two bytes of a 16-bit value.
 *
 *  NOTE:  The parameter is fully parenthesized because the argument may
 *         be an arbitrary expression; the previous version expanded
 *         "value" bare, which mis-grouped arguments containing
 *         low-precedence operators such as "|".  The argument is still
 *         evaluated twice, so avoid arguments with side effects.
 */
#define CPU_swap_u16( value ) \
  ((((value) & 0xff) << 8) | (((value) >> 8) & 0xff))
/*
968
 *  Miscellaneous prototypes
969
 *
970
 *  NOTE:  The names should have mips64orion in them.
971
 */
972
 
973
void disable_int( unsigned32 mask );
974
void enable_int( unsigned32 mask );
975
 
976
#ifdef __cplusplus
977
}
978
#endif
979
 
980
#endif

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.