OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

[/] [or1k/] [trunk/] [rtems-20020807/] [cpukit/] [score/] [cpu/] [or32/] [rtems/] [score/] [cpu.h] - Blame information for rev 1765

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 1026 ivang
/*  cpu.h
2
 *
3
 *  This include file contains macros pertaining to the Opencores
4
 *  or1k processor family.
5
 *
6
 *  COPYRIGHT (c) 1989-1999.
7
 *  On-Line Applications Research Corporation (OAR).
8
 *
9
 *  The license and distribution terms for this file may be
10
 *  found in the file LICENSE in this distribution or at
11
 *  http://www.OARcorp.com/rtems/license.html.
12
 *
13
 *  This file adapted from no_cpu example of the RTEMS distribution.
14
 *  The body has been modified for the Opencores Or1k implementation by
15
 *  Chris Ziomkowski. <chris@asics.ws>
16
 *
17
 */
18
 
19
#ifndef _OR1K_CPU_h
20
#define _OR1K_CPU_h
21
 
22
#ifdef __cplusplus
23
extern "C" {
24
#endif
25
 
26
#include "rtems/score/or32.h"            /* pick up machine definitions */
27
#ifndef ASM
28
#include "rtems/score/types.h"
29
#endif
30
 
31
/* conditional compilation parameters */
32
 
33
/*
34
 *  Should the calls to _Thread_Enable_dispatch be inlined?
35
 *
36
 *  If TRUE, then they are inlined.
37
 *  If FALSE, then a subroutine call is made.
38
 *
39
 *  Basically this is an example of the classic trade-off of size
40
 *  versus speed.  Inlining the call (TRUE) typically increases the
41
 *  size of RTEMS while speeding up the enabling of dispatching.
42
 *  [NOTE: In general, the _Thread_Dispatch_disable_level will
43
 *  only be 0 or 1 unless you are in an interrupt handler and that
44
 *  interrupt handler invokes the executive.]  When not inlined
45
 *  something calls _Thread_Enable_dispatch which in turns calls
46
 *  _Thread_Dispatch.  If the enable dispatch is inlined, then
47
 *  one subroutine call is avoided entirely.]
48
 *
49
 */
50
 
51
#define CPU_INLINE_ENABLE_DISPATCH       FALSE
52
 
53
/*
54
 *  Should the body of the search loops in _Thread_queue_Enqueue_priority
55
 *  be unrolled one time?  In unrolled each iteration of the loop examines
56
 *  two "nodes" on the chain being searched.  Otherwise, only one node
57
 *  is examined per iteration.
58
 *
59
 *  If TRUE, then the loops are unrolled.
60
 *  If FALSE, then the loops are not unrolled.
61
 *
62
 *  The primary factor in making this decision is the cost of disabling
63
 *  and enabling interrupts (_ISR_Flash) versus the cost of rest of the
64
 *  body of the loop.  On some CPUs, the flash is more expensive than
65
 *  one iteration of the loop body.  In this case, it might be desirable
66
 *  to unroll the loop.  It is important to note that on some CPUs, this
67
 *  code is the longest interrupt disable period in RTEMS.  So it is
68
 *  necessary to strike a balance when setting this parameter.
69
 *
70
 */
71
 
72
#define CPU_UNROLL_ENQUEUE_PRIORITY      TRUE
73
 
74
/*
75
 *  Does RTEMS manage a dedicated interrupt stack in software?
76
 *
77
 *  If TRUE, then a stack is allocated in _ISR_Handler_initialization.
78
 *  If FALSE, nothing is done.
79
 *
80
 *  If the CPU supports a dedicated interrupt stack in hardware,
81
 *  then it is generally the responsibility of the BSP to allocate it
82
 *  and set it up.
83
 *
84
 *  If the CPU does not support a dedicated interrupt stack, then
85
 *  the porter has two options: (1) execute interrupts on the
86
 *  stack of the interrupted task, and (2) have RTEMS manage a dedicated
87
 *  interrupt stack.
88
 *
89
 *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
90
 *
91
 *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
92
 *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
93
 *  possible that both are FALSE for a particular CPU.  Although it
94
 *  is unclear what that would imply about the interrupt processing
95
 *  procedure on that CPU.
96
 *
97
 *  For the first cut of an Or1k implementation, let's not worry
98
 *  about this, and assume that our C code will autoperform any
99
 *  frame/stack allocation for us when the procedure is entered.
100
 *  If we write assembly code, we may have to deal with this manually.
101
 *  This can be changed later if we find it is impossible. This
102
 *  behavior is desirable as it allows us to work in low memory
103
 *  environments where we don't have room for a dedicated stack.
104
 */
105
 
106
#define CPU_HAS_SOFTWARE_INTERRUPT_STACK FALSE
107
 
108
/*
109
 *  Does this CPU have hardware support for a dedicated interrupt stack?
110
 *
111
 *  If TRUE, then it must be installed during initialization.
112
 *  If FALSE, then no installation is performed.
113
 *
114
 *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
115
 *
116
 *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
117
 *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
118
 *  possible that both are FALSE for a particular CPU.  Although it
119
 *  is unclear what that would imply about the interrupt processing
120
 *  procedure on that CPU.
121
 *
122
 */
123
 
124
#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE
125
 
126
/*
127
 *  Does RTEMS allocate a dedicated interrupt stack in the Interrupt Manager?
128
 *
129
 *  If TRUE, then the memory is allocated during initialization.
130
 *  If FALSE, then the memory is NOT allocated during initialization.
131
 *
132
 *  This should be TRUE if CPU_HAS_SOFTWARE_INTERRUPT_STACK is TRUE
133
 *  or CPU_INSTALL_HARDWARE_INTERRUPT_STACK is TRUE.
134
 *
135
 */
136
 
137
#define CPU_ALLOCATE_INTERRUPT_STACK FALSE
138
 
139
/*
140
 *  Does the RTEMS invoke the user's ISR with the vector number and
141
 *  a pointer to the saved interrupt frame (1) or just the vector
142
 *  number (0)?
143
 *
144
 */
145
 
146
#define CPU_ISR_PASSES_FRAME_POINTER 0
147
 
148
/*
149
 *  Does the CPU have hardware floating point?
150
 *
151
 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is supported.
152
 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is ignored.
153
 *
154
 *  If there is a FP coprocessor such as the i387 or mc68881, then
155
 *  the answer is TRUE.
156
 *
157
 *  The macro name "OR1K_HAS_FPU" should be made CPU specific.
158
 *  It indicates whether or not this CPU model has FP support.  For
159
 *  example, it would be possible to have an i386_nofp CPU model
160
 *  which set this to false to indicate that you have an i386 without
161
 *  an i387 and wish to leave floating point support out of RTEMS.
162
 *
163
 *  The CPU_SOFTWARE_FP is used to indicate whether or not there
164
 *  is software implemented floating point that must be context
165
 *  switched.  The determination of whether or not this applies
166
 *  is very tool specific and the state saved/restored is also
167
 *  compiler specific.
168
 *
169
 *  Or1k Specific Information:
170
 *
171
 *  At this time there are no implementations of Or1k that are
172
 *  expected to implement floating point. More importantly, the
173
 *  floating point architecture is expected to change significantly
174
 *  before such chips are fabricated.
175
 */
176
 
177
#if ( OR1K_HAS_FPU == 1 )
178
#define CPU_HARDWARE_FP     TRUE
179
#define CPU_SOFTWARE_FP     FALSE
180
#else
181
#define CPU_HARDWARE_FP     FALSE
182
#define CPU_SOFTWARE_FP     TRUE
183
#endif
184
 
185
 
186
/*
187
 *  Are all tasks RTEMS_FLOATING_POINT tasks implicitly?
188
 *
189
 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is assumed.
190
 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is followed.
191
 *
192
 *  So far, the only CPU in which this option has been used is the
193
 *  HP PA-RISC.  The HP C compiler and gcc both implicitly use the
194
 *  floating point registers to perform integer multiplies.  If
195
 *  a function which you would not think utilize the FP unit DOES,
196
 *  then one can not easily predict which tasks will use the FP hardware.
197
 *  In this case, this option should be TRUE.
198
 *
199
 *  If CPU_HARDWARE_FP is FALSE, then this should be FALSE as well.
200
 *
201
 */
202
 
203
#define CPU_ALL_TASKS_ARE_FP     FALSE
204
 
205
/*
206
 *  Should the IDLE task have a floating point context?
207
 *
208
 *  If TRUE, then the IDLE task is created as a RTEMS_FLOATING_POINT task
209
 *  and it has a floating point context which is switched in and out.
210
 *  If FALSE, then the IDLE task does not have a floating point context.
211
 *
212
 *  Setting this to TRUE negatively impacts the time required to preempt
213
 *  the IDLE task from an interrupt because the floating point context
214
 *  must be saved as part of the preemption.
215
 *
216
 */
217
 
218
#define CPU_IDLE_TASK_IS_FP      FALSE
219
 
220
/*
221
 *  Should the saving of the floating point registers be deferred
222
 *  until a context switch is made to another different floating point
223
 *  task?
224
 *
225
 *  If TRUE, then the floating point context will not be stored until
226
 *  necessary.  It will remain in the floating point registers and not
227
 *  disturbed until another floating point task is switched to.
228
 *
229
 *  If FALSE, then the floating point context is saved when a floating
230
 *  point task is switched out and restored when the next floating point
231
 *  task is restored.  The state of the floating point registers between
232
 *  those two operations is not specified.
233
 *
234
 *  If the floating point context does NOT have to be saved as part of
235
 *  interrupt dispatching, then it should be safe to set this to TRUE.
236
 *
237
 *  Setting this flag to TRUE results in using a different algorithm
238
 *  for deciding when to save and restore the floating point context.
239
 *  The deferred FP switch algorithm minimizes the number of times
240
 *  the FP context is saved and restored.  The FP context is not saved
241
 *  until a context switch is made to another, different FP task.
242
 *  Thus in a system with only one FP task, the FP context will never
243
 *  be saved or restored.
244
 *
245
 */
246
 
247
#define CPU_USE_DEFERRED_FP_SWITCH       TRUE
248
 
249
/*
250
 *  Does this port provide a CPU dependent IDLE task implementation?
251
 *
252
 *  If TRUE, then the routine _CPU_Thread_Idle_body
253
 *  must be provided and is the default IDLE thread body instead of
254
 *  _Thread_Idle_body.
255
 *
256
 *  If FALSE, then use the generic IDLE thread body if the BSP does
257
 *  not provide one.
258
 *
259
 *  This is intended to allow for supporting processors which have
260
 *  a low power or idle mode.  When the IDLE thread is executed, then
261
 *  the CPU can be powered down.
262
 *
263
 *  The order of precedence for selecting the IDLE thread body is:
264
 *
265
 *    1.  BSP provided
266
 *    2.  CPU dependent (if provided)
267
 *    3.  generic (if no BSP and no CPU dependent)
268
 *
269
 */
270
 
271
#define CPU_PROVIDES_IDLE_THREAD_BODY    FALSE
272
 
273
/*
274
 *  Does the stack grow up (toward higher addresses) or down
275
 *  (toward lower addresses)?
276
 *
277
 *  If TRUE, then the stack grows upward.
278
 *  If FALSE, then the stack grows toward smaller addresses.
279
 *
280
 *  Or1k Specific Information:
281
 *
282
 *  Previously I had misread the documentation and set this
283
 *  to true. Surprisingly, it seemed to work anyway. I'm
284
 *  therefore not 100% sure exactly what this does. It should
285
 *  be correct as it is now, however.
286
 */
287
 
288
#define CPU_STACK_GROWS_UP               FALSE
289
 
290
/*
291
 *  The following is the variable attribute used to force alignment
292
 *  of critical RTEMS structures.  On some processors it may make
293
 *  sense to have these aligned on tighter boundaries than
294
 *  the minimum requirements of the compiler in order to have as
295
 *  much of the critical data area as possible in a cache line.
296
 *
297
 *  The placement of this macro in the declaration of the variables
298
 *  is based on the syntactic requirements of the GNU C
299
 *  "__attribute__" extension.  For example with GNU C, use
300
 *  the following to force a structures to a 32 byte boundary.
301
 *
302
 *      __attribute__ ((aligned (32)))
303
 *
304
 *  NOTE:  Currently only the Priority Bit Map table uses this feature.
305
 *         To benefit from using this, the data must be heavily
306
 *         used so it will stay in the cache and used frequently enough
307
 *         in the executive to justify turning this on.
308
 *
309
 */
310
 
311
#define CPU_STRUCTURE_ALIGNMENT __attribute__ ((aligned (32)))
312
 
313
/*
314
 *  Define what is required to specify how the network to host conversion
315
 *  routines are handled.
316
 *
317
 *  Or1k Specific Information:
318
 *
319
 *  This version of RTEMS is designed specifically to run with
320
 *  big endian architectures. If you want little endian, you'll
321
 *  have to make the appropriate adjustments here and write
322
 *  efficient routines for byte swapping. The Or1k architecture
323
 *  doesn't do this very well.
324
 */
325
 
326
#define CPU_HAS_OWN_HOST_TO_NETWORK_ROUTINES     FALSE
327
#define CPU_BIG_ENDIAN                           TRUE
328
#define CPU_LITTLE_ENDIAN                        FALSE
329
 
330
/*
331
 *  The following defines the number of bits actually used in the
332
 *  interrupt field of the task mode.  How those bits map to the
333
 *  CPU interrupt levels is defined by the routine _CPU_ISR_Set_level().
334
 *
335
 */
336
 
337
#define CPU_MODES_INTERRUPT_MASK   0x00000001
338
 
339
/*
340
 *  Processor defined structures
341
 *
342
 *  Examples structures include the descriptor tables from the i386
343
 *  and the processor control structure on the i960ca.
344
 *
345
 */
346
 
347
 
348
/*
349
 * Contexts
350
 *
351
 *  Generally there are 2 types of context to save.
352
 *     1. Interrupt registers to save
353
 *     2. Task level registers to save
354
 *
355
 *  This means we have the following 3 context items:
356
 *     1. task level context stuff::  Context_Control
357
 *     2. floating point task stuff:: Context_Control_fp
358
 *     3. special interrupt level context :: Context_Control_interrupt
359
 *
360
 *  On some processors, it is cost-effective to save only the callee
361
 *  preserved registers during a task context switch.  This means
362
 *  that the ISR code needs to save those registers which do not
363
 *  persist across function calls.  It is not mandatory to make this
364
 *  distinction between the caller/callee saved registers for the
365
 *  purpose of minimizing context saved during task switch and on interrupts.
366
 *  If the cost of saving extra registers is minimal, simplicity is the
367
 *  choice.  Save the same context on interrupt entry as for tasks in
368
 *  this case.
369
 *
370
 *  Additionally, if gdb is to be made aware of RTEMS tasks for this CPU, then
371
 *  care should be used in designing the context area.
372
 *
373
 *  On some CPUs with hardware floating point support, the Context_Control_fp
374
 *  structure will not be used or it simply consist of an array of a
375
 *  fixed number of bytes.   This is done when the floating point context
376
 *  is dumped by a "FP save context" type instruction and the format
377
 *  is not really defined by the CPU.  In this case, there is no need
378
 *  to figure out the exact format -- only the size.  Of course, although
379
 *  this is enough information for RTEMS, it is probably not enough for
380
 *  a debugger such as gdb.  But that is another problem.
381
 *
382
 *
383
 */
384
 
385
/* or1kreg: integer type wide enough to hold one Or1k general purpose
   register for the configured architecture width. */
#ifdef OR1K_64BIT_ARCH
386
#define or1kreg unsigned64
387
#else
388
#define or1kreg unsigned32
389
#endif
390
 
391
/* SR_MASK is the mask of values that will be copied to/from the status
392
   register on a context switch. Some values, like the flag state, are
393
   specific on the context, while others, such as interrupt enables,
394
   are global. The currently defined global bits are:
395
 
396
   0x00001 SUPV:     Supervisor mode
397
   0x00002 EXR:      Exceptions on/off
398
   0x00004 EIR:      Interrupts enabled/disabled
399
   0x00008 DCE:      Data cache enabled/disabled
400
   0x00010 ICE:      Instruction cache enabled/disabled
401
   0x00020 DME:      Data MMU enabled/disabled
402
   0x00040 IME:      Instruction MMU enabled/disabled
403
   0x00080 LEE:      Little/Big Endian enable
404
   0x00100 CE:       Context ID/shadow regs enabled/disabled
405
   0x01000 OVE:      Overflow causes exception
406
   0x04000 EP:       Exceptions @ 0x0 or 0xF0000000
407
   0x08000 PXR:      Partial exception recognition enabled/disabled
408
   0x10000 SUMRA:    SPR's accessible/inaccessible
409
 
410
   The context specific bits are:
411
 
412
   0x00200 F         Branch flag indicator
413
   0x00400 CY        Carry flag indicator
414
   0x00800 OV        Overflow flag indicator
415
   0x02000 DSX       Delay slot exception occurred
416
   0xF8000000 CID    Current Context ID
417
*/
418
 
419
#define SR_MASK 0xF8002E00
420
 
421
/*
 *  Symbolic names for the Or1k status register (SR) bits.  The values
 *  match the bit assignments documented in the SR_MASK comment above;
 *  the "context specific" bits (F, CY, OV, DSX, CID) together form
 *  SR_MASK (0xF8002E00).
 *
 *  NOTE: the trailing comma after the final enumerator was removed --
 *  it is a constraint violation in C89, which this code base targets.
 */
typedef enum {
  SR_SUPV  = 0x00001,     /* Supervisor mode */
  SR_EXR   = 0x00002,     /* Exceptions on/off */
  SR_EIR   = 0x00004,     /* Interrupts enabled/disabled */
  SR_DCE   = 0x00008,     /* Data cache enabled/disabled */
  SR_ICE   = 0x00010,     /* Instruction cache enabled/disabled */
  SR_DME   = 0x00020,     /* Data MMU enabled/disabled */
  SR_IME   = 0x00040,     /* Instruction MMU enabled/disabled */
  SR_LEE   = 0x00080,     /* Little/big endian enable */
  SR_CE    = 0x00100,     /* Context ID/shadow regs enabled/disabled */
  SR_F     = 0x00200,     /* Branch flag indicator (context specific) */
  SR_CY    = 0x00400,     /* Carry flag indicator (context specific) */
  SR_OV    = 0x00800,     /* Overflow flag indicator (context specific) */
  SR_OVE   = 0x01000,     /* Overflow causes exception */
  SR_DSX   = 0x02000,     /* Delay slot exception occurred (context specific) */
  SR_EP    = 0x04000,     /* Exceptions vectored to 0x0 or 0xF0000000 */
  SR_PXR   = 0x08000,     /* Partial exception recognition on/off */
  SR_SUMRA = 0x10000,     /* SPRs accessible/inaccessible */
  SR_CID   = 0xF8000000   /* Current context ID (context specific) */
} StatusRegisterBits;
441
 
442
typedef struct {
443
  unsigned32  sr;     /* Status register: context-specific (non-persistent) bits, see SR_MASK */
444
  unsigned32  esr;    /* Saved exception status register */
445
  unsigned32  ear;    /* Saved exception effective address register */
446
  unsigned32  epc;    /* Saved exception PC register    */
447
  or1kreg     r[31];  /* General registers; 31 entries -- presumably r1..r31, confirm against the context switch code */
448
  or1kreg     pc;     /* Context PC 4 or 8 bytes for 64 bit alignment */
449
} Context_Control;
450
 
451
typedef int Context_Control_fp;               /* placeholder: this port has no FP context (CPU_CONTEXT_FP_SIZE is 0) */
452
typedef Context_Control CPU_Interrupt_frame;  /* interrupts save the full task context */
453
#define _CPU_Null_fp_context 0        /* no "null" FP context object needed */
454
#define _CPU_Interrupt_stack_low 0    /* no software interrupt stack (CPU_HAS_SOFTWARE_INTERRUPT_STACK is FALSE) */
455
#define _CPU_Interrupt_stack_high 0
456
 
457
/*
458
 *  The following table contains the information required to configure
459
 *  the XXX processor specific parameters.
460
 *
461
 */
462
 
463
/*
 *  NOTE(review): these are the fields RTEMS requires in every port's
 *  CPU table; the accessor macros live in rtems/system.h (see below).
 *  The hook pointers are presumably optional (may be NULL) -- confirm
 *  against the BSP and initialization code.
 */
typedef struct {
464
  void       (*pretasking_hook)( void );   /* BSP hook run before tasking starts -- confirm timing */
465
  void       (*predriver_hook)( void );    /* BSP hook run before driver initialization */
466
  void       (*postdriver_hook)( void );   /* BSP hook run after driver initialization */
467
  void       (*idle_task)( void );         /* BSP-supplied IDLE thread body, if any */
468
  boolean      do_zero_of_workspace;       /* zero the RTEMS workspace at startup? */
469
  unsigned32   idle_task_stack_size;       /* stack size for the IDLE task */
470
  unsigned32   interrupt_stack_size;       /* size of the dedicated interrupt stack */
471
  unsigned32   extra_mpci_receive_server_stack; /* see CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK */
472
  void *     (*stack_allocate_hook)( unsigned32 ); /* custom task stack allocator */
473
  void       (*stack_free_hook)( void* );          /* matching stack deallocator */
474
  /* end of fields required on all CPUs */
475
}   rtems_cpu_table;
476
 
477
/*
478
 *  Macros to access required entries in the CPU Table are in
479
 *  the file rtems/system.h.
480
 *
481
 */
482
 
483
/*
484
 *  Macros to access OR1K specific additions to the CPU Table
485
 *
486
 */
487
 
488
/* There are no CPU specific additions to the CPU Table for this port. */
489
 
490
/*
491
 *  This variable is optional.  It is used on CPUs on which it is difficult
492
 *  to generate an "uninitialized" FP context.  It is filled in by
493
 *  _CPU_Initialize and copied into the task's FP context area during
494
 *  _CPU_Context_Initialize.
495
 *
496
 */
497
 
498
/* SCORE_EXTERN Context_Control_fp  _CPU_Null_fp_context; */
499
 
500
/*
501
 *  On some CPUs, RTEMS supports a software managed interrupt stack.
502
 *  This stack is allocated by the Interrupt Manager and the switch
503
 *  is performed in _ISR_Handler.  These variables contain pointers
504
 *  to the lowest and highest addresses in the chunk of memory allocated
505
 *  for the interrupt stack.  Since it is unknown whether the stack
506
 *  grows up or down (in general), this give the CPU dependent
507
 *  code the option of picking the version it wants to use.
508
 *
509
 *  NOTE: These two variables are required if the macro
510
 *        CPU_HAS_SOFTWARE_INTERRUPT_STACK is defined as TRUE.
511
 *
512
 */
513
 
514
/*
515
SCORE_EXTERN void               *_CPU_Interrupt_stack_low;
516
SCORE_EXTERN void               *_CPU_Interrupt_stack_high;
517
*/
518
 
519
/*
520
 *  With some compilation systems, it is difficult if not impossible to
521
 *  call a high-level language routine from assembly language.  This
522
 *  is especially true of commercial Ada compilers and name mangling
523
 *  C++ ones.  This variable can be optionally defined by the CPU porter
524
 *  and contains the address of the routine _Thread_Dispatch.  This
525
 *  can make it easier to invoke that routine at the end of the interrupt
526
 *  sequence (if a dispatch is necessary).
527
 *
528
 */
529
 
530
SCORE_EXTERN void           (*_CPU_Thread_dispatch_pointer)();
531
 
532
/*
533
 *  Nothing prevents the porter from declaring more CPU specific variables.
534
 *
535
 */
536
 
537
/* XXX: if needed, put more variables here */
538
 
539
/*
540
 *  The size of the floating point context area.  On some CPUs this
541
 *  will not be a "sizeof" because the format of the floating point
542
 *  area is not defined -- only the size is.  This is usually on
543
 *  CPUs with a "floating point save context" instruction.
544
 *
545
 *  Or1k Specific Information:
546
 *
547
 *  We don't support floating point in this version, so the size is 0
548
 */
549
 
550
#define CPU_CONTEXT_FP_SIZE 0
551
 
552
/*
553
 *  Amount of extra stack (above minimum stack size) required by
554
 *  MPCI receive server thread.  Remember that in a multiprocessor
555
 *  system this thread must exist and be able to process all directives.
556
 *
557
 */
558
 
559
#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0
560
 
561
/*
562
 *  This defines the number of entries in the ISR_Vector_table managed
563
 *  by RTEMS.
564
 *
565
 */
566
 
567
#define CPU_INTERRUPT_NUMBER_OF_VECTORS      16
568
#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER  (CPU_INTERRUPT_NUMBER_OF_VECTORS - 1)
569
 
570
/*
571
 *  Should be large enough to run all RTEMS tests.  This insures
572
 *  that a "reasonable" small application should not have any problems.
573
 *
574
 */
575
 
576
#define CPU_STACK_MINIMUM_SIZE          4096
577
 
578
/*
579
 *  CPU's worst alignment requirement for data types on a byte boundary.  This
580
 *  alignment does not take into account the requirements for the stack.
581
 *
582
 */
583
 
584
#define CPU_ALIGNMENT              8
585
 
586
/*
587
 *  This number corresponds to the byte alignment requirement for the
588
 *  heap handler.  This alignment requirement may be stricter than that
589
 *  for the data types alignment specified by CPU_ALIGNMENT.  It is
590
 *  common for the heap to follow the same alignment requirement as
591
 *  CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict enough for the heap,
592
 *  then this should be set to CPU_ALIGNMENT.
593
 *
594
 *  NOTE:  This does not have to be a power of 2 although it should be
595
 *         a multiple of 2 greater than or equal to 2.  The requirement
596
 *         to be a multiple of 2 is because the heap uses the least
597
 *         significant field of the front and back flags to indicate
598
 *         that a block is in use or free.  So you do not want any odd
599
 *         length blocks really putting length data in that bit.
600
 *
601
 *         On byte oriented architectures, CPU_HEAP_ALIGNMENT normally will
602
 *         have to be greater than or equal to CPU_ALIGNMENT to ensure that
603
 *         elements allocated from the heap meet all restrictions.
604
 *
605
 */
606
 
607
#define CPU_HEAP_ALIGNMENT         CPU_ALIGNMENT
608
 
609
/*
610
 *  This number corresponds to the byte alignment requirement for memory
611
 *  buffers allocated by the partition manager.  This alignment requirement
612
 *  may be stricter than that for the data types alignment specified by
613
 *  CPU_ALIGNMENT.  It is common for the partition to follow the same
614
 *  alignment requirement as CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict
615
 *  enough for the partition, then this should be set to CPU_ALIGNMENT.
616
 *
617
 *  NOTE:  This does not have to be a power of 2.  It does have to
618
 *         be greater than or equal to CPU_ALIGNMENT.
619
 *
620
 */
621
 
622
#define CPU_PARTITION_ALIGNMENT    CPU_ALIGNMENT
623
 
624
/*
625
 *  This number corresponds to the byte alignment requirement for the
626
 *  stack.  This alignment requirement may be stricter than that for the
627
 *  data types alignment specified by CPU_ALIGNMENT.  If the CPU_ALIGNMENT
628
 *  is strict enough for the stack, then this should be set to 0.
629
 *
630
 *  NOTE:  This must be a power of 2 either 0 or greater than CPU_ALIGNMENT.
631
 *
632
 */
633
 
634
#define CPU_STACK_ALIGNMENT        0
635
 
636
/* ISR handler macros */
637
 
638
/*
639
 *  Support routine to initialize the RTEMS vector table after it is allocated.
640
 *
641
 *  NO_CPU Specific Information:
642
 *
643
 *  XXX document implementation including references if appropriate
644
 */
645
 
646
#define _CPU_Initialize_vectors()
647
 
648
 
649
/*
650
 *  Disable all interrupts for an RTEMS critical section.  The previous
651
 *  level is returned in _level.
652
 *
653
 */
654
 
655
/*
 *  Disable interrupts and return the previous level in _isr_cookie.
 *  Stub for this port: no hardware interrupt state is touched; the
 *  cookie is zeroed only to prevent "used uninitialized" warnings.
 *  Wrapped in do/while(0) so the multi-token expansion is a single
 *  statement and safe in unbraced if/else bodies (CERT PRE10-C).
 */
#define _CPU_ISR_Disable( _isr_cookie ) \
  do { \
    (_isr_cookie) = 0;   /* do something to prevent warnings */ \
  } while (0)
659
 
660
/*
661
 *  Enable interrupts to the previous level (returned by _CPU_ISR_Disable).
662
 *  This indicates the end of an RTEMS critical section.  The parameter
663
 *  _level is not modified.
664
 *
665
 */
666
 
667
/*
 *  Restore interrupts to the level saved by _CPU_ISR_Disable.  Stub
 *  for this port: _isr_cookie is not examined and no hardware state
 *  is touched.  Wrapped in do/while(0) so the empty expansion is a
 *  single statement and safe in unbraced if/else bodies.
 */
#define _CPU_ISR_Enable( _isr_cookie )  \
  do { \
  } while (0)
670
 
671
/*
672
 *  This temporarily restores the interrupt to _level before immediately
673
 *  disabling them again.  This is used to divide long RTEMS critical
674
 *  sections into two or more parts.  The parameter _level is not
675
 * modified.
676
 *
677
 */
678
 
679
/*
 *  Briefly restore interrupts to _isr_cookie's level and disable them
 *  again, splitting a long critical section.  Stub for this port: the
 *  cookie is not examined and no hardware state is touched.  Wrapped
 *  in do/while(0) so the empty expansion is statement-safe.
 */
#define _CPU_ISR_Flash( _isr_cookie ) \
  do { \
  } while (0)
682
 
683
/*
684
 *  Map interrupt level in task mode onto the hardware that the CPU
685
 *  actually provides.  Currently, interrupt levels which do not
686
 *  map onto the CPU in a generic fashion are undefined.  Someday,
687
 *  it would be nice if these were "mapped" by the application
688
 *  via a callout.  For example, m68k has 8 levels 0 - 7, levels
689
 *  8 - 255 would be available for bsp/application specific meaning.
690
 *  This could be used to manage a programmable interrupt controller
691
 *  via the rtems_task_mode directive.
692
 *
693
 *  The get routine usually must be implemented as a subroutine.
694
 *
695
 */
696
 
697
/*
 *  Map the RTEMS task-mode interrupt level in new_level onto the
 *  hardware.  Stub for this port: no hardware masking is performed
 *  (only bit 0 of the mode is meaningful, see CPU_MODES_INTERRUPT_MASK).
 *  Wrapped in do/while(0) so the empty expansion is statement-safe.
 */
#define _CPU_ISR_Set_level( new_level ) \
  do { \
  } while (0)
700
 
701
unsigned32 _CPU_ISR_Get_level( void );
702
 
703
/* end of ISR handler macros */
704
 
705
/* Context handler macros */
706
 
707
/*
708
 *  Initialize the context to a state suitable for starting a
709
 *  task after a context restore operation.  Generally, this
710
 *  involves:
711
 *
712
 *     - setting a starting address
713
 *     - preparing the stack
714
 *     - preparing the stack and frame pointers
715
 *     - setting the proper interrupt level in the context
716
 *     - initializing the floating point context
717
 *
718
 *  This routine generally does not set any unnecessary register
719
 *  in the context.  The state of the "general data" registers is
720
 *  undefined at task start time.
721
 *
722
 *  NOTE: This is_fp parameter is TRUE if the thread is to be a floating
723
 *        point thread.  This is typically only used on CPUs where the
724
 *        FPU may be easily disabled by software such as on the SPARC
725
 *        where the PSR contains an enable FPU bit.
726
 *
727
 */
728
 
729
/*
 *  Initialize _the_context so that a context restore starts the task:
 *    r[1] (stack pointer)  -> top of the stack area (base + size)
 *    r[2] (frame pointer)  -> base of the stack area
 *    sr                    -> 0x1B when _isr is nonzero (SUPV|EXR|DCE|ICE,
 *                             interrupts masked), 0x1F otherwise (EIR set,
 *                             interrupts enabled)
 *    pc                    -> task entry point
 *  All other fields are zeroed.  _is_fp is ignored: this port has no
 *  hardware floating point context (CPU_CONTEXT_FP_SIZE is 0).
 *
 *  NOTE: the value casts now target or1kreg (an integer type) instead
 *  of the original (unsigned32 *) -- storing pointer values into the
 *  integer register fields was a type error.  The do/while(0) wrapper
 *  makes the multi-statement macro safe in unbraced if/else bodies.
 */
#define _CPU_Context_Initialize( _the_context, _stack_base, _size, \
                                 _isr, _entry_point, _is_fp ) \
  do { \
    memset( (_the_context), '\0', sizeof(Context_Control) ); \
    (_the_context)->r[1] = (or1kreg) ((unsigned32) (_stack_base) + (_size)); \
    (_the_context)->r[2] = (or1kreg) ((unsigned32) (_stack_base)); \
    (_the_context)->sr   = (_isr) ? 0x0000001B : 0x0000001F; \
    (_the_context)->pc   = (or1kreg) (_entry_point); \
  } while (0)
738
 
739
/*
740
 *  This routine is responsible for somehow restarting the currently
741
 *  executing task.  If you are lucky, then all that is necessary
742
 *  is restoring the context.  Otherwise, there will need to be
743
 *  a special assembly routine which does something special in this
744
 *  case.  Context_Restore should work most of the time.  It will
745
 *  not work if restarting self conflicts with the stack frame
746
 *  assumptions of restoring a context.
747
 *
748
 */
749
 
750
/*
 *  Restart the currently executing task by simply restoring its
 *  context (sufficient for this port; see the comment above).
 *
 *  NOTE: the trailing semicolon was removed from the expansion so the
 *  macro behaves as a single statement -- with the semicolon baked in,
 *  a call site like "if (c) _CPU_Context_Restart_self(p); else ..."
 *  would fail to compile.  Call sites supply the semicolon.
 */
#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) )
752
 
753
/*
754
 *  The purpose of this macro is to allow the initial pointer into
755
 *  a floating point context area (used to save the floating point
756
 *  context) to be at an arbitrary place in the floating point
757
 *  context area.
758
 *
759
 *  This is necessary because some FP units are designed to have
760
 *  their context saved as a stack which grows into lower addresses.
761
 *  Other FP units can be saved by simply moving registers into offsets
762
 *  from the base of the context area.  Finally some FP units provide
763
 *  a "dump context" instruction which could fill in from high to low
764
 *  or low to high based on the whim of the CPU designers.
765
 *
766
 */
767
 
768
/*
 *  NOTE(review): yields a pointer _offset bytes into the FP context
 *  area via _Addresses_Add_offset (a score helper declared elsewhere).
 *  With CPU_CONTEXT_FP_SIZE defined as 0 in this port, this macro is
 *  presumably never exercised -- confirm before relying on it.
 */
#define _CPU_Context_Fp_start( _base, _offset ) \
769
   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )
770
 
771
/*
772
 *  This routine initializes the FP context area passed to it.
773
 *  There are a few standard ways in which to initialize the
774
 *  floating point context.  The code included for this macro assumes
775
 *  that this is a CPU in which a "initial" FP context was saved into
776
 *  _CPU_Null_fp_context and it simply copies it to the destination
777
 *  context passed to it.
778
 *
779
 *  Other models include (1) not doing anything, and (2) putting
780
 *  a "null FP status word" in the correct place in the FP context.
781
 *
782
 */
783
 
784
/*
 *  Initialize the FP context area at *_destination by copying in the
 *  "null" FP context, as described in the comment above.
 *  NOTE(review): this port defines _CPU_Null_fp_context as 0 and
 *  CPU_CONTEXT_FP_SIZE as 0, so this macro is presumably never
 *  invoked -- confirm before relying on the double-indirection here.
 */
#define _CPU_Context_Initialize_fp( _destination ) \
785
  { \
786
   *((Context_Control_fp *) *((void **) _destination)) = _CPU_Null_fp_context; \
787
  }
788
 
789
/* end of Context handler macros */
790
 
791
/* Fatal Error manager macros */
792
 
793
/*
 *  This routine copies _error into a known place -- typically a stack
 *  location or a register, optionally disables interrupts, and
 *  halts/stops the CPU.
 *
 *  The error code is latched into a volatile local so it remains
 *  observable (e.g. from a debugger or a post-mortem memory dump),
 *  then the CPU is parked in an infinite loop.  The previous stub
 *  was empty and fell straight through back to the caller, which
 *  violates the halt contract.  Interrupts are NOT disabled here.
 *
 */

#define _CPU_Fatal_halt( _error ) \
  { \
    volatile unsigned32 _halt_error_code = (_error); \
    (void) _halt_error_code; \
    for ( ;; ) \
      ; \
  }
804
/* end of Fatal Error manager macros */
805
 
806
/* Bitfield handler macros */
807
 
808
/*
809
 *  This routine sets _output to the bit number of the first bit
810
 *  set in _value.  _value is of CPU dependent type Priority_Bit_map_control.
811
 *  This type may be either 16 or 32 bits wide although only the 16
812
 *  least significant bits will be used.
813
 *
814
 *  There are a number of variables in using a "find first bit" type
815
 *  instruction.
816
 *
817
 *    (1) What happens when run on a value of zero?
818
 *    (2) Bits may be numbered from MSB to LSB or vice-versa.
819
 *    (3) The numbering may be zero or one based.
820
 *    (4) The "find first bit" instruction may search from MSB or LSB.
821
 *
822
 *  RTEMS guarantees that (1) will never happen so it is not a concern.
823
 *  (2),(3), (4) are handled by the macros _CPU_Priority_mask() and
824
 *  _CPU_Priority_bits_index().  These three form a set of routines
825
 *  which must logically operate together.  Bits in the _value are
826
 *  set and cleared based on masks built by _CPU_Priority_mask().
827
 *  The basic major and minor values calculated by _Priority_Major()
828
 *  and _Priority_Minor() are "massaged" by _CPU_Priority_bits_index()
829
 *  to properly range between the values returned by the "find first bit"
830
 *  instruction.  This makes it possible for _Priority_Get_highest() to
831
 *  calculate the major and directly index into the minor table.
832
 *  This mapping is necessary to ensure that 0 (a high priority major/minor)
833
 *  is the first bit found.
834
 *
835
 *  This entire "find first bit" and mapping process depends heavily
836
 *  on the manner in which a priority is broken into a major and minor
837
 *  components with the major being the 4 MSB of a priority and minor
838
 *  the 4 LSB.  Thus (0 << 4) + 0 corresponds to priority 0 -- the highest
839
 *  priority.  And (15 << 4) + 14 corresponds to priority 254 -- the next
840
 *  to the lowest priority.
841
 *
842
 *  If your CPU does not have a "find first bit" instruction, then
843
 *  there are ways to make do without it.  Here are a handful of ways
844
 *  to implement this in software:
845
 *
846
 *    - a series of 16 bit test instructions
847
 *    - a "binary search using if's"
848
 *    - _number = 0
849
 *      if _value > 0x00ff
850
 *        _value >>=8
851
 *        _number = 8;
852
 *
853
 *      if _value > 0x0000f
 *        _value >>= 4
 *        _number += 4
856
 *
857
 *      _number += bit_set_table[ _value ]
858
 *
859
 *    where bit_set_table[ 16 ] has values which indicate the first
860
 *      bit set
861
 *
862
 */
863
 
864
  /* #define CPU_USE_GENERIC_BITFIELD_CODE FALSE */
/* This port uses the score's generic (portable C) find-first-bit
   implementation and its lookup table; the l.ff1-based assembly
   version below is therefore compiled out. */
#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
#define CPU_USE_GENERIC_BITFIELD_DATA TRUE
868
#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)

  /* Get a value between 0 and N where N is the bit size */
  /* This routine makes use of the fact that CPUCFGR defines
     OB32S to have value 32, and OB64S to have value 64. If
     this ever changes then this routine will fail. */
  /*
   * Instruction-by-instruction:
   *   l.mfspr %0,r0,0x2  -- read SPR 2 (CPUCFGR) into _output
   *   l.andi  %0,%0,0x60 -- keep the OB32S/OB64S bits (0x20|0x40),
   *                         i.e. the operand bit width, 32 or 64
   *   l.ff1   %1,%1,r0   -- find-first-1 of _value (clobbers _value)
   *   l.sub   %0,%0,%1   -- _output = width - ff1(_value)
   * NOTE(review): the exact bit numbering produced (LSB- vs
   * MSB-relative, 0- vs 1-based) follows l.ff1 semantics -- confirm
   * against the OpenRISC 1000 architecture manual.
   */
#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
     asm volatile ("l.mfspr %0,r0,0x2   \n\t"\
                   "l.andi  %0,%0,0x60  \n\t"\
                   "l.ff1   %1,%1,r0    \n\t"\
                   "l.sub   %0,%0,%1    \n\t" : "=&r" (_output), "+r" (_value));

#endif
882
/* end of Bitfield handler macros */
883
 
884
/*
 *  This routine builds the mask which corresponds to the bit fields
 *  as searched by _CPU_Bitfield_Find_first_bit().  See the discussion
 *  for that routine.
 *
 */

#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)

/* The argument is parenthesized so that expression arguments such as
   "a | b" expand correctly: "<<" binds tighter than "|", so the
   unparenthesized form "(1 << a | b)" would mean "(1 << a) | b". */
#define _CPU_Priority_Mask( _bit_number ) \
    (1 << (_bit_number))

#endif
898
/*
 *  This routine translates the bit numbers returned by
 *  _CPU_Bitfield_Find_first_bit() into something suitable for use as
 *  a major or minor component of a priority.  See the discussion
 *  for that routine.
 *
 */

#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)

/* Identity mapping: the bit number from the ff1-based search is used
   directly, so no translation ("massaging") is required. */
#define _CPU_Priority_bits_index( _priority ) \
  (_priority)

#endif
913
/* end of Priority handler macros */
914
 
915
/* functions */
916
 
917
/*
 *  _CPU_Initialize
 *
 *  This routine performs CPU dependent initialization.
 *
 *  cpu_table       - pointer to the CPU dependent configuration table
 *  thread_dispatch - presumably the address of _Thread_Dispatch, as
 *                    in other RTEMS ports -- confirm in cpu.c
 *
 *  NOTE(review): "void (*thread_dispatch)" has no parameter list, so
 *  the parentheses are mere grouping and this parameter is a plain
 *  pointer to void, not a function pointer -- historical quirk.
 *
 */

void _CPU_Initialize(
  rtems_cpu_table  *cpu_table,
  void      (*thread_dispatch)
);
929
/*
 *  _CPU_ISR_install_raw_handler
 *
 *  This routine installs a "raw" interrupt handler directly into the
 *  processor's vector table.
 *
 *  vector      - index of the vector to replace
 *  new_handler - handler to install
 *  old_handler - the previously installed handler is returned here
 *
 */

void _CPU_ISR_install_raw_handler(
  unsigned32  vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);
943
/*
 *  _CPU_ISR_install_vector
 *
 *  This routine installs an interrupt vector (the RTEMS-managed
 *  counterpart of _CPU_ISR_install_raw_handler above).
 *
 *  vector      - index of the vector to replace
 *  new_handler - handler to install
 *  old_handler - the previously installed handler is returned here
 *
 *  NO_CPU Specific Information:
 *
 *  XXX document implementation including references if appropriate
 */

void _CPU_ISR_install_vector(
  unsigned32  vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);
959
/*
 *  _CPU_Install_interrupt_stack
 *
 *  This routine installs the hardware interrupt stack pointer.
 *
 *  NOTE:  It need only be provided if CPU_HAS_HARDWARE_INTERRUPT_STACK
 *         is TRUE.
 *
 */

void _CPU_Install_interrupt_stack( void );
971
/*
 *  _CPU_Thread_Idle_body
 *
 *  This routine is the CPU dependent IDLE thread body.  It never
 *  returns; RTEMS runs it whenever no other thread is ready.
 *
 *  NOTE:  It need only be provided if CPU_PROVIDES_IDLE_THREAD_BODY
 *         is TRUE.
 *
 */

void _CPU_Thread_Idle_body( void );
983
/*
 *  _CPU_Context_switch
 *
 *  This routine switches from the run context to the heir context.
 *
 *  run  - context of the currently executing thread (saved into)
 *  heir - context of the thread to switch to (restored from)
 *
 *  Or1k Specific Information:
 *
 *  Please see the comments in the .c file for a description of how
 *  this function works. There are several things to be aware of.
 */

void _CPU_Context_switch(
  Context_Control  *run,
  Context_Control  *heir
);
999
/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
 *  Unlike _CPU_Context_switch() it does not save the current context.
 *
 *  NOTE: May be unnecessary to reload some registers.
 *
 */

void _CPU_Context_restore(
  Context_Control *new_context
);
1013
/*
 *  _CPU_Context_save_fp
 *
 *  This routine saves the floating point context passed to it.
 *
 *  fp_context_ptr - address of the (void *) that points at the FP
 *                   context area to save into
 *
 */

void _CPU_Context_save_fp(
  void **fp_context_ptr
);
1024
/*
 *  _CPU_Context_restore_fp
 *
 *  This routine restores the floating point context passed to it.
 *
 *  fp_context_ptr - address of the (void *) that points at the FP
 *                   context area to restore from
 *
 */

void _CPU_Context_restore_fp(
  void **fp_context_ptr
);
1035
/*  CPU_swap_u32
 *
 *  Swap the endian (byte) order of a 32-bit quantity.  It must be
 *  static because it is referenced indirectly.
 *
 *  This portable version isolates each byte with a mask and moves it
 *  to the mirrored position.  If the CPU offers a single-instruction
 *  byte swap (e.g. i486 bswap) that should be preferred.  Using an
 *  "endian swapping control bit" in the CPU is best avoided: it would
 *  require disabling interrupts around each access, and on some CPUs
 *  such a bit changes the endianness of ALL fetches -- both code and
 *  data -- so the code itself would be fetched incorrectly.
 *
 */

static inline unsigned int CPU_swap_u32(
  unsigned int value
)
{
  unsigned int swapped;

  swapped  = (value & 0x000000ff) << 24;   /* byte 0 -> byte 3 */
  swapped |= (value & 0x0000ff00) << 8;    /* byte 1 -> byte 2 */
  swapped |= (value & 0x00ff0000) >> 8;    /* byte 2 -> byte 1 */
  swapped |= (value >> 24) & 0x000000ff;   /* byte 3 -> byte 0 */

  return swapped;
}
1071
/* Swap the byte order of a 16-bit quantity.  The argument is fully
   parenthesized so expression arguments (e.g. "a | b") expand
   correctly; it is still evaluated twice, so avoid arguments with
   side effects. */
#define CPU_swap_u16( value ) \
  ((((value) & 0xff) << 8) | (((value) >> 8) & 0xff))
1074
#ifdef __cplusplus
1075
}
1076
#endif
1077
 
1078
#endif

powered by: WebSVN 2.1.0

© copyright 1999-2025 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.