OpenCores
URL https://opencores.org/ocsvn/openrisc_me/openrisc_me/trunk

Subversion Repositories openrisc_me

[/] [openrisc/] [trunk/] [rtos/] [rtems/] [c/] [src/] [exec/] [score/] [cpu/] [or1k/] [rtems/] [score/] [cpu.h] - Blame information for rev 173

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 30 unneback
/*  cpu.h
2
 *
3
 *  This include file contains macros pertaining to the Opencores
4
 *  or1k processor family.
5
 *
6
 *  COPYRIGHT (c) 1989-1999.
7
 *  On-Line Applications Research Corporation (OAR).
8
 *
9
 *  The license and distribution terms for this file may be
10
 *  found in the file LICENSE in this distribution or at
11
 *  http://www.OARcorp.com/rtems/license.html.
12
 *
13
 *  This file adapted from no_cpu example of the RTEMS distribution.
14
 *  The body has been modified for the Opencores Or1k implementation by
15
 *  Chris Ziomkowski. <chris@asics.ws>
16
 *
17
 */
18
 
19
#ifndef _OR1K_CPU_h
20
#define _OR1K_CPU_h
21
 
22
#ifdef __cplusplus
23
extern "C" {
24
#endif
25
 
26
#include "rtems/score/or1k.h"            /* pick up machine definitions */
27
#ifndef ASM
28
#include "rtems/score/or1ktypes.h"
29
#endif
30
 
31
/* conditional compilation parameters */
32
 
33
/*
34
 *  Should the calls to _Thread_Enable_dispatch be inlined?
35
 *
36
 *  If TRUE, then they are inlined.
37
 *  If FALSE, then a subroutine call is made.
38
 *
39
 *  Basically this is an example of the classic trade-off of size
40
 *  versus speed.  Inlining the call (TRUE) typically increases the
41
 *  size of RTEMS while speeding up the enabling of dispatching.
42
 *  [NOTE: In general, the _Thread_Dispatch_disable_level will
43
 *  only be 0 or 1 unless you are in an interrupt handler and that
44
 *  interrupt handler invokes the executive.]  When not inlined
45
 *  something calls _Thread_Enable_dispatch which in turns calls
46
 *  _Thread_Dispatch.  If the enable dispatch is inlined, then
47
 *  one subroutine call is avoided entirely.]
48
 *
49
 */
50
 
51
#define CPU_INLINE_ENABLE_DISPATCH       FALSE
52
 
53
/*
54
 *  Should the body of the search loops in _Thread_queue_Enqueue_priority
55
 *  be unrolled one time?  If unrolled, each iteration of the loop examines
56
 *  two "nodes" on the chain being searched.  Otherwise, only one node
57
 *  is examined per iteration.
58
 *
59
 *  If TRUE, then the loops are unrolled.
60
 *  If FALSE, then the loops are not unrolled.
61
 *
62
 *  The primary factor in making this decision is the cost of disabling
63
 *  and enabling interrupts (_ISR_Flash) versus the cost of rest of the
64
 *  body of the loop.  On some CPUs, the flash is more expensive than
65
 *  one iteration of the loop body.  In this case, it might be desirable
66
 *  to unroll the loop.  It is important to note that on some CPUs, this
67
 *  code is the longest interrupt disable period in RTEMS.  So it is
68
 *  necessary to strike a balance when setting this parameter.
69
 *
70
 */
71
 
72
#define CPU_UNROLL_ENQUEUE_PRIORITY      TRUE
73
 
74
/*
75
 *  Does RTEMS manage a dedicated interrupt stack in software?
76
 *
77
 *  If TRUE, then a stack is allocated in _ISR_Handler_initialization.
78
 *  If FALSE, nothing is done.
79
 *
80
 *  If the CPU supports a dedicated interrupt stack in hardware,
81
 *  then it is generally the responsibility of the BSP to allocate it
82
 *  and set it up.
83
 *
84
 *  If the CPU does not support a dedicated interrupt stack, then
85
 *  the porter has two options: (1) execute interrupts on the
86
 *  stack of the interrupted task, and (2) have RTEMS manage a dedicated
87
 *  interrupt stack.
88
 *
89
 *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
90
 *
91
 *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
92
 *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
93
 *  possible that both are FALSE for a particular CPU.  Although it
94
 *  is unclear what that would imply about the interrupt processing
95
 *  procedure on that CPU.
96
 *
97
 *  For the first cut of an Or1k implementation, let's not worry
98
 *  about this, and assume that our C code will autoperform any
99
 *  frame/stack allocation for us when the procedure is entered.
100
 *  If we write assembly code, we may have to deal with this manually.
101
 *  This can be changed later if we find it is impossible. This
102
 *  behavior is desirable as it allows us to work in low memory
103
 *  environments where we don't have room for a dedicated stack.
104
 */
105
 
106
#define CPU_HAS_SOFTWARE_INTERRUPT_STACK FALSE
107
 
108
/*
109
 *  Does this CPU have hardware support for a dedicated interrupt stack?
110
 *
111
 *  If TRUE, then it must be installed during initialization.
112
 *  If FALSE, then no installation is performed.
113
 *
114
 *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
115
 *
116
 *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
117
 *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
118
 *  possible that both are FALSE for a particular CPU.  Although it
119
 *  is unclear what that would imply about the interrupt processing
120
 *  procedure on that CPU.
121
 *
122
 */
123
 
124
#define CPU_HAS_HARDWARE_INTERRUPT_STACK FALSE
125
 
126
/*
127
 *  Does RTEMS allocate a dedicated interrupt stack in the Interrupt Manager?
128
 *
129
 *  If TRUE, then the memory is allocated during initialization.
130
 *  If FALSE, then the memory is NOT allocated during initialization.
131
 *
132
 *  This should be TRUE if CPU_HAS_SOFTWARE_INTERRUPT_STACK is TRUE
133
 *  or CPU_INSTALL_HARDWARE_INTERRUPT_STACK is TRUE.
134
 *
135
 */
136
 
137
#define CPU_ALLOCATE_INTERRUPT_STACK FALSE
138
 
139
/*
140
 *  Does the RTEMS invoke the user's ISR with the vector number and
141
 *  a pointer to the saved interrupt frame (1) or just the vector
142
 *  number (0)?
143
 *
144
 */
145
 
146
#define CPU_ISR_PASSES_FRAME_POINTER 0
147
 
148
/*
149
 *  Does the CPU have hardware floating point?
150
 *
151
 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is supported.
152
 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is ignored.
153
 *
154
 *  If there is a FP coprocessor such as the i387 or mc68881, then
155
 *  the answer is TRUE.
156
 *
157
 *  The macro name "OR1K_HAS_FPU" should be made CPU specific.
158
 *  It indicates whether or not this CPU model has FP support.  For
159
 *  example, it would be possible to have an i386_nofp CPU model
160
 *  which set this to false to indicate that you have an i386 without
161
 *  an i387 and wish to leave floating point support out of RTEMS.
162
 *
163
 *  The CPU_SOFTWARE_FP is used to indicate whether or not there
164
 *  is software implemented floating point that must be context
165
 *  switched.  The determination of whether or not this applies
166
 *  is very tool specific and the state saved/restored is also
167
 *  compiler specific.
168
 *
169
 *  Or1k Specific Information:
170
 *
171
 *  At this time there are no implementations of Or1k that are
172
 *  expected to implement floating point. More importantly, the
173
 *  floating point architecture is expected to change significantly
174
 *  before such chips are fabricated.
175
 */
176
 
177
#if ( OR1K_HAS_FPU == 1 )
178
#define CPU_HARDWARE_FP     TRUE
179
#define CPU_SOFTWARE_FP     FALSE
180
#else
181
#define CPU_HARDWARE_FP     FALSE
182
#define CPU_SOFTWARE_FP     TRUE
183
#endif
184
 
185
 
186
/*
187
 *  Are all tasks RTEMS_FLOATING_POINT tasks implicitly?
188
 *
189
 *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is assumed.
190
 *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is followed.
191
 *
192
 *  So far, the only CPU in which this option has been used is the
193
 *  HP PA-RISC.  The HP C compiler and gcc both implicitly use the
194
 *  floating point registers to perform integer multiplies.  If
195
 *  a function which you would not think utilize the FP unit DOES,
196
 *  then one can not easily predict which tasks will use the FP hardware.
197
 *  In this case, this option should be TRUE.
198
 *
199
 *  If CPU_HARDWARE_FP is FALSE, then this should be FALSE as well.
200
 *
201
 */
202
 
203
#define CPU_ALL_TASKS_ARE_FP     FALSE
204
 
205
/*
206
 *  Should the IDLE task have a floating point context?
207
 *
208
 *  If TRUE, then the IDLE task is created as a RTEMS_FLOATING_POINT task
209
 *  and it has a floating point context which is switched in and out.
210
 *  If FALSE, then the IDLE task does not have a floating point context.
211
 *
212
 *  Setting this to TRUE negatively impacts the time required to preempt
213
 *  the IDLE task from an interrupt because the floating point context
214
 *  must be saved as part of the preemption.
215
 *
216
 */
217
 
218
#define CPU_IDLE_TASK_IS_FP      FALSE
219
 
220
/*
221
 *  Should the saving of the floating point registers be deferred
222
 *  until a context switch is made to another different floating point
223
 *  task?
224
 *
225
 *  If TRUE, then the floating point context will not be stored until
226
 *  necessary.  It will remain in the floating point registers and not
227
 *  disturbed until another floating point task is switched to.
228
 *
229
 *  If FALSE, then the floating point context is saved when a floating
230
 *  point task is switched out and restored when the next floating point
231
 *  task is restored.  The state of the floating point registers between
232
 *  those two operations is not specified.
233
 *
234
 *  If the floating point context does NOT have to be saved as part of
235
 *  interrupt dispatching, then it should be safe to set this to TRUE.
236
 *
237
 *  Setting this flag to TRUE results in using a different algorithm
238
 *  for deciding when to save and restore the floating point context.
239
 *  The deferred FP switch algorithm minimizes the number of times
240
 *  the FP context is saved and restored.  The FP context is not saved
241
 *  until a context switch is made to another, different FP task.
242
 *  Thus in a system with only one FP task, the FP context will never
243
 *  be saved or restored.
244
 *
245
 */
246
 
247
#define CPU_USE_DEFERRED_FP_SWITCH       TRUE
248
 
249
/*
250
 *  Does this port provide a CPU dependent IDLE task implementation?
251
 *
252
 *  If TRUE, then the routine _CPU_Thread_Idle_body
253
 *  must be provided and is the default IDLE thread body instead of
254
 *  _Thread_Idle_body.
255
 *
256
 *  If FALSE, then use the generic IDLE thread body if the BSP does
257
 *  not provide one.
258
 *
259
 *  This is intended to allow for supporting processors which have
260
 *  a low power or idle mode.  When the IDLE thread is executed, then
261
 *  the CPU can be powered down.
262
 *
263
 *  The order of precedence for selecting the IDLE thread body is:
264
 *
265
 *    1.  BSP provided
266
 *    2.  CPU dependent (if provided)
267
 *    3.  generic (if no BSP and no CPU dependent)
268
 *
269
 */
270
 
271
#define CPU_PROVIDES_IDLE_THREAD_BODY    FALSE
272
 
273
/*
274
 *  Does the stack grow up (toward higher addresses) or down
275
 *  (toward lower addresses)?
276
 *
277
 *  If TRUE, then the stack grows upward.
278
 *  If FALSE, then the stack grows toward smaller addresses.
279
 *
280
 *  Or1k Specific Information:
281
 *
282
 *  Previously I had misread the documentation and set this
283
 *  to true. Surprisingly, it seemed to work anyway. I'm
284
 *  therefore not 100% sure exactly what this does. It should
285
 *  be correct as it is now, however.
286
 */
287
 
288
#define CPU_STACK_GROWS_UP               FALSE
289
 
290
/*
291
 *  The following is the variable attribute used to force alignment
292
 *  of critical RTEMS structures.  On some processors it may make
293
 *  sense to have these aligned on tighter boundaries than
294
 *  the minimum requirements of the compiler in order to have as
295
 *  much of the critical data area as possible in a cache line.
296
 *
297
 *  The placement of this macro in the declaration of the variables
298
 *  is based on the syntactic requirements of the GNU C
299
 *  "__attribute__" extension.  For example with GNU C, use
300
 *  the following to force a structure to a 32 byte boundary.
301
 *
302
 *      __attribute__ ((aligned (32)))
303
 *
304
 *  NOTE:  Currently only the Priority Bit Map table uses this feature.
305
 *         To benefit from using this, the data must be heavily
306
 *         used so it will stay in the cache and used frequently enough
307
 *         in the executive to justify turning this on.
308
 *
309
 */
310
 
311
#define CPU_STRUCTURE_ALIGNMENT __attribute__ ((aligned (32)))
312
 
313
/*
314
 *  Define what is required to specify how the network to host conversion
315
 *  routines are handled.
316
 *
317
 *  Or1k Specific Information:
318
 *
319
 *  This version of RTEMS is designed specifically to run with
320
 *  big endian architectures. If you want little endian, you'll
321
 *  have to make the appropriate adjustments here and write
322
 *  efficient routines for byte swapping. The Or1k architecture
323
 *  doesn't do this very well.
324
 */
325
 
326
#define CPU_HAS_OWN_HOST_TO_NETWORK_ROUTINES     FALSE
327
#define CPU_BIG_ENDIAN                           TRUE
328
#define CPU_LITTLE_ENDIAN                        FALSE
329
 
330
/*
331
 *  The following defines the number of bits actually used in the
332
 *  interrupt field of the task mode.  How those bits map to the
333
 *  CPU interrupt levels is defined by the routine _CPU_ISR_Set_level().
334
 *
335
 */
336
 
337
#define CPU_MODES_INTERRUPT_MASK   0x00000001
338
 
339
/*
340
 *  Processor defined structures
341
 *
342
 *  Examples structures include the descriptor tables from the i386
343
 *  and the processor control structure on the i960ca.
344
 *
345
 */
346
 
347
 
348
/*
349
 * Contexts
350
 *
351
 *  Generally there are 2 types of context to save.
352
 *     1. Interrupt registers to save
353
 *     2. Task level registers to save
354
 *
355
 *  This means we have the following 3 context items:
356
 *     1. task level context stuff::  Context_Control
357
 *     2. floating point task stuff:: Context_Control_fp
358
 *     3. special interrupt level context :: Context_Control_interrupt
359
 *
360
 *  On some processors, it is cost-effective to save only the callee
361
 *  preserved registers during a task context switch.  This means
362
 *  that the ISR code needs to save those registers which do not
363
 *  persist across function calls.  It is not mandatory to make this
364
 *  distinctions between the caller/callee saves registers for the
365
 *  purpose of minimizing context saved during task switch and on interrupts.
366
 *  If the cost of saving extra registers is minimal, simplicity is the
367
 *  choice.  Save the same context on interrupt entry as for tasks in
368
 *  this case.
369
 *
370
 *  Additionally, if gdb is to be made aware of RTEMS tasks for this CPU, then
371
 *  care should be used in designing the context area.
372
 *
373
 *  On some CPUs with hardware floating point support, the Context_Control_fp
374
 *  structure will not be used or it simply consist of an array of a
375
 *  fixed number of bytes.   This is done when the floating point context
376
 *  is dumped by a "FP save context" type instruction and the format
377
 *  is not really defined by the CPU.  In this case, there is no need
378
 *  to figure out the exact format -- only the size.  Of course, although
379
 *  this is enough information for RTEMS, it is probably not enough for
380
 *  a debugger such as gdb.  But that is another problem.
381
 *
382
 *
383
 */
384
 
385
/*
 *  or1kreg is the natural width of an Or1k general purpose register:
 *  64 bits on a 64-bit implementation of the architecture, 32 bits
 *  otherwise.  It is used below for the GPR slots in Context_Control.
 */
#ifdef OR1K_64BIT_ARCH
#define or1kreg unsigned64
#else
#define or1kreg unsigned32
#endif
390
 
391
/* SR_MASK is the mask of values that will be copied to/from the status
392
   register on a context switch. Some values, like the flag state, are
393
   specific on the context, while others, such as interrupt enables,
394
   are global. The currently defined global bits are:
395
 
396
   0x00001 SUPV:     Supervisor mode
397
   0x00002 EXR:      Exceptions on/off
398
   0x00004 EIR:      Interrupts enabled/disabled
399
   0x00008 DCE:      Data cache enabled/disabled
400
   0x00010 ICE:      Instruction cache enabled/disabled
401
   0x00020 DME:      Data MMU enabled/disabled
402
   0x00040 IME:      Instruction MMU enabled/disabled
403
   0x00080 LEE:      Little/Big Endian enable
404
   0x00100 CE:       Context ID/shadow regs enabled/disabled
405
   0x01000 OVE:      Overflow causes exception
406
   0x04000 EP:       Exceptions @ 0x0 or 0xF0000000
407
   0x08000 PXR:      Partial exception recognition enabled/disabled
408
   0x10000 SUMRA:    SPR's accessible/inaccessible
409
 
410
   The context specific bits are:
411
 
412
   0x00200 F         Branch flag indicator
413
   0x00400 CY        Carry flag indicator
414
   0x00800 OV        Overflow flag indicator
415
   0x02000 DSX       Delay slot exception occurred
416
   0xF8000000 CID    Current Context ID
417
*/
418
 
419
#define SR_MASK 0xF8002E00
420
 
421
/*
 *  Symbolic names for the bits of the Or1k status register (SR).
 *  See the SR_MASK discussion above: SR_F, SR_CY, SR_OV, SR_DSX and
 *  SR_CID are context-specific (covered by SR_MASK); the rest are
 *  global system state.
 *
 *  NOTE: the trailing comma after the final enumerator was removed;
 *  it is only legal C starting with C99 and this file otherwise
 *  targets older compilers.
 */
typedef enum {
  SR_SUPV  = 0x00001,      /* Supervisor mode */
  SR_EXR   = 0x00002,      /* Exceptions on/off */
  SR_EIR   = 0x00004,      /* Interrupts enabled/disabled */
  SR_DCE   = 0x00008,      /* Data cache enabled/disabled */
  SR_ICE   = 0x00010,      /* Instruction cache enabled/disabled */
  SR_DME   = 0x00020,      /* Data MMU enabled/disabled */
  SR_IME   = 0x00040,      /* Instruction MMU enabled/disabled */
  SR_LEE   = 0x00080,      /* Little/Big Endian enable */
  SR_CE    = 0x00100,      /* Context ID/shadow regs enabled/disabled */
  SR_F     = 0x00200,      /* Branch flag indicator (per context) */
  SR_CY    = 0x00400,      /* Carry flag indicator (per context) */
  SR_OV    = 0x00800,      /* Overflow flag indicator (per context) */
  SR_OVE   = 0x01000,      /* Overflow causes exception */
  SR_DSX   = 0x02000,      /* Delay slot exception occurred (per context) */
  SR_EP    = 0x04000,      /* Exceptions at 0x0 or 0xF0000000 */
  SR_PXR   = 0x08000,      /* Partial exception recognition on/off */
  SR_SUMRA = 0x10000,      /* SPRs accessible/inaccessible */
  SR_CID   = 0xF8000000    /* Current Context ID (per context) */
} StatusRegisterBits;
441
 
442
/*
 *  Task-level register context saved and restored on a context switch.
 *  The same layout is reused for the interrupt frame (see the
 *  CPU_Interrupt_frame typedef below).
 */
typedef struct {
  unsigned32  sr;     /* Current status register non persistent values */
  unsigned32  esr;    /* Saved exception status register */
  unsigned32  ear;    /* Saved exception effective address register */
  unsigned32  epc;    /* Saved exception PC register    */
  or1kreg     r[31];  /* Registers: _CPU_Context_Initialize sets r[1] to the
                         stack top and r[2] to the stack base */
  or1kreg     pc;     /* Context PC 4 or 8 bytes for 64 bit alignment */
} Context_Control;
450
 
451
/*
 *  This port has no hardware floating point support (CPU_CONTEXT_FP_SIZE
 *  is 0 below), so the FP context type is a dummy and the null FP
 *  context is just the constant 0.
 */
typedef int Context_Control_fp;

/* The interrupt frame is simply the full task context for this port. */
typedef Context_Control CPU_Interrupt_frame;

#define _CPU_Null_fp_context 0        /* no real FP context to copy */
/* Placeholders: CPU_HAS_SOFTWARE_INTERRUPT_STACK is FALSE, so no
   software interrupt stack bounds are actually maintained. */
#define _CPU_Interrupt_stack_low 0
#define _CPU_Interrupt_stack_high 0
456
 
457
/*
458
 *  The following table contains the information required to configure
459
 *  the Or1k processor specific parameters.
460
 *
461
 */
462
 
463
/*
 *  CPU configuration table.  Only the fields required on all CPUs are
 *  present; this port adds no CPU-specific fields (see note below).
 *  Accessor macros live in rtems/system.h.
 */
typedef struct {
  void       (*pretasking_hook)( void );   /* BSP initialization hooks    */
  void       (*predriver_hook)( void );
  void       (*postdriver_hook)( void );
  void       (*idle_task)( void );         /* optional BSP idle task body */
  boolean      do_zero_of_workspace;       /* zero workspace at startup?  */
  unsigned32   idle_task_stack_size;       /* sizes in bytes              */
  unsigned32   interrupt_stack_size;
  unsigned32   extra_mpci_receive_server_stack;
  void *     (*stack_allocate_hook)( unsigned32 );  /* custom task stack  */
  void       (*stack_free_hook)( void* );           /*   allocate/free    */
  /* end of fields required on all CPUs */
}   rtems_cpu_table;
476
 
477
/*
478
 *  Macros to access required entries in the CPU Table are in
479
 *  the file rtems/system.h.
480
 *
481
 */
482
 
483
/*
484
 *  Macros to access OR1K specific additions to the CPU Table
485
 *
486
 */
487
 
488
/* There are no CPU specific additions to the CPU Table for this port. */
489
 
490
/*
491
 *  This variable is optional.  It is used on CPUs on which it is difficult
492
 *  to generate an "uninitialized" FP context.  It is filled in by
493
 *  _CPU_Initialize and copied into the task's FP context area during
494
 *  _CPU_Context_Initialize.
495
 *
496
 */
497
 
498
/* SCORE_EXTERN Context_Control_fp  _CPU_Null_fp_context; */
499
 
500
/*
501
 *  On some CPUs, RTEMS supports a software managed interrupt stack.
502
 *  This stack is allocated by the Interrupt Manager and the switch
503
 *  is performed in _ISR_Handler.  These variables contain pointers
504
 *  to the lowest and highest addresses in the chunk of memory allocated
505
 *  for the interrupt stack.  Since it is unknown whether the stack
506
 *  grows up or down (in general), this give the CPU dependent
507
 *  code the option of picking the version it wants to use.
508
 *
509
 *  NOTE: These two variables are required if the macro
510
 *        CPU_HAS_SOFTWARE_INTERRUPT_STACK is defined as TRUE.
511
 *
512
 */
513
 
514
/*
515
SCORE_EXTERN void               *_CPU_Interrupt_stack_low;
516
SCORE_EXTERN void               *_CPU_Interrupt_stack_high;
517
*/
518
 
519
/*
520
 *  With some compilation systems, it is difficult if not impossible to
521
 *  call a high-level language routine from assembly language.  This
522
 *  is especially true of commercial Ada compilers and name mangling
523
 *  C++ ones.  This variable can be optionally defined by the CPU porter
524
 *  and contains the address of the routine _Thread_Dispatch.  This
525
 *  can make it easier to invoke that routine at the end of the interrupt
526
 *  sequence (if a dispatch is necessary).
527
 *
528
 */
529
 
530
SCORE_EXTERN void           (*_CPU_Thread_dispatch_pointer)();
531
 
532
/*
533
 *  Nothing prevents the porter from declaring more CPU specific variables.
534
 *
535
 */
536
 
537
/* XXX: if needed, put more variables here */
538
 
539
/*
540
 *  The size of the floating point context area.  On some CPUs this
541
 *  will not be a "sizeof" because the format of the floating point
542
 *  area is not defined -- only the size is.  This is usually on
543
 *  CPUs with a "floating point save context" instruction.
544
 *
545
 *  Or1k Specific Information:
546
 *
547
 *  We don't support floating point in this version, so the size is 0
548
 */
549
 
550
#define CPU_CONTEXT_FP_SIZE 0
551
 
552
/*
553
 *  Amount of extra stack (above minimum stack size) required by
554
 *  MPCI receive server thread.  Remember that in a multiprocessor
555
 *  system this thread must exist and be able to process all directives.
556
 *
557
 */
558
 
559
#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0
560
 
561
/*
562
 *  This defines the number of entries in the ISR_Vector_table managed
563
 *  by RTEMS.
564
 *
565
 */
566
 
567
#define CPU_INTERRUPT_NUMBER_OF_VECTORS      16
568
#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER  (CPU_INTERRUPT_NUMBER_OF_VECTORS - 1)
569
 
570
/*
571
 *  Should be large enough to run all RTEMS tests.  This insures
572
 *  that a "reasonable" small application should not have any problems.
573
 *
574
 */
575
 
576
#define CPU_STACK_MINIMUM_SIZE          4096
577
 
578
/*
579
 *  CPU's worst alignment requirement for data types on a byte boundary.  This
580
 *  alignment does not take into account the requirements for the stack.
581
 *
582
 */
583
 
584
#define CPU_ALIGNMENT              8
585
 
586
/*
587
 *  This number corresponds to the byte alignment requirement for the
588
 *  heap handler.  This alignment requirement may be stricter than that
589
 *  for the data types alignment specified by CPU_ALIGNMENT.  It is
590
 *  common for the heap to follow the same alignment requirement as
591
 *  CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict enough for the heap,
592
 *  then this should be set to CPU_ALIGNMENT.
593
 *
594
 *  NOTE:  This does not have to be a power of 2 although it should be
595
 *         a multiple of 2 greater than or equal to 2.  The requirement
596
 *         to be a multiple of 2 is because the heap uses the least
597
 *         significant field of the front and back flags to indicate
598
 *         that a block is in use or free.  So you do not want any odd
599
 *         length blocks really putting length data in that bit.
600
 *
601
 *         On byte oriented architectures, CPU_HEAP_ALIGNMENT normally will
602
 *         have to be greater or equal to than CPU_ALIGNMENT to ensure that
603
 *         elements allocated from the heap meet all restrictions.
604
 *
605
 */
606
 
607
#define CPU_HEAP_ALIGNMENT         CPU_ALIGNMENT
608
 
609
/*
610
 *  This number corresponds to the byte alignment requirement for memory
611
 *  buffers allocated by the partition manager.  This alignment requirement
612
 *  may be stricter than that for the data types alignment specified by
613
 *  CPU_ALIGNMENT.  It is common for the partition to follow the same
614
 *  alignment requirement as CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict
615
 *  enough for the partition, then this should be set to CPU_ALIGNMENT.
616
 *
617
 *  NOTE:  This does not have to be a power of 2.  It does have to
618
 *         be greater or equal to than CPU_ALIGNMENT.
619
 *
620
 */
621
 
622
#define CPU_PARTITION_ALIGNMENT    CPU_ALIGNMENT
623
 
624
/*
625
 *  This number corresponds to the byte alignment requirement for the
626
 *  stack.  This alignment requirement may be stricter than that for the
627
 *  data types alignment specified by CPU_ALIGNMENT.  If the CPU_ALIGNMENT
628
 *  is strict enough for the stack, then this should be set to 0.
629
 *
630
 *  NOTE:  This must be a power of 2 either 0 or greater than CPU_ALIGNMENT.
631
 *
632
 */
633
 
634
#define CPU_STACK_ALIGNMENT        0
635
 
636
/* ISR handler macros */
637
 
638
/*
639
 *  Disable all interrupts for an RTEMS critical section.  The previous
640
 *  level is returned in _level.
641
 *
642
 */
643
 
644
/*
 *  Disable all interrupts for an RTEMS critical section; the previous
 *  level is returned in _isr_cookie.  This stub port performs no real
 *  masking and only zeroes the cookie to prevent "used uninitialized"
 *  warnings.  do/while(0) makes the expansion a single statement, so
 *  `_CPU_ISR_Disable( l );` is safe in an unbraced if/else body.
 */
#define _CPU_ISR_Disable( _isr_cookie ) \
  do { \
    (_isr_cookie) = 0;   /* do something to prevent warnings */ \
  } while ( 0 )
648
 
649
/*
650
 *  Enable interrupts to the previous level (returned by _CPU_ISR_Disable).
651
 *  This indicates the end of an RTEMS critical section.  The parameter
652
 *  _level is not modified.
653
 *
654
 */
655
 
656
/*
 *  Restore interrupts to the level saved by _CPU_ISR_Disable; ends an
 *  RTEMS critical section.  This stub port does nothing; the cookie is
 *  evaluated and discarded so an otherwise-unused variable does not
 *  warn.  do/while(0) keeps the expansion a single statement (safe in
 *  unbraced if/else bodies).
 */
#define _CPU_ISR_Enable( _isr_cookie )  \
  do { \
    (void) (_isr_cookie); \
  } while ( 0 )
659
 
660
/*
661
 *  This temporarily restores the interrupt to _level before immediately
662
 *  disabling them again.  This is used to divide long RTEMS critical
663
 *  sections into two or more parts.  The parameter _level is not
664
 * modified.
665
 *
666
 */
667
 
668
/*
 *  Temporarily restore interrupts to _isr_cookie's level and disable
 *  them again, splitting a long critical section.  This stub port does
 *  nothing; the cookie is evaluated and discarded to avoid unused
 *  warnings.  do/while(0) keeps the expansion a single statement.
 */
#define _CPU_ISR_Flash( _isr_cookie ) \
  do { \
    (void) (_isr_cookie); \
  } while ( 0 )
671
 
672
/*
673
 *  Map interrupt level in task mode onto the hardware that the CPU
674
 *  actually provides.  Currently, interrupt levels which do not
675
 *  map onto the CPU in a generic fashion are undefined.  Someday,
676
 *  it would be nice if these were "mapped" by the application
677
 *  via a callout.  For example, m68k has 8 levels 0 - 7, levels
678
 *  8 - 255 would be available for bsp/application specific meaning.
679
 *  This could be used to manage a programmable interrupt controller
680
 *  via the rtems_task_mode directive.
681
 *
682
 *  The get routine usually must be implemented as a subroutine.
683
 *
684
 */
685
 
686
/*
 *  Map an RTEMS interrupt level onto the hardware (see comment above).
 *  This stub port does nothing; new_level is evaluated and discarded
 *  to avoid unused warnings.  do/while(0) keeps the expansion a single
 *  statement.
 */
#define _CPU_ISR_Set_level( new_level ) \
  do { \
    (void) (new_level); \
  } while ( 0 )
689
 
690
unsigned32 _CPU_ISR_Get_level( void );
691
 
692
/* end of ISR handler macros */
693
 
694
/* Context handler macros */
695
 
696
/*
697
 *  Initialize the context to a state suitable for starting a
698
 *  task after a context restore operation.  Generally, this
699
 *  involves:
700
 *
701
 *     - setting a starting address
702
 *     - preparing the stack
703
 *     - preparing the stack and frame pointers
704
 *     - setting the proper interrupt level in the context
705
 *     - initializing the floating point context
706
 *
707
 *  This routine generally does not set any unnecessary register
708
 *  in the context.  The state of the "general data" registers is
709
 *  undefined at task start time.
710
 *
711
 *  NOTE: This is_fp parameter is TRUE if the thread is to be a floating
712
 *        point thread.  This is typically only used on CPUs where the
713
 *        FPU may be easily disabled by software such as on the SPARC
714
 *        where the PSR contains an enable FPU bit.
715
 *
716
 */
717
 
718
/*
 *  Build an initial context: zero everything, point r[1] (the stack
 *  pointer) at the top of the stack area, r[2] at its base, and pc at
 *  the entry point.  The status register is 0x1F = SUPV|EXR|EIR|DCE|ICE
 *  normally, or 0x1B (the same minus EIR, bit 0x4) when _isr is
 *  nonzero -- i.e. the task starts with interrupts disabled.
 *  _is_fp is ignored: this port has no FP context to set up.
 */
#define _CPU_Context_Initialize( _the_context, _stack_base, _size, \
                                 _isr, _entry_point, _is_fp ) \
  { \
  memset(_the_context,'\0',sizeof(Context_Control)); \
  (_the_context)->r[1] = (unsigned32*) ((unsigned32) (_stack_base) + (_size) ); \
  (_the_context)->r[2] = (unsigned32*) ((unsigned32) (_stack_base)); \
  (_the_context)->sr  = (_isr) ? 0x0000001B : 0x0000001F; \
  (_the_context)->pc  = (unsigned32*) _entry_point ; \
  }
727
 
728
/*
729
 *  This routine is responsible for somehow restarting the currently
730
 *  executing task.  If you are lucky, then all that is necessary
731
 *  is restoring the context.  Otherwise, there will need to be
732
 *  a special assembly routine which does something special in this
733
 *  case.  Context_Restore should work most of the time.  It will
734
 *  not work if restarting self conflicts with the stack frame
735
 *  assumptions of restoring a context.
736
 *
737
 */
738
 
739
/*
 *  Restart the currently executing task by simply restoring its
 *  context (see comment above for when this is insufficient).
 *  NOTE: the macro no longer supplies its own trailing semicolon, so
 *  `_CPU_Context_Restart_self( c );` at a call site expands to exactly
 *  one statement and is safe inside an unbraced if/else.
 */
#define _CPU_Context_Restart_self( _the_context ) \
   _CPU_Context_restore( (_the_context) )
741
 
742
/*
743
 *  The purpose of this macro is to allow the initial pointer into
744
 *  a floating point context area (used to save the floating point
745
 *  context) to be at an arbitrary place in the floating point
746
 *  context area.
747
 *
748
 *  This is necessary because some FP units are designed to have
749
 *  their context saved as a stack which grows into lower addresses.
750
 *  Other FP units can be saved by simply moving registers into offsets
751
 *  from the base of the context area.  Finally some FP units provide
752
 *  a "dump context" instruction which could fill in from high to low
753
 *  or low to high based on the whim of the CPU designers.
754
 *
755
 */
756
 
757
/*
 *  Return the initial pointer into an FP context area located _offset
 *  bytes into _base (see rationale above).  With CPU_CONTEXT_FP_SIZE
 *  set to 0 in this port, this is effectively unused.
 */
#define _CPU_Context_Fp_start( _base, _offset ) \
   ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )
759
 
760
/*
761
 *  This routine initializes the FP context area passed to it to.
762
 *  There are a few standard ways in which to initialize the
763
 *  floating point context.  The code included for this macro assumes
764
 *  that this is a CPU in which a "initial" FP context was saved into
765
 *  _CPU_Null_fp_context and it simply copies it to the destination
766
 *  context passed to it.
767
 *
768
 *  Other models include (1) not doing anything, and (2) putting
769
 *  a "null FP status word" in the correct place in the FP context.
770
 *
771
 */
772
 
773
/*
 *  Initialize an FP context area by copying the "null" FP context into
 *  it.  _destination is treated as a pointer to the FP context pointer;
 *  since _CPU_Null_fp_context is 0 and Context_Control_fp is int in
 *  this port, this just stores a zero through that pointer.
 */
#define _CPU_Context_Initialize_fp( _destination ) \
  { \
   *((Context_Control_fp *) *((void **) _destination)) = _CPU_Null_fp_context; \
  }
777
 
778
/* end of Context handler macros */
779
 
780
/* Fatal Error manager macros */
781
 
782
/*
783
 *  This routine copies _error into a known place -- typically a stack
784
 *  location or a register, optionally disables interrupts, and
785
 *  halts/stops the CPU.
786
 *
787
 */
788
 
789
/*  Halt the CPU on a fatal error.  The port previously left this
 *  macro empty, so a fatal error fell straight through and execution
 *  continued; spin forever instead.  _error is evaluated (preserving
 *  any side effects in the caller's expression) but otherwise
 *  ignored -- there is no debug register to park it in here.
 */
#define _CPU_Fatal_halt( _error ) \
  { \
    (void) (_error); \
    for (;;) \
      ; \
  }
792
 
793
/* end of Fatal Error manager macros */
794
 
795
/* Bitfield handler macros */
796
 
797
/*
798
 *  This routine sets _output to the bit number of the first bit
799
 *  set in _value.  _value is of CPU dependent type Priority_Bit_map_control.
800
 *  This type may be either 16 or 32 bits wide although only the 16
801
 *  least significant bits will be used.
802
 *
803
 *  There are a number of variables in using a "find first bit" type
804
 *  instruction.
805
 *
806
 *    (1) What happens when run on a value of zero?
807
 *    (2) Bits may be numbered from MSB to LSB or vice-versa.
808
 *    (3) The numbering may be zero or one based.
809
 *    (4) The "find first bit" instruction may search from MSB or LSB.
810
 *
811
 *  RTEMS guarantees that (1) will never happen so it is not a concern.
812
 *  (2),(3), (4) are handled by the macros _CPU_Priority_mask() and
813
 *  _CPU_Priority_bits_index().  These three form a set of routines
814
 *  which must logically operate together.  Bits in the _value are
815
 *  set and cleared based on masks built by _CPU_Priority_mask().
816
 *  The basic major and minor values calculated by _Priority_Major()
817
 *  and _Priority_Minor() are "massaged" by _CPU_Priority_bits_index()
818
 *  to properly range between the values returned by the "find first bit"
819
 *  instruction.  This makes it possible for _Priority_Get_highest() to
820
 *  calculate the major and directly index into the minor table.
821
 *  This mapping is necessary to ensure that 0 (a high priority major/minor)
822
 *  is the first bit found.
823
 *
824
 *  This entire "find first bit" and mapping process depends heavily
825
 *  on the manner in which a priority is broken into a major and minor
826
 *  components with the major being the 4 MSB of a priority and minor
827
 *  the 4 LSB.  Thus (0 << 4) + 0 corresponds to priority 0 -- the highest
828
 *  priority.  And (15 << 4) + 14 corresponds to priority 254 -- the next
829
 *  to the lowest priority.
830
 *
831
 *  If your CPU does not have a "find first bit" instruction, then
832
 *  there are ways to make do without it.  Here are a handful of ways
833
 *  to implement this in software:
834
 *
835
 *    - a series of 16 bit test instructions
836
 *    - a "binary search using if's"
837
 *    - _number = 0
838
 *      if _value > 0x00ff
839
 *        _value >>=8
840
 *        _number = 8;
841
 *
842
 *      if _value > 0x000f
 *        _value >>= 4
 *        _number += 4
845
 *
846
 *      _number += bit_set_table[ _value ]
847
 *
848
 *    where bit_set_table[ 16 ] has values which indicate the first
849
 *      bit set
850
 *
851
 */
852
 
853
  /* #define CPU_USE_GENERIC_BITFIELD_CODE FALSE */
/*  Use the portable C "find first bit" implementation and its lookup
 *  table rather than the l.ff1-based assembly version guarded below.
 */
#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
#define CPU_USE_GENERIC_BITFIELD_DATA TRUE
856
 
857
#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)

  /* Get a value between 0 and N where N is the bit size */
  /* This routine makes use of the fact that CPUCFGR defines
     OB32S to have value 32, and OB64S to have value 64. If
     this ever changes then this routine will fail. */
  /* NOTE(review): l.mfspr reads SPR 0x2 (CPUCFGR) and the 0x60 mask
     presumably isolates the OB32S/OB64S bits so the masked value is
     the operand width (32 or 64); l.ff1 finds the first set bit of
     _value and the subtract converts that into the returned bit
     number.  Confirm the exact l.ff1 numbering (LSB-first, 1-based)
     against the OR1K architecture manual before relying on this. */
#define _CPU_Bitfield_Find_first_bit( _value, _output ) \
     asm volatile ("l.mfspr %0,r0,0x2   \n\t"\
                   "l.andi  %0,%0,0x60  \n\t"\
                   "l.ff1   %1,%1,r0    \n\t"\
                   "l.sub   %0,%0,%1    \n\t" : "=&r" (_output), "+r" (_value));

#endif
870
 
871
/* end of Bitfield handler macros */
872
 
873
/*
874
 *  This routine builds the mask which corresponds to the bit fields
875
 *  as searched by _CPU_Bitfield_Find_first_bit().  See the discussion
876
 *  for that routine.
877
 *
878
 */
879
 
880
#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)

/*  Build the mask which corresponds to bit number _bit_number as
 *  searched by _CPU_Bitfield_Find_first_bit().  The parameter is
 *  parenthesized so a compound argument (e.g. a | b) is shifted as a
 *  whole instead of expanding to (1 << a) | b.
 */
#define _CPU_Priority_Mask( _bit_number ) \
    (1 << (_bit_number))

#endif
886
 
887
/*
888
 *  This routine translates the bit numbers returned by
889
 *  _CPU_Bitfield_Find_first_bit() into something suitable for use as
890
 *  a major or minor component of a priority.  See the discussion
891
 *  for that routine.
892
 *
893
 */
894
 
895
#if (CPU_USE_GENERIC_BITFIELD_CODE == FALSE)

/*  Identity mapping: the bit numbers produced by
 *  _CPU_Bitfield_Find_first_bit() are used directly as major/minor
 *  priority indices on this port, so no translation is required.
 */
#define _CPU_Priority_bits_index( _priority ) \
  (_priority)

#endif
901
 
902
/* end of Priority handler macros */
903
 
904
/* functions */
905
 
906
/*
 *  _CPU_Initialize
 *
 *  This routine performs CPU dependent initialization.
 *
 *  cpu_table       - BSP-supplied CPU configuration table
 *  thread_dispatch - entry point RTEMS uses to dispatch threads
 *
 *  NOTE(review): "void      (*thread_dispatch)" parses as a pointer
 *  to void, not a function pointer -- this K&R-style declaration is
 *  historic RTEMS usage; confirm it matches the definition in the
 *  .c file.
 */

void _CPU_Initialize(
  rtems_cpu_table  *cpu_table,
  void      (*thread_dispatch)
);
917
 
918
/*
 *  _CPU_ISR_install_raw_handler
 *
 *  This routine installs a "raw" interrupt handler directly into the
 *  processor's vector table, bypassing the normal RTEMS interrupt
 *  dispatching support.
 *
 *  vector      - vector number to install into
 *  new_handler - handler to place in the vector table
 *  old_handler - receives the handler previously installed
 */

void _CPU_ISR_install_raw_handler(
  unsigned32  vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);

/*
 *  _CPU_ISR_install_vector
 *
 *  This routine installs an interrupt vector.
 *
 *  vector      - vector number to install into
 *  new_handler - handler to install
 *  old_handler - receives the handler previously installed
 *
 *  NOTE(review): the "NO_CPU Specific Information / XXX document
 *  implementation" placeholder inherited from the template port was
 *  never filled in for or1k.
 */

void _CPU_ISR_install_vector(
  unsigned32  vector,
  proc_ptr    new_handler,
  proc_ptr   *old_handler
);
947
 
948
/*
 *  _CPU_Install_interrupt_stack
 *
 *  This routine installs the hardware interrupt stack pointer.
 *
 *  NOTE:  It need only be provided if CPU_HAS_HARDWARE_INTERRUPT_STACK
 *         is TRUE.
 */

void _CPU_Install_interrupt_stack( void );

/*
 *  _CPU_Thread_Idle_body
 *
 *  This routine is the CPU dependent IDLE thread body.
 *
 *  NOTE:  It need only be provided if CPU_PROVIDES_IDLE_THREAD_BODY
 *         is TRUE.
 */

void _CPU_Thread_Idle_body( void );
971
 
972
/*
 *  _CPU_Context_switch
 *
 *  This routine switches from the run context to the heir context.
 *
 *  run  - context of the currently executing thread (state saved here)
 *  heir - context of the thread to switch to (state restored from here)
 *
 *  Or1k Specific Information:
 *
 *  Please see the comments in the .c file for a description of how
 *  this function works. There are several things to be aware of.
 */

void _CPU_Context_switch(
  Context_Control  *run,
  Context_Control  *heir
);

/*
 *  _CPU_Context_restore
 *
 *  This routine is generally used only to restart self in an
 *  efficient manner.  It may simply be a label in _CPU_Context_switch.
 *
 *  new_context - context to load; this call does not return.
 *
 *  NOTE: May be unnecessary to reload some registers.
 */

void _CPU_Context_restore(
  Context_Control *new_context
);
1001
 
1002
/*
 *  _CPU_Context_save_fp
 *
 *  This routine saves the floating point context passed to it.
 *
 *  fp_context_ptr - address of the pointer to the FP save area
 */

void _CPU_Context_save_fp(
  void **fp_context_ptr
);

/*
 *  _CPU_Context_restore_fp
 *
 *  This routine restores the floating point context passed to it.
 *
 *  fp_context_ptr - address of the pointer to the FP save area
 */

void _CPU_Context_restore_fp(
  void **fp_context_ptr
);
1023
 
1024
/*  The following routine swaps the endian format of an unsigned int.
1025
 *  It must be static because it is referenced indirectly.
1026
 *
1027
 *  This version will work on any processor, but if there is a better
1028
 *  way for your CPU PLEASE use it.  The most common way to do this is to:
1029
 *
1030
 *     swap least significant two bytes with 16-bit rotate
1031
 *     swap upper and lower 16-bits
1032
 *     swap most significant two bytes with 16-bit rotate
1033
 *
1034
 *  Some CPUs have special instructions which swap a 32-bit quantity in
1035
 *  a single instruction (e.g. i486).  It is probably best to avoid
1036
 *  an "endian swapping control bit" in the CPU.  One good reason is
1037
 *  that interrupts would probably have to be disabled to insure that
1038
 *  an interrupt does not try to access the same "chunk" with the wrong
1039
 *  endian.  Another good reason is that on some CPUs, the endian bit
 *  affects the endianness for ALL fetches -- both code and data -- so the code
1041
 *  will be fetched incorrectly.
1042
 *
1043
 */
1044
 
1045
/*
 *  Byte-swap a 32-bit quantity.  Portable C version: build the
 *  reversed word in a single mask-and-shift expression instead of
 *  extracting four named byte temporaries.
 */
static inline unsigned int CPU_swap_u32(
  unsigned int value
)
{
  return( ((value & 0x000000ff) << 24) |   /* byte 0 -> byte 3 */
          ((value & 0x0000ff00) <<  8) |   /* byte 1 -> byte 2 */
          ((value & 0x00ff0000) >>  8) |   /* byte 2 -> byte 1 */
          ((value >> 24) & 0x000000ff) );  /* byte 3 -> byte 0 */
}
1059
 
1060
/*  Byte-swap a 16-bit quantity.  The parameter is parenthesized at
 *  every use so compound expression arguments expand correctly
 *  (previously CPU_swap_u16(a | b) expanded to a | (b & 0xff)).
 *  NOTE: the argument is evaluated twice; avoid side effects in it.
 */
#define CPU_swap_u16( value ) \
  ((((value) & 0xff) << 8) | (((value) >> 8) & 0xff))
1062
 
1063
#ifdef __cplusplus
1064
}
1065
#endif
1066
 
1067
#endif

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.