/* frv simulator machine independent profiling code.

   Copyright (C) 1998, 1999, 2000, 2001, 2003, 2007, 2008
   Free Software Foundation, Inc.
   Contributed by Red Hat

This file is part of the GNU simulators.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program.  If not, see <http://www.gnu.org/licenses/>.

*/
#define WANT_CPU
#define WANT_CPU_FRVBF

#include "sim-main.h"
#include "bfd.h"

#if WITH_PROFILE_MODEL_P

#include "profile.h"
#include "profile-fr400.h"
#include "profile-fr500.h"
#include "profile-fr550.h"
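
/* Everything from here to the end of the file is compiled only when
   the timing model is enabled (WITH_PROFILE_MODEL_P).  */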

static void
reset_gr_flags (SIM_CPU *cpu, INT gr)
{
  SIM_DESC sd = CPU_STATE (cpu);
  if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr400
      || STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr450)
    fr400_reset_gr_flags (cpu, gr);
  /* Other machines have no gr flags right now.  */
}

static void
reset_fr_flags (SIM_CPU *cpu, INT fr)
{
  SIM_DESC sd = CPU_STATE (cpu);
  if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr400
      || STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr450)
    fr400_reset_fr_flags (cpu, fr);
  else if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr500)
    fr500_reset_fr_flags (cpu, fr);
}

static void
reset_acc_flags (SIM_CPU *cpu, INT acc)
{
  SIM_DESC sd = CPU_STATE (cpu);
  if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr400
      || STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr450)
    fr400_reset_acc_flags (cpu, acc);
  /* Other machines have no acc flags right now.  */
}

static void
reset_cc_flags (SIM_CPU *cpu, INT cc)
{
  SIM_DESC sd = CPU_STATE (cpu);
  if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr500)
    fr500_reset_cc_flags (cpu, cc);
  /* Other machines have no cc flags.  */
}

void
set_use_is_gr_complex (SIM_CPU *cpu, INT gr)
{
  if (gr != -1)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      reset_gr_flags (cpu, gr);
      ps->cur_gr_complex |= (((DI)1) << gr);
    }
}

void
set_use_not_gr_complex (SIM_CPU *cpu, INT gr)
{
  if (gr != -1)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      ps->cur_gr_complex &= ~(((DI)1) << gr);
    }
}

int
use_is_gr_complex (SIM_CPU *cpu, INT gr)
{
  if (gr != -1)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      return ps->cur_gr_complex & (((DI)1) << gr);
    }
  return 0;
}

/* Global flag indicating whether this insn is being modeled.  */
enum FRV_INSN_MODELING model_insn = FRV_INSN_NO_MODELING;

/* static buffer for the name of the currently most restrictive hazard.  */
static char hazard_name[100] = "";

/* Buffers tracking prefetched insn data, one per insn cache pipeline.  */
FRV_INSN_FETCH_BUFFER frv_insn_fetch_buffer[]
= {
  {1, NO_REQNO}, {1, NO_REQNO} /* init with impossible address.  */
};
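
/* hazard_name is filled in by the wait-checking routines below and is
   printed, together with the wait in cycles, by trace_vliw_wait_cycles.  */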

enum cache_request
{
  cache_load,
  cache_invalidate,
  cache_flush,
  cache_preload,
  cache_unlock
};

/* A queue of pending requests to the data cache.  Used to keep track of
   loads and other requests which are still outstanding.  */
/* TODO -- some of these are mutually exclusive and can use a union.  */
typedef struct
{
  FRV_CACHE *cache;
  unsigned reqno;
  SI address;
  int length;
  int is_signed;
  int regnum;
  int cycles;
  int regtype;
  int lock;
  int all;
  int slot;
  int active;
  enum cache_request request;
} CACHE_QUEUE_ELEMENT;

#define CACHE_QUEUE_SIZE 64 /* TODO -- make queue dynamic */
struct
{
  unsigned reqno;
  int ix;
  CACHE_QUEUE_ELEMENT q[CACHE_QUEUE_SIZE];
} cache_queue = {0, 0};
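
/* Lifecycle of a queued request: one of the request_cache_* routines below
   adds an entry in the inactive state; activate_cache_requests marks it
   active once the whole VLIW insn has been modeled; run_caches then counts
   down q->cycles, calling submit_cache_request when the count reaches zero;
   finally request_complete detects completion and
   remove_cache_queue_element retires the entry.  */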

/* Queue a request for a load from the cache.  The load will be queued as
   'inactive' and will be requested after the given number of cycles have
   passed from the point the load is activated.  */
void
request_cache_load (SIM_CPU *cpu, INT regnum, int regtype, int cycles)
{
  CACHE_QUEUE_ELEMENT *q;
  FRV_VLIW *vliw;
  int slot;

  /* For a conditional load which was not executed, CPU_LOAD_LENGTH will be
     zero.  */
  if (CPU_LOAD_LENGTH (cpu) == 0)
    return;

  if (cache_queue.ix >= CACHE_QUEUE_SIZE)
    abort (); /* TODO: Make the queue dynamic */

  q = & cache_queue.q[cache_queue.ix];
  ++cache_queue.ix;

  q->reqno = cache_queue.reqno++;
  q->request = cache_load;
  q->cache = CPU_DATA_CACHE (cpu);
  q->address = CPU_LOAD_ADDRESS (cpu);
  q->length = CPU_LOAD_LENGTH (cpu);
  q->is_signed = CPU_LOAD_SIGNED (cpu);
  q->regnum = regnum;
  q->regtype = regtype;
  q->cycles = cycles;
  q->active = 0;

  vliw = CPU_VLIW (cpu);
  slot = vliw->next_slot - 1;
  q->slot = (*vliw->current_vliw)[slot];

  CPU_LOAD_LENGTH (cpu) = 0;
}

/* Queue a request to flush the cache.  The request will be queued as
   'inactive' and will be requested after the given number of cycles have
   passed from the point the request is activated.  */
void
request_cache_flush (SIM_CPU *cpu, FRV_CACHE *cache, int cycles)
{
  CACHE_QUEUE_ELEMENT *q;
  FRV_VLIW *vliw;
  int slot;

  if (cache_queue.ix >= CACHE_QUEUE_SIZE)
    abort (); /* TODO: Make the queue dynamic */

  q = & cache_queue.q[cache_queue.ix];
  ++cache_queue.ix;

  q->reqno = cache_queue.reqno++;
  q->request = cache_flush;
  q->cache = cache;
  q->address = CPU_LOAD_ADDRESS (cpu);
  q->all = CPU_PROFILE_STATE (cpu)->all_cache_entries;
  q->cycles = cycles;
  q->active = 0;

  vliw = CPU_VLIW (cpu);
  slot = vliw->next_slot - 1;
  q->slot = (*vliw->current_vliw)[slot];
}

/* Queue a request to invalidate the cache.  The request will be queued as
   'inactive' and will be requested after the given number of cycles have
   passed from the point the request is activated.  */
void
request_cache_invalidate (SIM_CPU *cpu, FRV_CACHE *cache, int cycles)
{
  CACHE_QUEUE_ELEMENT *q;
  FRV_VLIW *vliw;
  int slot;

  if (cache_queue.ix >= CACHE_QUEUE_SIZE)
    abort (); /* TODO: Make the queue dynamic */

  q = & cache_queue.q[cache_queue.ix];
  ++cache_queue.ix;

  q->reqno = cache_queue.reqno++;
  q->request = cache_invalidate;
  q->cache = cache;
  q->address = CPU_LOAD_ADDRESS (cpu);
  q->all = CPU_PROFILE_STATE (cpu)->all_cache_entries;
  q->cycles = cycles;
  q->active = 0;

  vliw = CPU_VLIW (cpu);
  slot = vliw->next_slot - 1;
  q->slot = (*vliw->current_vliw)[slot];
}

/* Queue a request to preload the cache.  The request will be queued as
   'inactive' and will be requested after the given number of cycles have
   passed from the point the request is activated.  */
void
request_cache_preload (SIM_CPU *cpu, FRV_CACHE *cache, int cycles)
{
  CACHE_QUEUE_ELEMENT *q;
  FRV_VLIW *vliw;
  int slot;

  if (cache_queue.ix >= CACHE_QUEUE_SIZE)
    abort (); /* TODO: Make the queue dynamic */

  q = & cache_queue.q[cache_queue.ix];
  ++cache_queue.ix;

  q->reqno = cache_queue.reqno++;
  q->request = cache_preload;
  q->cache = cache;
  q->address = CPU_LOAD_ADDRESS (cpu);
  q->length = CPU_LOAD_LENGTH (cpu);
  q->lock = CPU_LOAD_LOCK (cpu);
  q->cycles = cycles;
  q->active = 0;

  vliw = CPU_VLIW (cpu);
  slot = vliw->next_slot - 1;
  q->slot = (*vliw->current_vliw)[slot];

  CPU_LOAD_LENGTH (cpu) = 0;
}

/* Queue a request to unlock the cache.  The request will be queued as
   'inactive' and will be requested after the given number of cycles have
   passed from the point the request is activated.  */
void
request_cache_unlock (SIM_CPU *cpu, FRV_CACHE *cache, int cycles)
{
  CACHE_QUEUE_ELEMENT *q;
  FRV_VLIW *vliw;
  int slot;

  if (cache_queue.ix >= CACHE_QUEUE_SIZE)
    abort (); /* TODO: Make the queue dynamic */

  q = & cache_queue.q[cache_queue.ix];
  ++cache_queue.ix;

  q->reqno = cache_queue.reqno++;
  q->request = cache_unlock;
  q->cache = cache;
  q->address = CPU_LOAD_ADDRESS (cpu);
  q->cycles = cycles;
  q->active = 0;

  vliw = CPU_VLIW (cpu);
  slot = vliw->next_slot - 1;
  q->slot = (*vliw->current_vliw)[slot];
}

static void
submit_cache_request (CACHE_QUEUE_ELEMENT *q)
{
  switch (q->request)
    {
    case cache_load:
      frv_cache_request_load (q->cache, q->reqno, q->address, q->slot);
      break;
    case cache_flush:
      frv_cache_request_invalidate (q->cache, q->reqno, q->address, q->slot,
                                    q->all, 1/*flush*/);
      break;
    case cache_invalidate:
      frv_cache_request_invalidate (q->cache, q->reqno, q->address, q->slot,
                                    q->all, 0/*flush*/);
      break;
    case cache_preload:
      frv_cache_request_preload (q->cache, q->address, q->slot,
                                 q->length, q->lock);
      break;
    case cache_unlock:
      frv_cache_request_unlock (q->cache, q->address, q->slot);
      break;
    default:
      abort ();
    }
}

/* Activate all inactive load requests.  */
static void
activate_cache_requests (SIM_CPU *cpu)
{
  int i;
  for (i = 0; i < cache_queue.ix; ++i)
    {
      CACHE_QUEUE_ELEMENT *q = & cache_queue.q[i];
      if (! q->active)
        {
          q->active = 1;
          /* Submit the request now if the cycle count is zero.  */
          if (q->cycles == 0)
            submit_cache_request (q);
        }
    }
}

/* Check to see if a load is pending which affects the given register(s).  */
int
load_pending_for_register (SIM_CPU *cpu, int regnum, int words, int regtype)
{
  int i;
  for (i = 0; i < cache_queue.ix; ++i)
    {
      CACHE_QUEUE_ELEMENT *q = & cache_queue.q[i];

      /* Must be the same kind of register.  */
      if (! q->active || q->request != cache_load || q->regtype != regtype)
        continue;

      /* If the register numbers are equal, then we have a match.  */
      if (q->regnum == regnum)
        return 1; /* load pending */

      /* Check for overlap of a load with a multi-word register.  */
      if (regnum < q->regnum)
        {
          if (regnum + words > q->regnum)
            return 1;
        }
      /* Check for overlap of a multi-word load with the register.  */
      else
        {
          int data_words = (q->length + sizeof (SI) - 1) / sizeof (SI);
          if (q->regnum + data_words > regnum)
            return 1;
        }
    }

  return 0; /* no load pending */
}
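
/* For example, an 8-byte load into gr4 also covers gr5: a query for gr5
   matches through the multi-word-load test above, while a query for the
   pair gr3/gr4 (words == 2) matches through the regnum < q->regnum test.  */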

/* Check to see if a cache flush is pending which affects the given
   address.  */
static int
flush_pending_for_address (SIM_CPU *cpu, SI address)
{
  int line_mask = ~(CPU_DATA_CACHE (cpu)->line_size - 1);
  int i;
  for (i = 0; i < cache_queue.ix; ++i)
    {
      CACHE_QUEUE_ELEMENT *q = & cache_queue.q[i];

      /* Must be the same kind of request and active.  */
      if (! q->active || q->request != cache_flush)
        continue;

      /* If the addresses fall within the same cache line, then we have a
         match.  */
      if ((q->address & line_mask) == (address & line_mask))
        return 1; /* flush pending */
    }

  return 0; /* no flush pending */
}

static void
remove_cache_queue_element (SIM_CPU *cpu, int i)
{
  /* If we are removing the load of an FR register, then remember which
     one(s).  */
  CACHE_QUEUE_ELEMENT q = cache_queue.q[i];

  for (--cache_queue.ix; i < cache_queue.ix; ++i)
    cache_queue.q[i] = cache_queue.q[i + 1];

  /* If we removed a load of an FR register, check to see if any other loads
     of that register are still queued.  If not, then apply the queued post
     processing time of that register to its latency.  Also apply 1 extra
     cycle of latency to the register since it was a floating point load.  */
  if (q.request == cache_load && q.regtype != REGTYPE_NONE)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int data_words = (q.length + sizeof (SI) - 1) / sizeof (SI);
      int j;
      for (j = 0; j < data_words; ++j)
        {
          int regnum = q.regnum + j;
          if (! load_pending_for_register (cpu, regnum, 1, q.regtype))
            {
              if (q.regtype == REGTYPE_FR)
                {
                  int *fr = ps->fr_busy;
                  fr[regnum] += 1 + ps->fr_ptime[regnum];
                  ps->fr_ptime[regnum] = 0;
                }
            }
        }
    }
}

/* Copy data from the cache buffer to the target register(s).  */
static void
copy_load_data (SIM_CPU *current_cpu, FRV_CACHE *cache, int slot,
                CACHE_QUEUE_ELEMENT *q)
{
  switch (q->length)
    {
    case 1:
      if (q->regtype == REGTYPE_FR)
        {
          if (q->is_signed)
            {
              QI value = CACHE_RETURN_DATA (cache, slot, q->address, QI, 1);
              SET_H_FR (q->regnum, value);
            }
          else
            {
              UQI value = CACHE_RETURN_DATA (cache, slot, q->address, UQI, 1);
              SET_H_FR (q->regnum, value);
            }
        }
      else
        {
          if (q->is_signed)
            {
              QI value = CACHE_RETURN_DATA (cache, slot, q->address, QI, 1);
              SET_H_GR (q->regnum, value);
            }
          else
            {
              UQI value = CACHE_RETURN_DATA (cache, slot, q->address, UQI, 1);
              SET_H_GR (q->regnum, value);
            }
        }
      break;
    case 2:
      if (q->regtype == REGTYPE_FR)
        {
          if (q->is_signed)
            {
              HI value = CACHE_RETURN_DATA (cache, slot, q->address, HI, 2);
              SET_H_FR (q->regnum, value);
            }
          else
            {
              UHI value = CACHE_RETURN_DATA (cache, slot, q->address, UHI, 2);
              SET_H_FR (q->regnum, value);
            }
        }
      else
        {
          if (q->is_signed)
            {
              HI value = CACHE_RETURN_DATA (cache, slot, q->address, HI, 2);
              SET_H_GR (q->regnum, value);
            }
          else
            {
              UHI value = CACHE_RETURN_DATA (cache, slot, q->address, UHI, 2);
              SET_H_GR (q->regnum, value);
            }
        }
      break;
    case 4:
      if (q->regtype == REGTYPE_FR)
        {
          SET_H_FR (q->regnum,
                    CACHE_RETURN_DATA (cache, slot, q->address, SF, 4));
        }
      else
        {
          SET_H_GR (q->regnum,
                    CACHE_RETURN_DATA (cache, slot, q->address, SI, 4));
        }
      break;
    case 8:
      if (q->regtype == REGTYPE_FR)
        {
          SET_H_FR_DOUBLE (q->regnum,
                           CACHE_RETURN_DATA (cache, slot, q->address, DF, 8));
        }
      else
        {
          SET_H_GR_DOUBLE (q->regnum,
                           CACHE_RETURN_DATA (cache, slot, q->address, DI, 8));
        }
      break;
    case 16:
      if (q->regtype == REGTYPE_FR)
        frvbf_h_fr_quad_set_handler (current_cpu, q->regnum,
                                     CACHE_RETURN_DATA_ADDRESS (cache, slot,
                                                                q->address,
                                                                16));
      else
        frvbf_h_gr_quad_set_handler (current_cpu, q->regnum,
                                     CACHE_RETURN_DATA_ADDRESS (cache, slot,
                                                                q->address,
                                                                16));
      break;
    default:
      abort ();
    }
}

static int
request_complete (SIM_CPU *cpu, CACHE_QUEUE_ELEMENT *q)
{
  FRV_CACHE* cache;
  if (! q->active || q->cycles > 0)
    return 0;

  cache = CPU_DATA_CACHE (cpu);
  switch (q->request)
    {
    case cache_load:
      /* For loads, we must wait until the data is returned from the cache.  */
      if (frv_cache_data_in_buffer (cache, 0, q->address, q->reqno))
        {
          copy_load_data (cpu, cache, 0, q);
          return 1;
        }
      if (frv_cache_data_in_buffer (cache, 1, q->address, q->reqno))
        {
          copy_load_data (cpu, cache, 1, q);
          return 1;
        }
      break;

    case cache_flush:
      /* We must wait until the data is flushed.  */
      if (frv_cache_data_flushed (cache, 0, q->address, q->reqno))
        return 1;
      if (frv_cache_data_flushed (cache, 1, q->address, q->reqno))
        return 1;
      break;

    default:
      /* All other requests are complete once they've been made.  */
      return 1;
    }

  return 0;
}
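
/* Loads and flushes are polled on both data cache pipelines (slots 0
   and 1); the request completes through whichever pipeline returns the
   data first.  */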

/* Run the insn and data caches through the given number of cycles, taking
   note of load requests which are fulfilled as a result.  */
static void
run_caches (SIM_CPU *cpu, int cycles)
{
  FRV_CACHE* data_cache = CPU_DATA_CACHE (cpu);
  FRV_CACHE* insn_cache = CPU_INSN_CACHE (cpu);
  int i;
  /* For each cycle, run the caches, noting which requests have been fulfilled
     and submitting new requests on their designated cycles.  */
  for (i = 0; i < cycles; ++i)
    {
      int j;
      /* Run the caches through 1 cycle.  */
      frv_cache_run (data_cache, 1);
      frv_cache_run (insn_cache, 1);

      /* Note whether prefetched insn data has been loaded yet.  */
      for (j = LS; j < FRV_CACHE_PIPELINES; ++j)
        {
          if (frv_insn_fetch_buffer[j].reqno != NO_REQNO
              && frv_cache_data_in_buffer (insn_cache, j,
                                           frv_insn_fetch_buffer[j].address,
                                           frv_insn_fetch_buffer[j].reqno))
            frv_insn_fetch_buffer[j].reqno = NO_REQNO;
        }

      /* Check to see which requests have been satisfied and which should
         be submitted now.  */
      for (j = 0; j < cache_queue.ix; ++j)
        {
          CACHE_QUEUE_ELEMENT *q = & cache_queue.q[j];
          if (! q->active)
            continue;

          /* If a load has been satisfied, complete the operation and remove it
             from the queue.  */
          if (request_complete (cpu, q))
            {
              remove_cache_queue_element (cpu, j);
              --j;
              continue;
            }

          /* Decrease the cycle count of each queued request.
             Submit a request for each queued request whose cycle count has
             become zero.  */
          --q->cycles;
          if (q->cycles == 0)
            submit_cache_request (q);
        }
    }
}

static void
apply_latency_adjustments (SIM_CPU *cpu)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int i;
  /* update the latencies of the registers.  */
  int *fr  = ps->fr_busy;
  int *acc = ps->acc_busy;
  for (i = 0; i < 64; ++i)
    {
      if (ps->fr_busy_adjust[i] > 0)
        *fr -= ps->fr_busy_adjust[i]; /* OK if it goes negative.  */
      if (ps->acc_busy_adjust[i] > 0)
        *acc -= ps->acc_busy_adjust[i]; /* OK if it goes negative.  */
      ++fr;
      ++acc;
    }
}
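
/* fr_busy_adjust and acc_busy_adjust are maintained by decrease_FR_busy
   and decrease_ACC_busy; enforce_full_fr_latency and
   enforce_full_acc_latency set an entry to -1, which the test above
   skips, so no adjustment is applied for that register.  */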

/* Account for the number of cycles which have just passed in the latency of
   various system elements.  Works for negative cycles too so that latency
   can be extended in the case of insn fetch latency.
   If negative or zero, then no adjustment is necessary.  */
static void
update_latencies (SIM_CPU *cpu, int cycles)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int i;
  /* update the latencies of the registers.  */
  int *fdiv;
  int *fsqrt;
  int *idiv;
  int *flt;
  int *media;
  int *ccr;
  int *gr  = ps->gr_busy;
  int *fr  = ps->fr_busy;
  int *acc = ps->acc_busy;
  int *spr;
  /* This loop handles GR, FR and ACC registers.  */
  for (i = 0; i < 64; ++i)
    {
      if (*gr <= cycles)
        {
          *gr = 0;
          reset_gr_flags (cpu, i);
        }
      else
        *gr -= cycles;
      /* If the busy drops to 0, then mark the register as
         "not in use".  */
      if (*fr <= cycles)
        {
          int *fr_lat = ps->fr_latency + i;
          *fr = 0;
          ps->fr_busy_adjust[i] = 0;
          /* Only clear flags if this register has no target latency.  */
          if (*fr_lat == 0)
            reset_fr_flags (cpu, i);
        }
      else
        *fr -= cycles;
      /* If the busy drops to 0, then mark the register as
         "not in use".  */
      if (*acc <= cycles)
        {
          int *acc_lat = ps->acc_latency + i;
          *acc = 0;
          ps->acc_busy_adjust[i] = 0;
          /* Only clear flags if this register has no target latency.  */
          if (*acc_lat == 0)
            reset_acc_flags (cpu, i);
        }
      else
        *acc -= cycles;
      ++gr;
      ++fr;
      ++acc;
    }
  /* This loop handles CCR registers.  */
  ccr = ps->ccr_busy;
  for (i = 0; i < 8; ++i)
    {
      if (*ccr <= cycles)
        {
          *ccr = 0;
          reset_cc_flags (cpu, i);
        }
      else
        *ccr -= cycles;
      ++ccr;
    }
  /* This loop handles SPR registers.  */
  spr = ps->spr_busy;
  for (i = 0; i < 4096; ++i)
    {
      if (*spr <= cycles)
        *spr = 0;
      else
        *spr -= cycles;
      ++spr;
    }
  /* This loop handles resources.  */
  idiv = ps->idiv_busy;
  fdiv = ps->fdiv_busy;
  fsqrt = ps->fsqrt_busy;
  for (i = 0; i < 2; ++i)
    {
      *idiv = (*idiv <= cycles) ? 0 : (*idiv - cycles);
      *fdiv = (*fdiv <= cycles) ? 0 : (*fdiv - cycles);
      *fsqrt = (*fsqrt <= cycles) ? 0 : (*fsqrt - cycles);
      ++idiv;
      ++fdiv;
      ++fsqrt;
    }
  /* Float and media units can occur in 4 slots on some machines.  */
  flt = ps->float_busy;
  media = ps->media_busy;
  for (i = 0; i < 4; ++i)
    {
      *flt = (*flt <= cycles) ? 0 : (*flt - cycles);
      *media = (*media <= cycles) ? 0 : (*media - cycles);
      ++flt;
      ++media;
    }
}

/* Print information about the wait for the given number of cycles.  */
void
frv_model_trace_wait_cycles (SIM_CPU *cpu, int cycles, const char *hazard_name)
{
  if (TRACE_INSN_P (cpu) && cycles > 0)
    {
      SIM_DESC sd = CPU_STATE (cpu);
      trace_printf (sd, cpu, "**** %s wait %d cycles ***\n",
                    hazard_name, cycles);
    }
}

void
trace_vliw_wait_cycles (SIM_CPU *cpu)
{
  if (TRACE_INSN_P (cpu))
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      frv_model_trace_wait_cycles (cpu, ps->vliw_wait, hazard_name);
    }
}

/* Wait for the given number of cycles.  */
void
frv_model_advance_cycles (SIM_CPU *cpu, int cycles)
{
  PROFILE_DATA *p = CPU_PROFILE_DATA (cpu);
  update_latencies (cpu, cycles);
  run_caches (cpu, cycles);
  PROFILE_MODEL_TOTAL_CYCLES (p) += cycles;
}

void
handle_resource_wait (SIM_CPU *cpu)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  if (ps->vliw_wait != 0)
    frv_model_advance_cycles (cpu, ps->vliw_wait);
  if (ps->vliw_load_stall > ps->vliw_wait)
    ps->vliw_load_stall -= ps->vliw_wait;
  else
    ps->vliw_load_stall = 0;
}
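
/* vliw_load_stall records how much of vliw_wait was caused by outstanding
   loads; once the wait has been taken above, the portion just consumed is
   removed from the stall tally.  */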

/* Account for the number of cycles until these resources will be available
   again.  */
static void
update_target_latencies (SIM_CPU *cpu)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int i;
  /* update the latencies of the registers.  */
  int *ccr_lat;
  int *gr_lat  = ps->gr_latency;
  int *fr_lat  = ps->fr_latency;
  int *acc_lat = ps->acc_latency;
  int *spr_lat;
  int *ccr;
  int *gr = ps->gr_busy;
  int *fr = ps->fr_busy;
  int *acc = ps->acc_busy;
  int *spr;
  /* This loop handles GR, FR and ACC registers.  */
  for (i = 0; i < 64; ++i)
    {
      if (*gr_lat)
        {
          *gr = *gr_lat;
          *gr_lat = 0;
        }
      if (*fr_lat)
        {
          *fr = *fr_lat;
          *fr_lat = 0;
        }
      if (*acc_lat)
        {
          *acc = *acc_lat;
          *acc_lat = 0;
        }
      ++gr; ++gr_lat;
      ++fr; ++fr_lat;
      ++acc; ++acc_lat;
    }
  /* This loop handles CCR registers.  */
  ccr = ps->ccr_busy;
  ccr_lat = ps->ccr_latency;
  for (i = 0; i < 8; ++i)
    {
      if (*ccr_lat)
        {
          *ccr = *ccr_lat;
          *ccr_lat = 0;
        }
      ++ccr; ++ccr_lat;
    }
  /* This loop handles SPR registers.  */
  spr = ps->spr_busy;
  spr_lat = ps->spr_latency;
  for (i = 0; i < 4096; ++i)
    {
      if (*spr_lat)
        {
          *spr = *spr_lat;
          *spr_lat = 0;
        }
      ++spr; ++spr_lat;
    }
}

/* Run the caches until all pending cache flushes are complete.  */
static void
wait_for_flush (SIM_CPU *cpu)
{
  SI address = CPU_LOAD_ADDRESS (cpu);
  int wait = 0;
  while (flush_pending_for_address (cpu, address))
    {
      frv_model_advance_cycles (cpu, 1);
      ++wait;
    }
  if (TRACE_INSN_P (cpu) && wait)
    {
      sprintf (hazard_name, "Data cache flush address 0x%x:", address);
      frv_model_trace_wait_cycles (cpu, wait, hazard_name);
    }
}

/* Initialize cycle counting for an insn.
   FIRST_P is non-zero if this is the first insn in a set of parallel
   insns.  */
void
frvbf_model_insn_before (SIM_CPU *cpu, int first_p)
{
  SIM_DESC sd = CPU_STATE (cpu);
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);

  ps->vliw_wait = 0;
  ps->post_wait = 0;
  memset (ps->fr_busy_adjust, 0, sizeof (ps->fr_busy_adjust));
  memset (ps->acc_busy_adjust, 0, sizeof (ps->acc_busy_adjust));

  if (first_p)
    {
      ps->vliw_insns++;
      ps->vliw_cycles = 0;
      ps->vliw_branch_taken = 0;
      ps->vliw_load_stall = 0;
    }

  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
    case bfd_mach_fr450:
      fr400_model_insn_before (cpu, first_p);
      break;
    case bfd_mach_fr500:
      fr500_model_insn_before (cpu, first_p);
      break;
    case bfd_mach_fr550:
      fr550_model_insn_before (cpu, first_p);
      break;
    default:
      break;
    }

  if (first_p)
    wait_for_flush (cpu);
}

/* Record the cycles computed for an insn.
   LAST_P is non-zero if this is the last insn in a set of parallel insns,
   and we update the total cycle count.
   CYCLES is the cycle count of the insn.  */

void
frvbf_model_insn_after (SIM_CPU *cpu, int last_p, int cycles)
{
  PROFILE_DATA *p = CPU_PROFILE_DATA (cpu);
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  SIM_DESC sd = CPU_STATE (cpu);

  PROFILE_MODEL_CUR_INSN_CYCLES (p) = cycles;

  /* The number of cycles for a VLIW insn is the maximum number of cycles
     used by any individual insn within it.  */
  if (cycles > ps->vliw_cycles)
    ps->vliw_cycles = cycles;

  if (last_p)
    {
      /* This is the last insn in a VLIW insn.  */
      struct frv_interrupt_timer *timer = & frv_interrupt_state.timer;

      activate_cache_requests (cpu); /* before advancing cycles.  */
      apply_latency_adjustments (cpu); /* must go first.  */
      update_target_latencies (cpu); /* must go next.  */
      frv_model_advance_cycles (cpu, ps->vliw_cycles);

      PROFILE_MODEL_LOAD_STALL_CYCLES (p) += ps->vliw_load_stall;
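
      /* The timer test below asks whether the position within the current
         timer period at the last check (timer->current % timer->value)
         plus the cycles elapsed since then (cycles - timer->current)
         reaches a full period of timer->value cycles, i.e. whether a
         period boundary has been crossed.  */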

      /* Check the interrupt timer.  cycles contains the total cycle count.  */
      if (timer->enabled)
        {
          cycles = PROFILE_MODEL_TOTAL_CYCLES (p);
          if (timer->current % timer->value
              + (cycles - timer->current) >= timer->value)
            frv_queue_external_interrupt (cpu, timer->interrupt);
          timer->current = cycles;
        }

      ps->past_first_p = 0; /* Next one will be the first in a new VLIW.  */
      ps->branch_address = -1;
    }
  else
    ps->past_first_p = 1;

  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
    case bfd_mach_fr450:
      fr400_model_insn_after (cpu, last_p, cycles);
      break;
    case bfd_mach_fr500:
      fr500_model_insn_after (cpu, last_p, cycles);
      break;
    case bfd_mach_fr550:
      fr550_model_insn_after (cpu, last_p, cycles);
      break;
    default:
      break;
    }
}

USI
frvbf_model_branch (SIM_CPU *current_cpu, PCADDR target, int hint)
{
  /* Record the hint and branch address for use in profiling.  */
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
  ps->branch_hint = hint;
  ps->branch_address = target;
}
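
/* This routine only records state; the hint and target saved above are
   consumed by the machine-specific profiling code.  Note that, although
   declared USI to match the interface expected by the generated code, it
   returns no value.  */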

/* Top up the latency of the given GR by the given number of cycles.  */
void
update_GR_latency (SIM_CPU *cpu, INT out_GR, int cycles)
{
  if (out_GR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *gr = ps->gr_latency;
      if (gr[out_GR] < cycles)
        gr[out_GR] = cycles;
    }
}

void
decrease_GR_busy (SIM_CPU *cpu, INT in_GR, int cycles)
{
  if (in_GR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *gr = ps->gr_busy;
      gr[in_GR] -= cycles;
    }
}

/* Top up the latency of the given double GR by the number of cycles.  */
void
update_GRdouble_latency (SIM_CPU *cpu, INT out_GR, int cycles)
{
  if (out_GR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *gr = ps->gr_latency;
      if (gr[out_GR] < cycles)
        gr[out_GR] = cycles;
      if (out_GR < 63 && gr[out_GR + 1] < cycles)
        gr[out_GR + 1] = cycles;
    }
}

void
update_GR_latency_for_load (SIM_CPU *cpu, INT out_GR, int cycles)
{
  if (out_GR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *gr = ps->gr_latency;

      /* The latency of the GR will be at least the number of cycles used
         by the insn.  */
      if (gr[out_GR] < cycles)
        gr[out_GR] = cycles;

      /* The latency will also depend on how long it takes to retrieve the
         data from the cache or memory.  Assume that the load is issued
         after the last cycle of the insn.  */
      request_cache_load (cpu, out_GR, REGTYPE_NONE, cycles);
    }
}

void
update_GRdouble_latency_for_load (SIM_CPU *cpu, INT out_GR, int cycles)
{
  if (out_GR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *gr = ps->gr_latency;

      /* The latency of the GR will be at least the number of cycles used
         by the insn.  */
      if (gr[out_GR] < cycles)
        gr[out_GR] = cycles;
      if (out_GR < 63 && gr[out_GR + 1] < cycles)
        gr[out_GR + 1] = cycles;

      /* The latency will also depend on how long it takes to retrieve the
         data from the cache or memory.  Assume that the load is issued
         after the last cycle of the insn.  */
      request_cache_load (cpu, out_GR, REGTYPE_NONE, cycles);
    }
}

void
update_GR_latency_for_swap (SIM_CPU *cpu, INT out_GR, int cycles)
{
  update_GR_latency_for_load (cpu, out_GR, cycles);
}
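
/* A swap returns its memory operand through the data cache just as a
   load does, so the load bookkeeping is reused unchanged.  */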

/* Top up the latency of the given FR by the given number of cycles.  */
void
update_FR_latency (SIM_CPU *cpu, INT out_FR, int cycles)
{
  if (out_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *fr = ps->fr_latency;
      if (fr[out_FR] < cycles)
        fr[out_FR] = cycles;
    }
}

/* Top up the latency of the given double FR by the number of cycles.  */
void
update_FRdouble_latency (SIM_CPU *cpu, INT out_FR, int cycles)
{
  if (out_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *fr = ps->fr_latency;
      if (fr[out_FR] < cycles)
        fr[out_FR] = cycles;
      if (out_FR < 63 && fr[out_FR + 1] < cycles)
        fr[out_FR + 1] = cycles;
    }
}

void
update_FR_latency_for_load (SIM_CPU *cpu, INT out_FR, int cycles)
{
  if (out_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *fr = ps->fr_latency;

      /* The latency of the FR will be at least the number of cycles used
         by the insn.  */
      if (fr[out_FR] < cycles)
        fr[out_FR] = cycles;

      /* The latency will also depend on how long it takes to retrieve the
         data from the cache or memory.  Assume that the load is issued
         after the last cycle of the insn.  */
      request_cache_load (cpu, out_FR, REGTYPE_FR, cycles);
    }
}

void
update_FRdouble_latency_for_load (SIM_CPU *cpu, INT out_FR, int cycles)
{
  if (out_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *fr = ps->fr_latency;

      /* The latency of the FR will be at least the number of cycles used
         by the insn.  */
      if (fr[out_FR] < cycles)
        fr[out_FR] = cycles;
      if (out_FR < 63 && fr[out_FR + 1] < cycles)
        fr[out_FR + 1] = cycles;

      /* The latency will also depend on how long it takes to retrieve the
         data from the cache or memory.  Assume that the load is issued
         after the last cycle of the insn.  */
      request_cache_load (cpu, out_FR, REGTYPE_FR, cycles);
    }
}

/* Top up the post-processing time of the given FR by the given number of
   cycles.  */
void
update_FR_ptime (SIM_CPU *cpu, INT out_FR, int cycles)
{
  if (out_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      /* If a load is pending on this register, then add the cycles to
         the post processing time for this register.  Otherwise apply it
         directly to the latency of the register.  */
      if (! load_pending_for_register (cpu, out_FR, 1, REGTYPE_FR))
        {
          int *fr = ps->fr_latency;
          fr[out_FR] += cycles;
        }
      else
        ps->fr_ptime[out_FR] += cycles;
    }
}

void
update_FRdouble_ptime (SIM_CPU *cpu, INT out_FR, int cycles)
{
  if (out_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      /* If a load is pending on this register, then add the cycles to
         the post processing time for this register.  Otherwise apply it
         directly to the latency of the register.  */
      if (! load_pending_for_register (cpu, out_FR, 2, REGTYPE_FR))
        {
          int *fr = ps->fr_latency;
          fr[out_FR] += cycles;
          if (out_FR < 63)
            fr[out_FR + 1] += cycles;
        }
      else
        {
          ps->fr_ptime[out_FR] += cycles;
          if (out_FR < 63)
            ps->fr_ptime[out_FR + 1] += cycles;
        }
    }
}

/* Top up the post-processing time of the given ACC by the given number of
   cycles.  */
void
update_ACC_ptime (SIM_CPU *cpu, INT out_ACC, int cycles)
{
  if (out_ACC >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      /* No load can be pending on this register.  Apply the cycles
         directly to the latency of the register.  */
      int *acc = ps->acc_latency;
      acc[out_ACC] += cycles;
    }
}

/* Top up the post-processing time of the given SPR by the given number of
   cycles.  */
void
update_SPR_ptime (SIM_CPU *cpu, INT out_SPR, int cycles)
{
  if (out_SPR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      /* No load can be pending on this register.  Apply the cycles
         directly to the latency of the register.  */
      int *spr = ps->spr_latency;
      spr[out_SPR] += cycles;
    }
}

void
decrease_ACC_busy (SIM_CPU *cpu, INT out_ACC, int cycles)
{
  if (out_ACC >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *acc = ps->acc_busy;
      acc[out_ACC] -= cycles;
      if (ps->acc_busy_adjust[out_ACC] >= 0
          && cycles > ps->acc_busy_adjust[out_ACC])
        ps->acc_busy_adjust[out_ACC] = cycles;
    }
}

void
increase_ACC_busy (SIM_CPU *cpu, INT out_ACC, int cycles)
{
  if (out_ACC >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *acc = ps->acc_busy;
      acc[out_ACC] += cycles;
    }
}

void
enforce_full_acc_latency (SIM_CPU *cpu, INT in_ACC)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  ps->acc_busy_adjust[in_ACC] = -1;
}

void
decrease_FR_busy (SIM_CPU *cpu, INT out_FR, int cycles)
{
  if (out_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *fr = ps->fr_busy;
      fr[out_FR] -= cycles;
      if (ps->fr_busy_adjust[out_FR] >= 0
          && cycles > ps->fr_busy_adjust[out_FR])
        ps->fr_busy_adjust[out_FR] = cycles;
    }
}

void
increase_FR_busy (SIM_CPU *cpu, INT out_FR, int cycles)
{
  if (out_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *fr = ps->fr_busy;
      fr[out_FR] += cycles;
    }
}

/* Top up the latency of the given ACC by the given number of cycles.  */
void
update_ACC_latency (SIM_CPU *cpu, INT out_ACC, int cycles)
{
  if (out_ACC >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *acc = ps->acc_latency;
      if (acc[out_ACC] < cycles)
        acc[out_ACC] = cycles;
    }
}

/* Top up the latency of the given CCR by the given number of cycles.  */
void
update_CCR_latency (SIM_CPU *cpu, INT out_CCR, int cycles)
{
  if (out_CCR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *ccr = ps->ccr_latency;
      if (ccr[out_CCR] < cycles)
        ccr[out_CCR] = cycles;
    }
}

/* Top up the latency of the given SPR by the given number of cycles.  */
void
update_SPR_latency (SIM_CPU *cpu, INT out_SPR, int cycles)
{
  if (out_SPR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *spr = ps->spr_latency;
      if (spr[out_SPR] < cycles)
        spr[out_SPR] = cycles;
    }
}

/* Top up the latency of the given integer division resource by the given
   number of cycles.  */
void
update_idiv_resource_latency (SIM_CPU *cpu, INT in_resource, int cycles)
{
  /* operate directly on the busy cycles since each resource can only
     be used once in a VLIW insn.  */
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->idiv_busy;
  r[in_resource] = cycles;
}

/* Set the latency of the given resource to the given number of cycles.  */
void
update_fdiv_resource_latency (SIM_CPU *cpu, INT in_resource, int cycles)
{
  /* operate directly on the busy cycles since each resource can only
     be used once in a VLIW insn.  */
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->fdiv_busy;
  r[in_resource] = cycles;
}

/* Set the latency of the given resource to the given number of cycles.  */
void
update_fsqrt_resource_latency (SIM_CPU *cpu, INT in_resource, int cycles)
{
  /* operate directly on the busy cycles since each resource can only
     be used once in a VLIW insn.  */
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->fsqrt_busy;
  r[in_resource] = cycles;
}

/* Set the latency of the given resource to the given number of cycles.  */
void
update_float_resource_latency (SIM_CPU *cpu, INT in_resource, int cycles)
{
  /* operate directly on the busy cycles since each resource can only
     be used once in a VLIW insn.  */
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->float_busy;
  r[in_resource] = cycles;
}

void
update_media_resource_latency (SIM_CPU *cpu, INT in_resource, int cycles)
{
  /* operate directly on the busy cycles since each resource can only
     be used once in a VLIW insn.  */
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->media_busy;
  r[in_resource] = cycles;
}

/* Set the branch penalty to the given number of cycles.  */
void
update_branch_penalty (SIM_CPU *cpu, int cycles)
{
  /* operate directly on the busy cycles since only one branch can occur
     in a VLIW insn.  */
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  ps->branch_penalty = cycles;
}

/* Check the availability of the given GR register and update the number
   of cycles the current VLIW insn must wait until it is available.  */
void
vliw_wait_for_GR (SIM_CPU *cpu, INT in_GR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *gr = ps->gr_busy;
  /* If the latency of the register is greater than the current wait
     then update the current wait.  */
  if (in_GR >= 0 && gr[in_GR] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
        sprintf (hazard_name, "Data hazard for gr%d:", in_GR);
      ps->vliw_wait = gr[in_GR];
    }
}

/* Check the availability of the given double GR register and update the
   number of cycles the current VLIW insn must wait until it is available.  */
void
vliw_wait_for_GRdouble (SIM_CPU *cpu, INT in_GR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *gr = ps->gr_busy;
  /* If the latency of the register is greater than the current wait
     then update the current wait.  */
  if (in_GR >= 0)
    {
      if (gr[in_GR] > ps->vliw_wait)
        {
          if (TRACE_INSN_P (cpu))
            sprintf (hazard_name, "Data hazard for gr%d:", in_GR);
          ps->vliw_wait = gr[in_GR];
        }
      if (in_GR < 63 && gr[in_GR + 1] > ps->vliw_wait)
        {
          if (TRACE_INSN_P (cpu))
            sprintf (hazard_name, "Data hazard for gr%d:", in_GR + 1);
          ps->vliw_wait = gr[in_GR + 1];
        }
    }
}

/* Check the availability of the given FR register and update the number
   of cycles the current VLIW insn must wait until it is available.  */
void
vliw_wait_for_FR (SIM_CPU *cpu, INT in_FR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *fr = ps->fr_busy;
  /* If the latency of the register is greater than the current wait
     then update the current wait.  */
  if (in_FR >= 0 && fr[in_FR] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
        sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
      ps->vliw_wait = fr[in_FR];
    }
}

/* Check the availability of the given double FR register and update the
   number of cycles the current VLIW insn must wait until it is available.  */
void
vliw_wait_for_FRdouble (SIM_CPU *cpu, INT in_FR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *fr = ps->fr_busy;
  /* If the latency of the register is greater than the current wait
     then update the current wait.  */
  if (in_FR >= 0)
    {
      if (fr[in_FR] > ps->vliw_wait)
        {
          if (TRACE_INSN_P (cpu))
            sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
          ps->vliw_wait = fr[in_FR];
        }
      if (in_FR < 63 && fr[in_FR + 1] > ps->vliw_wait)
        {
          if (TRACE_INSN_P (cpu))
            sprintf (hazard_name, "Data hazard for fr%d:", in_FR + 1);
          ps->vliw_wait = fr[in_FR + 1];
        }
    }
}

/* Check the availability of the given CCR register and update the number
   of cycles the current VLIW insn must wait until it is available.  */
void
vliw_wait_for_CCR (SIM_CPU *cpu, INT in_CCR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *ccr = ps->ccr_busy;
  /* If the latency of the register is greater than the current wait
     then update the current wait.  */
  if (in_CCR >= 0 && ccr[in_CCR] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
        {
          if (in_CCR > 3)
            sprintf (hazard_name, "Data hazard for icc%d:", in_CCR-4);
          else
            sprintf (hazard_name, "Data hazard for fcc%d:", in_CCR);
        }
      ps->vliw_wait = ccr[in_CCR];
    }
}

/* Check the availability of the given ACC register and update the number
   of cycles the current VLIW insn must wait until it is available.  */
void
vliw_wait_for_ACC (SIM_CPU *cpu, INT in_ACC)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *acc = ps->acc_busy;
  /* If the latency of the register is greater than the current wait
     then update the current wait.  */
  if (in_ACC >= 0 && acc[in_ACC] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
        sprintf (hazard_name, "Data hazard for acc%d:", in_ACC);
      ps->vliw_wait = acc[in_ACC];
    }
}

/* Check the availability of the given SPR register and update the number
   of cycles the current VLIW insn must wait until it is available.  */
void
vliw_wait_for_SPR (SIM_CPU *cpu, INT in_SPR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *spr = ps->spr_busy;
  /* If the latency of the register is greater than the current wait
     then update the current wait.  */
  if (in_SPR >= 0 && spr[in_SPR] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
        sprintf (hazard_name, "Data hazard for spr %d:", in_SPR);
      ps->vliw_wait = spr[in_SPR];
    }
}

/* Check the availability of the given integer division resource and update
   the number of cycles the current VLIW insn must wait until it is
   available.  */
void
vliw_wait_for_idiv_resource (SIM_CPU *cpu, INT in_resource)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->idiv_busy;
  /* If the latency of the resource is greater than the current wait
     then update the current wait.  */
  if (r[in_resource] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
        {
          sprintf (hazard_name,
                   "Resource hazard for integer division in slot I%d:",
                   in_resource);
        }
      ps->vliw_wait = r[in_resource];
    }
}

/* Check the availability of the given float division resource and update
   the number of cycles the current VLIW insn must wait until it is
   available.  */
void
vliw_wait_for_fdiv_resource (SIM_CPU *cpu, INT in_resource)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->fdiv_busy;
  /* If the latency of the resource is greater than the current wait
     then update the current wait.  */
  if (r[in_resource] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
        {
          sprintf (hazard_name,
                   "Resource hazard for floating point division in slot F%d:",
                   in_resource);
        }
      ps->vliw_wait = r[in_resource];
    }
}

/* Check the availability of the given float square root resource and update
   the number of cycles the current VLIW insn must wait until it is
   available.  */
void
vliw_wait_for_fsqrt_resource (SIM_CPU *cpu, INT in_resource)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->fsqrt_busy;
  /* If the latency of the resource is greater than the current wait
     then update the current wait.  */
  if (r[in_resource] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
        {
          sprintf (hazard_name,
                   "Resource hazard for square root in slot F%d:",
                   in_resource);
        }
      ps->vliw_wait = r[in_resource];
    }
}

/* Check the availability of the given float unit resource and update
   the number of cycles the current VLIW insn must wait until it is
   available.  */
void
vliw_wait_for_float_resource (SIM_CPU *cpu, INT in_resource)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->float_busy;
  /* If the latency of the resource is greater than the current wait
     then update the current wait.  */
  if (r[in_resource] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
        {
          sprintf (hazard_name,
                   "Resource hazard for floating point unit in slot F%d:",
                   in_resource);
        }
      ps->vliw_wait = r[in_resource];
    }
}

/* Check the availability of the given media unit resource and update
   the number of cycles the current VLIW insn must wait until it is
   available.  */
void
vliw_wait_for_media_resource (SIM_CPU *cpu, INT in_resource)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->media_busy;
  /* If the latency of the resource is greater than the current wait
     then update the current wait.  */
  if (r[in_resource] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
        {
          sprintf (hazard_name,
                   "Resource hazard for media unit in slot M%d:",
                   in_resource);
        }
      ps->vliw_wait = r[in_resource];
    }
}

/* Run the caches until all requests for the given register(s) are
   satisfied.  */
void
load_wait_for_GR (SIM_CPU *cpu, INT in_GR)
{
  if (in_GR >= 0)
    {
      int wait = 0;
      while (load_pending_for_register (cpu, in_GR, 1/*words*/, REGTYPE_NONE))
        {
          frv_model_advance_cycles (cpu, 1);
          ++wait;
        }
      if (wait)
        {
          FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
          ps->vliw_wait += wait;
          ps->vliw_load_stall += wait;
          if (TRACE_INSN_P (cpu))
            sprintf (hazard_name, "Data hazard for gr%d:", in_GR);
        }
    }
}

void
load_wait_for_FR (SIM_CPU *cpu, INT in_FR)
{
  if (in_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *fr;
      int wait = 0;
      while (load_pending_for_register (cpu, in_FR, 1/*words*/, REGTYPE_FR))
        {
          frv_model_advance_cycles (cpu, 1);
          ++wait;
        }
      /* Post processing time may have been added to the register's
         latency after the loads were processed.  Account for that too.  */
      fr = ps->fr_busy;
      if (fr[in_FR])
        {
          wait += fr[in_FR];
          frv_model_advance_cycles (cpu, fr[in_FR]);
        }
      /* Update the vliw_wait with the number of cycles we waited for the
         load and any post-processing.  */
      if (wait)
        {
          ps->vliw_wait += wait;
          ps->vliw_load_stall += wait;
          if (TRACE_INSN_P (cpu))
            sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
        }
    }
}

void
load_wait_for_GRdouble (SIM_CPU *cpu, INT in_GR)
{
  if (in_GR >= 0)
    {
      int wait = 0;
      while (load_pending_for_register (cpu, in_GR, 2/*words*/, REGTYPE_NONE))
        {
          frv_model_advance_cycles (cpu, 1);
          ++wait;
        }
      if (wait)
        {
          FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
          ps->vliw_wait += wait;
          ps->vliw_load_stall += wait;
          if (TRACE_INSN_P (cpu))
            sprintf (hazard_name, "Data hazard for gr%d:", in_GR);
        }
    }
}
1736
 
1737
void
load_wait_for_FRdouble (SIM_CPU *cpu, INT in_FR)
{
  if (in_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *fr;
      int wait = 0;
      while (load_pending_for_register (cpu, in_FR, 2/*words*/, REGTYPE_FR))
        {
          frv_model_advance_cycles (cpu, 1);
          ++wait;
        }
      /* Post-processing time may have been added to the registers'
         latencies after the loads were processed.  Account for that too.  */
      fr = ps->fr_busy;
      if (fr[in_FR])
        {
          wait += fr[in_FR];
          frv_model_advance_cycles (cpu, fr[in_FR]);
        }
      if (in_FR < 63)
        {
          if (fr[in_FR + 1])
            {
              wait += fr[in_FR + 1];
              frv_model_advance_cycles (cpu, fr[in_FR + 1]);
            }
        }
      /* Update the vliw_wait with the number of cycles we waited for the
         load and any post-processing.  */
      if (wait)
        {
          ps->vliw_wait += wait;
          ps->vliw_load_stall += wait;
          if (TRACE_INSN_P (cpu))
            sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
        }
    }
}

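/* An illustrative sketch: a double-word FR operand occupies register
   IN_FR and its successor, so the routine above drains the pending loads
   and post-processing latency of both.  The register number here is
   hypothetical.  */
#if 0
static void
double_word_wait_sketch (SIM_CPU *cpu)
{
  load_wait_for_FRdouble (cpu, 4);  /* waits on fr4 and fr5 */
}
#endif
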
/* Mark FR register IN_FR so that its full latency must be observed;
   the -1 entry suppresses any busy-time adjustment.  */
void
enforce_full_fr_latency (SIM_CPU *cpu, INT in_FR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  ps->fr_busy_adjust[in_FR] = -1;
}

/* Calculate how long the post processing for a floating point insn must
   wait for resources to become available.  */
void
post_wait_for_FR (SIM_CPU *cpu, INT in_FR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *fr = ps->fr_busy;

  if (in_FR >= 0 && fr[in_FR] > ps->post_wait)
    {
      ps->post_wait = fr[in_FR];
      if (TRACE_INSN_P (cpu))
        sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
    }
}

/* Calculate how long the post processing for a floating point insn must
   wait for a double-word FR register pair to become available.  */
void
post_wait_for_FRdouble (SIM_CPU *cpu, INT in_FR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *fr = ps->fr_busy;

  if (in_FR >= 0)
    {
      if (fr[in_FR] > ps->post_wait)
        {
          ps->post_wait = fr[in_FR];
          if (TRACE_INSN_P (cpu))
            sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
        }
      if (in_FR < 63 && fr[in_FR + 1] > ps->post_wait)
        {
          ps->post_wait = fr[in_FR + 1];
          if (TRACE_INSN_P (cpu))
            sprintf (hazard_name, "Data hazard for fr%d:", in_FR + 1);
        }
    }
}

/* Calculate how long the post processing must wait for the given
   accumulator to become available.  */
void
post_wait_for_ACC (SIM_CPU *cpu, INT in_ACC)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *acc = ps->acc_busy;

  if (in_ACC >= 0 && acc[in_ACC] > ps->post_wait)
    {
      ps->post_wait = acc[in_ACC];
      if (TRACE_INSN_P (cpu))
        sprintf (hazard_name, "Data hazard for acc%d:", in_ACC);
    }
}

/* Calculate how long the post processing must wait for the given
   condition code register to become available.  */
void
post_wait_for_CCR (SIM_CPU *cpu, INT in_CCR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *ccr = ps->ccr_busy;

  if (in_CCR >= 0 && ccr[in_CCR] > ps->post_wait)
    {
      ps->post_wait = ccr[in_CCR];
      if (TRACE_INSN_P (cpu))
        {
          if (in_CCR > 3)
            sprintf (hazard_name, "Data hazard for icc%d:", in_CCR - 4);
          else
            sprintf (hazard_name, "Data hazard for fcc%d:", in_CCR);
        }
    }
}

/* Calculate how long the post processing must wait for the given
   special-purpose register to become available.  */
void
post_wait_for_SPR (SIM_CPU *cpu, INT in_SPR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *spr = ps->spr_busy;

  if (in_SPR >= 0 && spr[in_SPR] > ps->post_wait)
    {
      ps->post_wait = spr[in_SPR];
      if (TRACE_INSN_P (cpu))
        sprintf (hazard_name, "Data hazard for spr[%d]:", in_SPR);
    }
}

void
post_wait_for_fdiv (SIM_CPU *cpu, INT slot)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *fdiv = ps->fdiv_busy;

  /* Multiple floating point divisions in the same slot need only wait 1
     extra cycle.  */
  if (fdiv[slot] > 0 && 1 > ps->post_wait)
    {
      ps->post_wait = 1;
      if (TRACE_INSN_P (cpu))
        {
          sprintf (hazard_name, "Resource hazard for floating point division in slot F%d:", slot);
        }
    }
}

void
post_wait_for_fsqrt (SIM_CPU *cpu, INT slot)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *fsqrt = ps->fsqrt_busy;

  /* Multiple floating point square roots in the same slot need only wait 1
     extra cycle.  */
  if (fsqrt[slot] > 0 && 1 > ps->post_wait)
    {
      ps->post_wait = 1;
      if (TRACE_INSN_P (cpu))
        {
          sprintf (hazard_name, "Resource hazard for square root in slot F%d:", slot);
        }
    }
}

void
post_wait_for_float (SIM_CPU *cpu, INT slot)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *flt = ps->float_busy;

  /* If the floating point unit in the given slot is still busy, the post
     processing must wait until it becomes available.  */
  if (flt[slot] > ps->post_wait)
    {
      ps->post_wait = flt[slot];
      if (TRACE_INSN_P (cpu))
        {
          sprintf (hazard_name, "Resource hazard for floating point unit in slot F%d:", slot);
        }
    }
}

void
post_wait_for_media (SIM_CPU *cpu, INT slot)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *media = ps->media_busy;

  /* If the media unit in the given slot is still busy, the post
     processing must wait until it becomes available.  */
  if (media[slot] > ps->post_wait)
    {
      ps->post_wait = media[slot];
      if (TRACE_INSN_P (cpu))
        {
          sprintf (hazard_name, "Resource hazard for media unit in slot M%d:", slot);
        }
    }
}

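/* An illustrative sketch of how the post_wait_for_* routines combine:
   each raises ps->post_wait to the largest outstanding latency rather
   than summing them, so a caller checks every relevant resource and then
   applies the single combined wait.  The particular sequence below is
   hypothetical.  */
#if 0
static void
post_wait_sketch (SIM_CPU *cpu)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  ps->post_wait = 0;
  post_wait_for_FR (cpu, 10);   /* an FR operand */
  post_wait_for_fdiv (cpu, 0);  /* the divider in slot F0 */
  if (ps->post_wait)
    frv_model_advance_cycles (cpu, ps->post_wait);
}
#endif
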
/* Print cpu-specific profile information.  */
#define COMMAS(n) sim_add_commas (comma_buf, sizeof (comma_buf), (n))

static void
print_cache (SIM_CPU *cpu, FRV_CACHE *cache, const char *cache_name)
{
  SIM_DESC sd = CPU_STATE (cpu);

  if (cache != NULL)
    {
      char comma_buf[20];
      unsigned accesses;

      sim_io_printf (sd, "  %s Cache\n\n", cache_name);
      accesses = cache->statistics.accesses;
      sim_io_printf (sd, "    Total accesses:  %s\n", COMMAS (accesses));
      if (accesses != 0)
        {
          float rate;
          unsigned hits = cache->statistics.hits;
          sim_io_printf (sd, "    Hits:            %s\n", COMMAS (hits));
          rate = (float)hits / accesses;
          sim_io_printf (sd, "    Hit rate:        %.2f%%\n", rate * 100);
        }
    }
  else
    sim_io_printf (sd, "  Model %s has no %s cache\n",
                   MODEL_NAME (CPU_MODEL (cpu)), cache_name);

  sim_io_printf (sd, "\n");
}

/* This table must correspond to the UNIT_ATTR table in
   opcodes/frv-desc.h.  Only the units up to UNIT_C need be
   listed since the others cannot occur after mapping.  */
static const char *slot_names[] =
{
  "none",
  "I0", "I1", "I01", "I2", "I3", "IALL",
  "FM0", "FM1", "FM01", "FM2", "FM3", "FMALL", "FMLOW",
  "B0", "B1", "B01",
  "C"
};

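/* A hedged sketch of a compile-time consistency check for the table
   above, assuming UNIT_NIL is 0 and UNIT_C is the last mapped unit in
   the UNIT_ATTR enumeration (both assumptions): the typedef fails to
   compile if the entry count drifts out of sync.  */
#if 0
typedef char slot_names_size_check
  [(sizeof slot_names / sizeof slot_names[0]) == (UNIT_C + 1) ? 1 : -1];
#endif
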
static void
print_parallel (SIM_CPU *cpu, int verbose)
{
  SIM_DESC sd = CPU_STATE (cpu);
  PROFILE_DATA *p = CPU_PROFILE_DATA (cpu);
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  unsigned total, vliw;
  char comma_buf[20];
  float average;

  sim_io_printf (sd, "Model %s Parallelization\n\n",
                 MODEL_NAME (CPU_MODEL (cpu)));

  total = PROFILE_TOTAL_INSN_COUNT (p);
  sim_io_printf (sd, "  Total instructions:           %s\n", COMMAS (total));
  vliw = ps->vliw_insns;
  sim_io_printf (sd, "  VLIW instructions:            %s\n", COMMAS (vliw));
  average = (float)total / vliw;
  sim_io_printf (sd, "  Average VLIW length:          %.2f\n", average);
  average = (float)PROFILE_MODEL_TOTAL_CYCLES (p) / vliw;
  sim_io_printf (sd, "  Cycles per VLIW instruction:  %.2f\n", average);
  average = (float)total / PROFILE_MODEL_TOTAL_CYCLES (p);
  sim_io_printf (sd, "  Instructions per cycle:       %.2f\n", average);

  if (verbose)
    {
      int i;
      int max_val = 0;
      int max_name_len = 0;
      for (i = UNIT_NIL + 1; i < UNIT_NUM_UNITS; ++i)
        {
          if (INSNS_IN_SLOT (i))
            {
              int len;
              if (INSNS_IN_SLOT (i) > max_val)
                max_val = INSNS_IN_SLOT (i);
              len = strlen (slot_names[i]);
              if (len > max_name_len)
                max_name_len = len;
            }
        }
      if (max_val > 0)
        {
          sim_io_printf (sd, "\n");
          sim_io_printf (sd, "  Instructions per slot:\n");
          sim_io_printf (sd, "\n");
          for (i = UNIT_NIL + 1; i < UNIT_NUM_UNITS; ++i)
            {
              if (INSNS_IN_SLOT (i) != 0)
                {
                  sim_io_printf (sd, "  %*s: %*s: ",
                                 max_name_len, slot_names[i],
                                 max_val < 10000 ? 5 : 10,
                                 COMMAS (INSNS_IN_SLOT (i)));
                  sim_profile_print_bar (sd, PROFILE_HISTOGRAM_WIDTH,
                                         INSNS_IN_SLOT (i),
                                         max_val);
                  sim_io_printf (sd, "\n");
                }
            }
        } /* details to print */
    } /* verbose */

  sim_io_printf (sd, "\n");
}

void
frv_profile_info (SIM_CPU *cpu, int verbose)
{
  /* FIXME: Need to add smp support.  */
  PROFILE_DATA *p = CPU_PROFILE_DATA (cpu);

#if WITH_PROFILE_PARALLEL_P
  if (PROFILE_FLAGS (p) [PROFILE_PARALLEL_IDX])
    print_parallel (cpu, verbose);
#endif

#if WITH_PROFILE_CACHE_P
  if (PROFILE_FLAGS (p) [PROFILE_CACHE_IDX])
    {
      SIM_DESC sd = CPU_STATE (cpu);
      sim_io_printf (sd, "Model %s Cache Statistics\n\n",
                     MODEL_NAME (CPU_MODEL (cpu)));
      print_cache (cpu, CPU_INSN_CACHE (cpu), "Instruction");
      print_cache (cpu, CPU_DATA_CACHE (cpu), "Data");
    }
#endif /* WITH_PROFILE_CACHE_P */
}

/* A hack to get registers referenced for profiling.  */
SI frv_ref_SI (SI ref) { return ref; }
#endif /* WITH_PROFILE_MODEL_P */
