/*  This file is part of the program psim.

    Copyright (C) 1994-1998, Andrew Cagney <cagney@highland.com.au>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.

    */


#ifndef _EVENTS_C_
#define _EVENTS_C_

#include "basics.h"
#include "events.h"

#include <signal.h>

#if !defined (SIM_EVENTS_POLL_RATE)
#define SIM_EVENTS_POLL_RATE 0x1000
#endif


/* The event queue maintains a single absolute time using two
   variables.

   TIME_OF_EVENT: this holds the time at which the next event is meant
   to occur.  If there is no next event, it holds the time of the last
   event.

   TIME_FROM_EVENT: the distance from the current time to
   TIME_OF_EVENT.  If an event is pending, this will be positive.  If
   no future event is pending, this will be negative.  This variable is
   decremented once for each iteration of a clock cycle.

   Initially, the clock is started at time one (1) with TIME_OF_EVENT
   == 0 and TIME_FROM_EVENT == -1.

   Clearly there is a bug in that this code assumes that the absolute
   time counter will never become greater than 2^62. */
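
/* For illustration of the arithmetic above: once the clock has been
   wound back (TIME_OF_EVENT == 0, TIME_FROM_EVENT == -1),
   event_queue_time() returns 0 - (-1) == 1.  Scheduling an event 5
   ticks ahead of time 1 gives it a time_of_event of 6;
   update_time_from_event() then records TIME_OF_EVENT == 6 and
   TIME_FROM_EVENT == 5.  Each event_queue_tick() decrements
   TIME_FROM_EVENT, and the tick that finds it already at zero returns
   non-zero to indicate that event_queue_process() should run. */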

typedef struct _event_entry event_entry;
struct _event_entry {
  void *data;
  event_handler *handler;
  signed64 time_of_event;
  event_entry *next;
};

struct _event_queue {
  int processing;
  event_entry *queue;
  event_entry *volatile held;
  event_entry *volatile *volatile held_end;
  signed64 time_of_event;
  signed64 time_from_event;
};


STATIC_INLINE_EVENTS\
(void)
sim_events_poll (void *data)
{
  event_queue *queue = data;
  /* just re-schedule in SIM_EVENTS_POLL_RATE ticks time */
  event_queue_schedule (queue, SIM_EVENTS_POLL_RATE, sim_events_poll, queue);
  sim_io_poll_quit ();
}


INLINE_EVENTS\
(event_queue *)
event_queue_create(void)
{
  event_queue *new_event_queue = ZALLOC(event_queue);

  new_event_queue->processing = 0;
  new_event_queue->queue = NULL;
  new_event_queue->held = NULL;
  new_event_queue->held_end = &new_event_queue->held;

  /* both times are already zero */
  return new_event_queue;
}
97
 
98
 
99
INLINE_EVENTS\
100
(void)
101
event_queue_init(event_queue *queue)
102
{
103
  event_entry *event;
104
 
105
  /* drain the interrupt queue */
106
  {
107
#if defined(HAVE_SIGPROCMASK) && defined(SIG_SETMASK)
108
    sigset_t old_mask;
109
    sigset_t new_mask;
110
    sigfillset(&new_mask);
111
    /*-LOCK-*/ sigprocmask(SIG_SETMASK, &new_mask, &old_mask);
112
#endif
113
    event = queue->held;
114
    while (event != NULL) {
115
      event_entry *dead = event;
116
      event = event->next;
117
      zfree(dead);
118
    }
119
    queue->held = NULL;
120
    queue->held_end = &queue->held;
121
#if defined(HAVE_SIGPROCMASK) && defined(SIG_SETMASK)
122
    /*-UNLOCK-*/ sigprocmask(SIG_SETMASK, &old_mask, NULL);
123
#endif
124
  }
125
 
126
  /* drain the normal queue */
127
  event = queue->queue;
128
  while (event != NULL) {
129
    event_entry *dead = event;
130
    event = event->next;
131
    zfree(dead);
132
  }
133
  queue->queue = NULL;
134
 
135
  /* wind time back to one */
136
  queue->processing = 0;
137
  queue->time_of_event = 0;
138
  queue->time_from_event = -1;
139
 
140
  /* schedule our initial counter event */
141
  event_queue_schedule (queue, 0, sim_events_poll, queue);
142
}

INLINE_EVENTS\
(signed64)
event_queue_time(event_queue *queue)
{
  return queue->time_of_event - queue->time_from_event;
}

STATIC_INLINE_EVENTS\
(void)
update_time_from_event(event_queue *events)
{
  signed64 current_time = event_queue_time(events);
  if (events->queue != NULL) {
    events->time_from_event = (events->queue->time_of_event - current_time);
    events->time_of_event = events->queue->time_of_event;
  }
  else {
    events->time_of_event = current_time - 1;
    events->time_from_event = -1;
  }
  if (WITH_TRACE && ppc_trace[trace_events])
    {
      event_entry *event;
      int i;
      for (event = events->queue, i = 0;
           event != NULL;
           event = event->next, i++)
        {
          TRACE(trace_events, ("event time-from-event - time %ld, delta %ld - event %d, tag 0x%lx, time %ld, handler 0x%lx, data 0x%lx\n",
                               (long)current_time,
                               (long)events->time_from_event,
                               i,
                               (long)event,
                               (long)event->time_of_event,
                               (long)event->handler,
                               (long)event->data));
        }
    }
  ASSERT(current_time == event_queue_time(events));
}

STATIC_INLINE_EVENTS\
(void)
insert_event_entry(event_queue *events,
                   event_entry *new_event,
                   signed64 delta)
{
  event_entry *curr;
  event_entry **prev;
  signed64 time_of_event;

  if (delta < 0)
    error("what is past is past!\n");

  /* compute when the event should occur */
  time_of_event = event_queue_time(events) + delta;

  /* find the queue insertion point - things are time ordered */
  prev = &events->queue;
  curr = events->queue;
  while (curr != NULL && time_of_event >= curr->time_of_event) {
    ASSERT(curr->next == NULL
           || curr->time_of_event <= curr->next->time_of_event);
    prev = &curr->next;
    curr = curr->next;
  }
  ASSERT(curr == NULL || time_of_event < curr->time_of_event);

  /* insert it */
  new_event->next = curr;
  *prev = new_event;
  new_event->time_of_event = time_of_event;

  /* adjust the time until the first event */
  update_time_from_event(events);
}
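
/* Note that the search above uses ">=", so an event scheduled for the
   same time as existing entries is placed after them; events falling
   due on the same tick are therefore handled in the order they were
   scheduled.  For example, scheduling deltas of 10, 5 and 10 at time 1
   produces a queue ordered 6, 11 (first), 11 (second). */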

INLINE_EVENTS\
(event_entry_tag)
event_queue_schedule(event_queue *events,
                     signed64 delta_time,
                     event_handler *handler,
                     void *data)
{
  event_entry *new_event = ZALLOC(event_entry);
  new_event->data = data;
  new_event->handler = handler;
  insert_event_entry(events, new_event, delta_time);
  TRACE(trace_events, ("event scheduled at %ld - tag 0x%lx - time %ld, handler 0x%lx, data 0x%lx\n",
                       (long)event_queue_time(events),
                       (long)new_event,
                       (long)new_event->time_of_event,
                       (long)new_event->handler,
                       (long)new_event->data));
  return (event_entry_tag)new_event;
}
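
/* A minimal usage sketch; MY_DELAY, my_handler and my_data are
   hypothetical names, not part of this file:

     event_entry_tag tag
       = event_queue_schedule (events, MY_DELAY, my_handler, my_data);
     ...
     event_queue_deschedule (events, tag);

   Descheduling with the returned tag cancels the event before it
   fires.  The tag is only meaningful while the event is still pending;
   event_queue_process() frees the entry before invoking its handler. */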


INLINE_EVENTS\
(event_entry_tag)
event_queue_schedule_after_signal(event_queue *events,
                                  signed64 delta_time,
                                  event_handler *handler,
                                  void *data)
{
  event_entry *new_event = ZALLOC(event_entry);

  new_event->data = data;
  new_event->handler = handler;
  new_event->time_of_event = delta_time; /* work it out later */
  new_event->next = NULL;

  {
#if defined(HAVE_SIGPROCMASK) && defined(SIG_SETMASK)
    sigset_t old_mask;
    sigset_t new_mask;
    sigfillset(&new_mask);
    /*-LOCK-*/ sigprocmask(SIG_SETMASK, &new_mask, &old_mask);
#endif
    if (events->held == NULL) {
      events->held = new_event;
    }
    else {
      *events->held_end = new_event;
    }
    events->held_end = &new_event->next;
#if defined(HAVE_SIGPROCMASK) && defined(SIG_SETMASK)
    /*-UNLOCK-*/ sigprocmask(SIG_SETMASK, &old_mask, NULL);
#endif
  }

  TRACE(trace_events, ("event scheduled at %ld - tag 0x%lx - time %ld, handler 0x%lx, data 0x%lx\n",
                       (long)event_queue_time(events),
                       (long)new_event,
                       (long)new_event->time_of_event,
                       (long)new_event->handler,
                       (long)new_event->data));

  return (event_entry_tag)new_event;
}
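
/* This variant exists so that an asynchronous signal handler can queue
   work without touching the sorted queue that the simulator proper may
   be in the middle of updating; the new entry sits on the HELD list
   until the next event_queue_tick() folds it into the real queue.  A
   hypothetical sketch (handle_sigalrm, sim_events and alarm_expired
   are illustrative names only):

     static void
     handle_sigalrm (int signum)
     {
       event_queue_schedule_after_signal (sim_events, 0, alarm_expired, NULL);
     }
*/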


INLINE_EVENTS\
(void)
event_queue_deschedule(event_queue *events,
                       event_entry_tag event_to_remove)
{
  event_entry *to_remove = (event_entry*)event_to_remove;
  ASSERT((events->time_from_event >= 0) == (events->queue != NULL));
  if (event_to_remove != NULL) {
    event_entry *current;
    event_entry **ptr_to_current;
    for (ptr_to_current = &events->queue, current = *ptr_to_current;
         current != NULL && current != to_remove;
         ptr_to_current = &current->next, current = *ptr_to_current);
    if (current == to_remove) {
      *ptr_to_current = current->next;
      TRACE(trace_events, ("event descheduled at %ld - tag 0x%lx - time %ld, handler 0x%lx, data 0x%lx\n",
                           (long)event_queue_time(events),
                           (long)event_to_remove,
                           (long)current->time_of_event,
                           (long)current->handler,
                           (long)current->data));
      zfree(current);
      update_time_from_event(events);
    }
    else {
      TRACE(trace_events, ("event descheduled at %ld - tag 0x%lx - not found\n",
                           (long)event_queue_time(events),
                           (long)event_to_remove));
    }
  }
  ASSERT((events->time_from_event >= 0) == (events->queue != NULL));
}




INLINE_EVENTS\
(int)
event_queue_tick(event_queue *events)
{
  signed64 time_from_event;

  /* we should only be here when the previous tick has been fully processed */
  ASSERT(!events->processing);

  /* move any events that were queued by any signal handlers onto the
     real event queue.  BTW: When inlining, having this code here,
     instead of in event_queue_process(), causes GCC to put greater
     weight on keeping the pointer EVENTS in a register.  This, in
     turn, results in better code being output. */
  if (events->held != NULL) {
    event_entry *held_events;
    event_entry *curr_event;

    {
#if defined(HAVE_SIGPROCMASK) && defined(SIG_SETMASK)
      sigset_t old_mask;
      sigset_t new_mask;
      sigfillset(&new_mask);
      /*-LOCK-*/ sigprocmask(SIG_SETMASK, &new_mask, &old_mask);
#endif
      held_events = events->held;
      events->held = NULL;
      events->held_end = &events->held;
#if defined(HAVE_SIGPROCMASK) && defined(SIG_SETMASK)
      /*-UNLOCK-*/ sigprocmask(SIG_SETMASK, &old_mask, NULL);
#endif
    }

    do {
      curr_event = held_events;
      held_events = curr_event->next;
      insert_event_entry(events, curr_event, curr_event->time_of_event);
    } while (held_events != NULL);
  }

  /* advance time, checking to see if we've reached time zero which
     would indicate the time for the next event has arrived */
  time_from_event = events->time_from_event;
  events->time_from_event = time_from_event - 1;
  return time_from_event == 0;
}



INLINE_EVENTS\
(void)
event_queue_process(event_queue *events)
{
  signed64 event_time = event_queue_time(events);

  ASSERT((events->time_from_event == -1 && events->queue != NULL)
         || events->processing); /* something to do */

  /* consume all events for this or earlier times.  Be careful to
     allow a new event to appear under our feet */
  events->processing = 1;
  while (events->queue != NULL
         && events->queue->time_of_event <= event_time) {
    event_entry *to_do = events->queue;
    event_handler *handler = to_do->handler;
    void *data = to_do->data;
    events->queue = to_do->next;
    TRACE(trace_events, ("event issued at %ld - tag 0x%lx - time %ld, handler 0x%lx, data 0x%lx\n",
                         (long)event_time,
                         (long)to_do,
                         (long)to_do->time_of_event,
                         (long)handler,
                         (long)data));
    zfree(to_do);
    /* Always re-compute the time to the next event so that HANDLER()
       can safely insert new events into the queue. */
    update_time_from_event(events);
    handler(data);
  }
  events->processing = 0;

  ASSERT(events->time_from_event > 0);
  ASSERT(events->queue != NULL); /* always poll event */
}
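
/* A sketch of how a main simulation loop might drive the queue
   (illustrative only; simulating and execute_one_cycle are
   hypothetical names, and the real psim driver lives elsewhere):

     event_queue *events = event_queue_create ();
     event_queue_init (events);
     while (simulating) {
       execute_one_cycle ();
       if (event_queue_tick (events))
         event_queue_process (events);
     }

   event_queue_tick() runs once per simulated cycle and returns
   non-zero only on the cycle where the next pending event falls due,
   so the more expensive event_queue_process() is entered only then. */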


#endif /* _EVENTS_C_ */
