OpenCores
URL https://opencores.org/ocsvn/adv_debug_sys/adv_debug_sys/trunk

Subversion Repositories adv_debug_sys

[/] [adv_debug_sys/] [trunk/] [Software/] [adv_jtag_bridge/] [hardware_monitor.c] - Blame information for rev 42

Go to most recent revision | Details | Compare with Previous | View Log

Line No. Rev Author Line
1 42 nyawn
/* hardware_monitor.c -- Monitors and controls CPU stall state
2
   Copyright(C) 2010 Nathan Yawn <nyawn@opencores.org>
3
 
4
   This file was part the advanced debug unit / bridge.  It coordinates
5
   the CPU stall activity for the RSP server, the JSP server, and anything
6
   else that wants to stall the CPU, or know when it's running.
7
 
8
   This program is free software; you can redistribute it and/or modify
9
   it under the terms of the GNU General Public License as published by
10
   the Free Software Foundation; either version 2 of the License, or
11
   (at your option) any later version.
12
 
13
   This program is distributed in the hope that it will be useful,
14
   but WITHOUT ANY WARRANTY; without even the implied warranty of
15
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16
   GNU General Public License for more details.
17
 
18
   You should have received a copy of the GNU General Public License
19
   along with this program; if not, write to the Free Software
20
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
21
 
22
 
23
#include <stdio.h>
24
#include <unistd.h>
25
#include <sys/select.h>
26
#include <pthread.h>
27
#include <string.h> // for memcpy()
28
#include <errno.h>
29
 
30
#include "dbg_api.h"
31
#include "errcodes.h"
32
 
33
#define debug(...) //fprintf(stderr, __VA_ARGS__ )
34
 
35
#define MAX_MONITOR_CONNECTIONS 5
36
 
37
int monitor_thread_running = 0;
38
int target_is_running = 0;
39
 
40
typedef struct fdstruct
41
{
42
  int server_to_monitor_fds[2];
43
  int monitor_to_server_fds[2];
44
} fdstruct_t;
45
 
46
int num_monitor_connections = 0;
47
fdstruct_t connections[MAX_MONITOR_CONNECTIONS];
48
 
49
/* This mutex must be held when modifying num_monitor_connections or the connections[] array.
 * The lock should not be released until the two are consistent with each other. */
51
pthread_mutex_t pipes_mutex = PTHREAD_MUTEX_INITIALIZER;
52
 
53
pthread_t target_handler_thread;
54
void *target_handler(void *arg);
55
 
56
void stall_cpu(int stall);
57
void notify_listeners(char *outstr, int length);
58
 
59
/*----------------------------------------------------------------------------*/
60
/* Public API functions                                                       */
61
 
62
int start_monitor_thread(void)
63
{
64
  // The target usually starts off running.  If it's not, then we'll just poll in the monitor thread,
65
  // detect it's stalled, and set this correctly
66
  target_is_running = 1;
67
 
68
  // Create the harware target polling thread
69
  if(pthread_create(&target_handler_thread, NULL, target_handler, NULL))
70
    {
71
      fprintf(stderr, "Failed to create target handler thread!\n");
72
      return 0;
73
    }
74
 
75
  // Set a variable that shows we're running
76
  monitor_thread_running = 1;
77
 
78
  return 1;
79
}
80
 
81
 
82
int register_with_monitor_thread(int pipe_fds[2])
83
{
84
 
85
  // Fail if monitor thread not running
86
  if(!monitor_thread_running)
87
    return 0;
88
 
89
  if(num_monitor_connections >= MAX_MONITOR_CONNECTIONS)
90
    return 0;
91
 
92
 
93
  pthread_mutex_lock(&pipes_mutex);
94
 
95
  // We need two pairs of pipes, one for each direction of communication
96
  if(0 > pipe(connections[num_monitor_connections].server_to_monitor_fds)) {  // pipe_fds[0] is for reading, [1] is for writing
97
    perror("Error creating pipes: ");
98
    return 0;
99
  }
100
 
101
  if(0 > pipe(connections[num_monitor_connections].monitor_to_server_fds)) {  // pipe_fds[0] is for reading, [1] is for writing
102
    perror("Error creating second pipes: ");
103
    return 0;
104
  }
105
 
106
  pipe_fds[0] = connections[num_monitor_connections].server_to_monitor_fds[1];
107
  pipe_fds[1] = connections[num_monitor_connections].monitor_to_server_fds[0];
108
 
109
  num_monitor_connections++;
110
  pthread_mutex_unlock(&pipes_mutex);
111
 
112
  return 1;
113
}
114
 
115
void unregister_with_monitor_thread(int pipe_fds[2])
116
{
117
  int i;
118
  int found = 0;
119
 
120
  // Don't bother with invalid pipe IDs.
121
  if((pipe_fds[0] < 0) || (pipe_fds[1] < 0))
122
    return;
123
 
124
  pthread_mutex_lock(&pipes_mutex);
125
 
126
  for(i = 0; i < num_monitor_connections; i++)
127
    {
128
      if(connections[i].server_to_monitor_fds[1] == pipe_fds[0] &&
129
         connections[i].monitor_to_server_fds[0] == pipe_fds[1])
130
        {
131
          found = 1;
132
          close(connections[i].server_to_monitor_fds[0]);
133
          close(connections[i].server_to_monitor_fds[1]);
134
          close(connections[i].monitor_to_server_fds[0]);
135
          close(connections[i].monitor_to_server_fds[1]);
136
          pipe_fds[0] = -1;
137
          pipe_fds[1] = -1;  // in case of multiple unregister attempts
138
          // Because we just add new connections to the end of the array, we have to
139
          // reshuffle when we delete one out of the middle.  We do this by taking
140
          // the last entry and moving it to the newly vacated spot.  Don't bother
141
          // if we're removing the last entry.
142
          if(i != (num_monitor_connections-1))
143
            {
144
              memcpy(&connections[i], &connections[num_monitor_connections-1], sizeof(fdstruct_t));
145
            }
146
          num_monitor_connections--;
147
          break;
148
        }
149
    }
150
 
151
  pthread_mutex_unlock(&pipes_mutex);
152
 
153
  if(!found)
154
    {
155
      fprintf(stderr, "Warning:  monitor thread did not find pipe set for unregistration! fd[0] is 0x%X, fd[1] is 0x%X\n", pipe_fds[0], pipe_fds[1]);
156
    }
157
 
158
}
159
 
160
 
161
 
162
///////////////////////////////////////////////////////////////////////////
163
//  Thread to poll for break on remote processor.
164
 
165
// Polling algorithm:
166
// Set timeout to 1/4 second.  This allows new pipe sets to be registered easily.
167
// poll/select on all valid incoming pipe fds
168
// If data, run all commands, send feedback to all registered servers, loop back to timeout determination
169
// if no data and target running, poll target state, send feedback if stopped
170
 
171
 
172
void *target_handler(void *arg)
173
{
174
  struct timeval tv;
175
  struct fd_set  readset;
176
  int i, fd, ret, nfds;
177
  char cmd;
178
  unsigned char target_status;
179
 
180
  debug("Target handler thread started!\n");
181
 
182
  while(1)
183
    {
184
      // Set this each loop, it may be changed by the select() call
185
      tv.tv_sec = 0;
186
      tv.tv_usec = 250000;  // 1/4 second timeout when polling
187
 
188
      FD_ZERO(&readset);
189
      nfds = 0;
190
 
191
      pthread_mutex_lock(&pipes_mutex);
192
      for(i = 0; i < num_monitor_connections; i++)
193
        {
194
          fd = connections[i].server_to_monitor_fds[0];
195
          FD_SET(fd, &readset);
196
          if(fd > nfds)
197
            nfds = fd;
198
        }
199
      pthread_mutex_unlock(&pipes_mutex);
200
      nfds++;
201
 
202
      // We do not hold the pipes_mutex during the select(), so it is possible that some of
203
      // the pipes in the readset will go away while we block.  This is fine, as we re-take
204
      // the lock below and iterate through the (changed) connections[] array, which will
205
      // ignore any pipes which have closed, even if they are in the readset.
206
 
207
      ret = select(nfds, &readset, NULL, NULL, &tv);
208
 
209
      if(ret == -1)  // error
210
        {
211
          // We may get an EBADF if a server un-registers its pipes while we're in the select() 
212
          // (very likely).  So, ignore EBADF unless there's a problem that needs debugged.
213
          if(errno != EBADF)
214
            perror("select()");
215
          else
216
            {
217
              debug("Monitor thread got EBADF in select().  Server unregistration, or real problem?");
218
            }
219
        }
220
      else if(ret != 0)  // fd ready (ret == 0 on timeout)
221
        {
222
          debug("Monitor thread got data\n");
223
          pthread_mutex_lock(&pipes_mutex);
224
          for(i = 0; i < num_monitor_connections; i++)
225
            {
226
              debug("Monitor checking incoming connection %i\n", i);
227
              fd = connections[i].server_to_monitor_fds[0];
228
              if(FD_ISSET(fd, &readset))
229
                {
230
                  ret = read(fd, &cmd, 1);
231
                  debug("Target monitor thread got command \'%c\' (0x%X)\n", cmd, cmd);
232
                  if(ret == 1)
233
                    {
234
                      if(cmd == 'S')
235
                        {
236
                          if(target_is_running)  stall_cpu(1);
237
                          notify_listeners("H", 1);
238
                        }
239
                      else if(cmd == 'U')
240
                        {
241
                          if(!target_is_running) stall_cpu(0);
242
                          notify_listeners("R", 1);
243
                        }
244
                      else
245
                        {
246
                          fprintf(stderr, "Target monitor thread got unknown command \'%c\' (0x%X)\n", cmd, cmd);
247
                        }
248
                    }
249
                  else
250
                    {
251
                      fprintf(stderr, "Monitor thread failed to read from ready descriptor!\n");
252
                    }
253
                }  // if FD_ISSET()
254
            }  // for i = 0 to num_monitor_connections
255
          pthread_mutex_unlock(&pipes_mutex);
256
 
257
          // We got a command.  Either the target is now stalled and we don't need to poll,
258
          // or the target just started and we should wait a bit before polling.
259
          continue;
260
 
261
        }  // else if (ret != 0)
262
 
263
 
264
      if(target_is_running)
265
        {
266
          debug("Monitor polling hardware!\n");
267
          // Poll target hardware
268
          ret = dbg_cpu0_read_ctrl(0, &target_status);
269
          if(ret != APP_ERR_NONE)
270
            fprintf(stderr, "ERROR 0x%X while polling target CPU status\n", ret);
271
          else {
272
            if(target_status & 0x01)  // Did we get the stall bit?  Bit 0 is STALL bit.
273
              {
274
                debug("Monitor poll found CPU stalled!\n");
275
                target_is_running = 0;
276
                pthread_mutex_lock(&pipes_mutex);
277
                notify_listeners("H", 1);
278
                pthread_mutex_unlock(&pipes_mutex);
279
              }
280
          }
281
        }  // if(target_is_running)
282
 
283
 
284
    }  // while(1), main loop
285
 
286
  fprintf(stderr, "Target monitor thread exiting!!");
287
 
288
  return arg;
289
}
290
 
291
 
292
 
293
///////////////////////////////////////////////////////////////////////////////
294
// Helper functions for the monitor thread
295
 
296
void stall_cpu(int stall)
297
{
298
  int retval = 0;
299
  unsigned char data = (stall>0)? 1:0;
300
 
301
  // Actually start or stop the CPU hardware
302
  retval = dbg_cpu0_write_ctrl(0, data);  // 0x01 is the STALL command bit
303
  if(retval != APP_ERR_NONE)
304
    fprintf(stderr, "ERROR 0x%X sending async STALL to target.\n", retval);
305
 
306
  target_is_running = !data;
307
 
308
  return;
309
}
310
 
311
/* Lock the pipes_mutex before calling this! */
312
void notify_listeners(char *outstr, int length)
313
{
314
  int i;
315
  int ret;
316
 
317
  for(i = 0; i < num_monitor_connections; i++)
318
    {
319
      ret = write(connections[i].monitor_to_server_fds[1], outstr, length);
320
      if(ret < 0) {
321
        perror("Error notifying listener in target monitor");
322
      }
323
      else if(ret == 0) {
324
        fprintf(stderr, "Monitor thread wrote 0 bytes attempting to notify server\n");
325
      }
326
    }
327
}

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.