1 |
22 |
dgisselq |
////////////////////////////////////////////////////////////////////////////////
|
2 |
|
|
//
|
3 |
|
|
// Filename: syspipe.c
|
4 |
|
|
//
|
5 |
|
|
// Project: CMod S6 System on a Chip, ZipCPU demonstration project
|
6 |
|
|
//
|
7 |
|
|
// Purpose: This "device" handles the primary device level interaction of
|
8 |
|
|
// almost all devices on the ZipOS: the pipe. A pipe, as defined
|
9 |
|
|
// here, is an O/S supported FIFO. Information written to the FIFO will
|
10 |
|
|
// be read from the FIFO in the order it was received. Attempts to read
|
11 |
|
|
// from an empty FIFO, or equivalently to write to a full FIFO, will block
|
12 |
|
|
// the reading (writing) process until memory is available.
|
13 |
|
|
//
|
14 |
|
|
// Creator: Dan Gisselquist, Ph.D.
|
15 |
|
|
// Gisselquist Technology, LLC
|
16 |
|
|
//
|
17 |
|
|
////////////////////////////////////////////////////////////////////////////////
|
18 |
|
|
//
|
19 |
|
|
// Copyright (C) 2015-2016, Gisselquist Technology, LLC
|
20 |
|
|
//
|
21 |
|
|
// This program is free software (firmware): you can redistribute it and/or
|
22 |
|
|
// modify it under the terms of the GNU General Public License as published
|
23 |
|
|
// by the Free Software Foundation, either version 3 of the License, or (at
|
24 |
|
|
// your option) any later version.
|
25 |
|
|
//
|
26 |
|
|
// This program is distributed in the hope that it will be useful, but WITHOUT
|
27 |
|
|
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
28 |
|
|
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
29 |
|
|
// for more details.
|
30 |
|
|
//
|
31 |
|
|
// You should have received a copy of the GNU General Public License along
|
32 |
|
|
// with this program. (It's in the $(ROOT)/doc directory, run make with no
|
33 |
|
|
// target there if the PDF file isn't present.) If not, see
|
34 |
|
|
// <http://www.gnu.org/licenses/> for a copy.
|
35 |
|
|
//
|
36 |
|
|
// License: GPL, v3, as defined and found on www.gnu.org,
|
37 |
|
|
// http://www.gnu.org/licenses/gpl.html
|
38 |
|
|
//
|
39 |
|
|
//
|
40 |
|
|
////////////////////////////////////////////////////////////////////////////////
|
41 |
|
|
//
|
42 |
|
|
//
|
43 |
|
|
#include "errno.h"
|
44 |
|
|
#include "board.h"
|
45 |
|
|
#include "taskp.h"
|
46 |
|
|
#include "syspipe.h"
|
47 |
|
|
#include "zipsys.h"
|
48 |
|
|
#include "ktraps.h"
|
49 |
|
|
|
50 |
|
|
#ifndef NULL
|
51 |
|
|
#define NULL (void *)0
|
52 |
|
|
#endif
|
53 |
|
|
|
54 |
|
|
// Reset a pipe to its freshly-created state: empty the FIFO, scrub the
// buffer, and release any task that was left blocked on the old contents.
static void clear_syspipe(SYSPIPE *pipe) {
	// Empty the FIFO and clear any pending overrun indication
	pipe->m_head  = 0;
	pipe->m_tail  = 0;
	pipe->m_error = 0;

	// Scrub all (m_mask+1) buffer entries
	for(int k=0; k<=(int)pipe->m_mask; k++)
		pipe->m_buf[k] = 0;

	// If a task was blocked on this pipe, wake it up and hand it the
	// count it had accumulated so far.  A zero count is reported as an
	// -EFAULT via the task's errno.
	if ((pipe->m_rdtask)&&(pipe->m_rdtask != INTERRUPT_READ_TASK)) {
		pipe->m_rdtask->context[1] = pipe->m_nread;
		if (pipe->m_nread == 0)
			pipe->m_rdtask->errno = -EFAULT;
		pipe->m_rdtask->state = SCHED_READY;
	} else if (pipe->m_wrtask) {
		pipe->m_wrtask->context[1] = pipe->m_nwritten;
		if (pipe->m_nwritten == 0)
			pipe->m_wrtask->errno = -EFAULT;
		pipe->m_wrtask->state = SCHED_READY;
	}

	// Detach both ends of the pipe.  An interrupt-driven reader keeps
	// its sentinel attachment; everything else is cleared.
	if (pipe->m_rdtask != INTERRUPT_READ_TASK)
		pipe->m_rdtask = 0;
	pipe->m_wrtask   = 0;
	pipe->m_nread    = 0;
	pipe->m_nwritten = 0;
}
|
80 |
|
|
|
81 |
|
|
// Push a single value into the pipe from kernel/interrupt context.
// On overflow the value is dropped and the pipe's error flag is set, to
// be reported on the next read.
void kpush_syspipe(SYSPIPE *pipe, int val) {
	int	nxt = (pipe->m_head+1)&pipe->m_mask;

	if (nxt == pipe->m_tail) {
		// FIFO is full: record the overrun; the value is lost
		pipe->m_error = 1;
		return;
	}

	pipe->m_buf[pipe->m_head] = val;
	pipe->m_head = nxt;	// Advance the head past the new value

	// Data is now available: wake any (non-interrupt) reader
	if ((pipe->m_rdtask)&&(pipe->m_rdtask != INTERRUPT_READ_TASK))
		pipe->m_rdtask->state = SCHED_READY;
}
|
90 |
|
|
|
91 |
|
|
void txchr(char v) {
|
92 |
|
|
volatile IOSPACE *sys = (IOSPACE *)IOADDR;
|
93 |
|
|
if (v < 10)
|
94 |
|
|
return;
|
95 |
|
|
v &= 0x0ff;
|
96 |
|
|
sys->io_pic = INT_UARTTX;
|
97 |
|
|
while((sys->io_pic&INT_UARTTX)==0)
|
98 |
|
|
;
|
99 |
|
|
sys->io_uart = v;
|
100 |
|
|
}
|
101 |
|
|
|
102 |
|
|
// Transmit a NUL-terminated string out the UART, one character at a time.
void txstr(const char *str) {
	for(const char *s = str; *s; s++)
		txchr(*s);
}
|
108 |
|
|
|
109 |
|
|
// Print a 32-bit value as eight uppercase hex digits, followed by CRLF.
void txhex(int num) {
	// Walk the nibbles from most- to least-significant
	for(int shift=28; shift>=0; shift-=4) {
		int nib = (num>>shift)&0x0f;
		int ch = (nib >= 10) ? ('A'+nib-10) : ('0'+nib);
		txchr(ch);
	}
	txstr("\r\n");
}
|
120 |
|
|
|
121 |
|
|
// Dump a pipe's vital state out the UART, set a recognizable LED
// pattern, and hand control to kpanic().
void pipe_panic(SYSPIPE *pipe) {
	extern void kpanic(void);
	volatile IOSPACE *io = (IOSPACE *)IOADDR;

	// Light a distinctive pattern on the board's LEDs first, in case
	// the UART output never makes it out
	io->io_spio = 0x0fa;

	txstr("SYSPIPE PANIC!\r\n");
	txstr("ADDR: "); txhex((int)pipe);
	txstr("MASK: "); txhex(pipe->m_mask);
	txstr("HEAD: "); txhex(pipe->m_head);
	txstr("TAIL: "); txhex(pipe->m_tail);
	kpanic();
}
|
134 |
|
|
|
135 |
|
|
// Pop one value from the pipe into *vl (kernel context).
// Returns 0 on success, 1 if the pipe was empty.
int kpop_syspipe(SYSPIPE *pipe, int *vl) {
	if (pipe->m_head == pipe->m_tail)
		return 1;	// Pipe empty -- nothing to pop

	*vl = pipe->m_buf[pipe->m_tail];
	pipe->m_tail++;
	// Wrap the tail back to the start of the circular buffer
	if ((unsigned)pipe->m_tail > pipe->m_mask)
		pipe->m_tail = 0;

	// Space just opened up: wake any blocked writer
	if (pipe->m_wrtask)
		pipe->m_wrtask->state = SCHED_READY;
	return 0;
}
|
146 |
|
|
|
147 |
|
|
SYSPIPE *new_syspipe(const unsigned int len) {
|
148 |
|
|
unsigned msk;
|
149 |
|
|
|
150 |
|
|
for(msk=2; msk<len; msk<<=1)
|
151 |
|
|
;
|
152 |
|
|
SYSPIPE *pipe = sys_malloc(sizeof(SYSPIPE)-1+msk);
|
153 |
|
|
pipe->m_mask = msk-1;
|
154 |
|
|
pipe->m_rdtask = pipe->m_wrtask = 0;
|
155 |
|
|
clear_syspipe(pipe);
|
156 |
|
|
return pipe;
|
157 |
|
|
}
|
158 |
|
|
|
159 |
|
|
// Number of values currently stored in the pipe (0 .. m_mask).
int len_syspipe(SYSPIPE *p) {
	int fill = p->m_head - p->m_tail;
	return fill & p->m_mask;
}
|
162 |
|
|
// Number of values that may still be written before the pipe is full.
// One slot is always kept empty to distinguish full from empty, so the
// result ranges from 0 to m_mask.
int num_avail_syspipe(SYSPIPE *p) {
	int room = p->m_mask + p->m_tail - p->m_head;
	return room & p->m_mask;
}
|
165 |
|
|
|
166 |
|
|
// This will be called from a user context.
|
167 |
|
|
// Another task may write to the pipe during this call. If the pipe becomes
|
168 |
|
|
// full, that task will block.
|
169 |
|
|
//
|
170 |
|
|
// User-space continuation of read(): copy 'len' words from the pipe into
// 'dst', blocking (via wait) whenever the pipe runs dry, until the entire
// request is satisfied.  Runs in the reading task's context; 'tsk' is
// unused because kread_syspipe already configured the task's registers
// (context[3]=dst, context[4]=len) before control arrives here.
//
// Returns the total number of words read.
static int uread_syspipe(TASKP tsk __attribute__((__unused__)), SYSPIPE *p, int *dst, int len) {
	int	nleft= len, h;
	if (len == 0) {
		// We'll only get here if we were released from within a
		// writing task.
		return p->m_nread;
	} else do {
		// We have a valid read request, for a new process.  Continue
		// 'reading' until we have fulfilled the request.
		//
		// We can read from head, just not write to it.
		// As for the tail pointer -- we own it, no one else can touch
		// it.
		//
		// Snapshot the head through a volatile access: a writer or an
		// interrupt may push new data at any time.
		h = ((volatile SYSPIPE *)p)->m_head;
		if (h < p->m_tail) {
			// The buffer wraps around the end.  Thus, we first
			// read anything between the tail pointer and the end.
			int ln1 = p->m_mask+1 - p->m_tail; // Navail to be read
			ln1 = (ln1 > nleft) ? nleft : ln1;
			if (ln1 > 0) {
				register int *src = &p->m_buf[p->m_tail];
				for(int i=0; i<ln1; i++)
					*dst++ = *src++;

				p->m_nread += ln1;
				nleft -= ln1;
				p->m_tail += ln1;
				// Wrap the tail back to the buffer start
				if ((unsigned)p->m_tail > p->m_mask)
					p->m_tail = 0;
			}

			// nleft is either zero, or tail
			//
			// NOTE(review): (x & -2) is nonzero for any x other
			// than 0 or 1, so these exit() calls fire on any
			// multi-word remainder -- they look like leftover
			// debug traps; confirm intent before relying on them.
			if (nleft & -2)
				exit(nleft);
			else if (p->m_nread & -2)
				exit(p->m_nread);
		}

		// Then repeat with the second half of the buffer, from the
		// beginning to the head--unless we've exhausted our buffer.
		if (nleft > 0) {
			// Still need to do more, wrap around our buffer and
			// restart.  'h' is the snapshot taken above.
			int ln1 = h - p->m_tail;
			ln1 = (ln1 < nleft) ? ln1 : nleft;

			int *src = &p->m_buf[p->m_tail];
			for(int i=0; i<ln1; i++)
				*dst++ = *src++;

			p->m_nread += ln1;
			nleft -= ln1;
			p->m_tail += ln1;
			if (p->m_tail == (int)p->m_mask+1)
				p->m_tail = 0;

			// NOTE(review): same apparent debug traps as above
			if (nleft & -2)
				exit(nleft);
			else if (p->m_nread & -2)
				exit(p->m_nread);
		}

		// Request fully satisfied -- done
		if (nleft == 0)
			break;

		// We didn't finish our read, check for a blocked writing
		// process to copy directly from.  Note that we don't need
		// to check the status of the write task--if it is set and
		// we are active, then it is blocked and waiting for us to
		// complete.  Note also that this is treated as a volatile
		// pointer.  It can change from one time through our loop
		// to the next.
		if (((volatile SYSPIPE *)p)->m_wrtask) {
			int	*src, ln;

			// If the head changed before the write task blocked,
			// then go around again and copy some more before
			// getting started.
			//
			// This should never happen, however.  If a write task
			// gets assigned while a read task exists, it doesn't
			// write its values into the buffer, it just waits.
			// Therefore we don't need to check for this.
			//
			// if (p->m_head != h)
			//	continue;

			// Copy straight out of the blocked writer's buffer:
			// context[3] is its source pointer, context[4] the
			// count it has left to write.
			ln = nleft;
			if (p->m_wrtask->context[4] < nleft)
				ln = p->m_wrtask->context[4];
			src = (int *)p->m_wrtask->context[3];

			for(int i=0; i<ln; i++)
				*dst++ = *src++;

			// These words count as both written and read
			p->m_nwritten += ln;
			p->m_nread += ln;

			nleft -= ln;
			p->m_wrtask->context[4] -= ln;
			p->m_wrtask->context[3] = (int)src;

			// We have exhausted the write task.  Release it.
			if (p->m_wrtask->context[4] == 0) { // wr_len == 0
				// Release the write task, it has exhausted
				// its buffer
				TASKP	w = p->m_wrtask;
				// Now we allow other tasks to write into our
				// pipe
				p->m_wrtask = 0;
				// And here we actually release the writing
				// task
				w->state = SCHED_READY;
			}
		}

		// Realistically, we need to block here 'till more data is
		// available.  Need to determine how to do that.  Until then,
		// we'll just tell the scheduler to yield.  This will in
		// effect create a busy wait--not what we want, but it'll work.
		if (nleft > 0) {
			// Re-check emptiness with interrupts off so a push
			// cannot slip in between the test and the wait
			DISABLE_INTS();
			h = ((volatile SYSPIPE *)p)->m_head;
			if (h == p->m_tail)
				wait(0,-1);
			else
				ENABLE_INTS();
		}
	} while(nleft > 0);

	len = p->m_nread;
	p->m_nread = 0;
	// Release our ownership of the read end of the pipe, and wake any
	// writer that may have been waiting on us (interrupts off so the
	// hand-off is atomic)
	DISABLE_INTS();
	p->m_rdtask = NULL;
	if (((volatile SYSPIPE *)p)->m_wrtask)
		p->m_wrtask->state = SCHED_READY;
	ENABLE_INTS();

	// We have accomplished our read
	//
	return len;
}
|
313 |
|
|
|
314 |
|
|
// User-space continuation of write(): copy 'len' words from 'src' into
// the pipe, blocking (via wait) whenever the pipe is full, until the
// entire request has been written.  Runs in the writing task's context;
// 'tsk' is unused because kwrite_syspipe already configured the task's
// registers (context[3]=src, context[4]=len) before control arrives here.
//
// Returns the total number of words written.
static int uwrite_syspipe(TASKP tsk __attribute__((__unused__)), SYSPIPE *p, int *src, int len) {
	int	nleft = len;

	// The kernel guarantees, before we come into here, that we have a
	// valid write request.
	do {
		// We try to fill this request without going through the pipe's
		// memory at all.  Hence, if there is a read task that is
		// waiting/suspended, waiting on a write (this must've happened
		// since we started)--write directly into the read buffer first.

		// If there is a read task blocked, the pipe must be empty.
		// Volatile read: the reader attachment may change underneath us.
		TASKP	rdtask = ((volatile SYSPIPE *)p)->m_rdtask;
		if (rdtask == INTERRUPT_READ_TASK) {
			// We need to copy everything to the buffer
		} else if (rdtask) {
			// #warning "The previous code should have worked"
			// if (((unsigned)rdtask+1) & -2)

			// Copy straight into the blocked reader's buffer:
			// context[3] is its destination pointer, context[4]
			// the count it still wants.
			int ln = nleft;
			if (ln > p->m_rdtask->context[4])
				ln = p->m_rdtask->context[4];
			int *dst = (int *)p->m_rdtask->context[3];
			for(int i=0; i<ln; i++)
				*dst++ = *src++;
			p->m_nread += ln;
			p->m_rdtask->context[3]+= ln;
			p->m_rdtask->context[4]-= ln;
			nleft -= ln;
			p->m_nwritten += ln;

			// Realistically, we always need to wake up the reader
			// at this point.  Either 1) we exhausted the reader's
			// buffer, or 2) we exhausted our own and the reader
			// needs to take over.  Here, we only handle the first
			// case, leaving the rest for later.
			if (p->m_rdtask->context[4] == 0) {
				TASKP r = p->m_rdtask;
				// Detach the reader task
				p->m_rdtask = 0;
				// Wake up the reader
				r->state = SCHED_READY;
			}

			// While it might appear that we might close our loop
			// here, that's not quite the case.  It may be that the
			// pipe is read from an interrupt context.  In that
			// case, there will never be any reader tasks, but we
			// will still need to loop.

			// Now that we've filled any existing reader task, we
			// check whether or not we fit into the buffer.  The
			// rule is: don't write into the buffer unless
			// everything will fit.  Why?  Well, if you have to
			// block anyway, why not see if you can't avoid a
			// double copy?
			if (nleft == 0)
				break;
		}

		// Copy whatever we have into the pipe's buffer
		if ((nleft <= num_avail_syspipe(p))||(rdtask == INTERRUPT_READ_TASK)) {
			// Either there is no immediate reader task, or
			// the reader has been exhausted, but we've got
			// more to write.
			//
			// Note that we no longer need to check what
			// will fit into the pipe.  We know the entire
			// rest of our buffer will fit.

			{ // Write into the first half of the pipe
				int ln = p->m_mask+1-p->m_head;
				int *dst = &p->m_buf[p->m_head];
				if (ln > nleft) ln = nleft;

				for(int i=0; i<ln; i++)
					*dst++ = *src++;

				p->m_head += ln;
				nleft -= ln;
				p->m_nwritten += ln;
				// Wrap the head back to the buffer start
				if (p->m_head > (int)p->m_mask)
					p->m_head = 0;
			}

			// Write into the rest of the pipe
			if (nleft > 0) {
				int ln = num_avail_syspipe(p);
				if (nleft < ln)
					ln = nleft;
				int *dst = &p->m_buf[p->m_head];

				for(int i=0; i<ln; i++)
					*dst++ = *src++;

				p->m_head += ln;
				p->m_nwritten += ln;
				nleft -= ln;
			}
		}

		// Still more to write: block until the pipe drains (interrupt
		// reader) or until a reader task shows up.  Interrupts are
		// disabled around the test so the wake-up can't be missed.
		if (nleft > 0) {
			if (rdtask == INTERRUPT_READ_TASK) {
				DISABLE_INTS();
				if (num_avail_syspipe(p)==0)
					wait(0,-1);
				else ENABLE_INTS();
			} else {
				DISABLE_INTS();
				if (!((volatile SYSPIPE *)p)->m_rdtask)
					wait(0,-1); // Should really be a wait
					// on JIFFIES and if JIFFIES expired
					// (i.e. write timeout) then break;
				else ENABLE_INTS();
			}
		}
	} while(nleft > 0);

	// Release the write end of the pipe and report the total written
	int nw= p->m_nwritten;
	p->m_wrtask = 0;
	return nw;
}
|
435 |
|
|
|
436 |
|
|
// This will be called from a kernel (interrupt) context
|
437 |
|
|
// Kernel-side entry point for reading a pipe (called from an interrupt
// context).  Validates the request, attaches the calling task to the
// read end, and points the task's return PC (context[15]) at the
// user-space continuation, uread_syspipe.
void kread_syspipe(TASKP tsk, int dev, int *dst, int len) {
	SYSPIPE	*pipe = (SYSPIPE *)dev;

	if (pipe->m_rdtask != NULL) {
		// Only one reader at a time: fail with EBUSY
		tsk->context[1] = -EBUSY;
		zip_halt();
		return;
	}

	if (pipe->m_error) {
		// A prior overrun: report -EIO on this read, flush the stale
		// contents, and clear the error so the following read succeeds
		tsk->context[1] = -EIO;
		pipe->m_tail = pipe->m_head;
		pipe->m_error = 0;
		return;
	}

	if (len <= 0) {
		// Nothing (or nonsense) requested
		tsk->context[1] = -EFAULT;
		zip_halt();
		return;
	}

	if (!valid_ram_region(dst, len)) {
		// Destination buffer isn't valid, writable RAM
		tsk->context[1] = -EFAULT;
		zip_halt();
		return;
	}

	// Request is good: take ownership of the read end of the pipe
	pipe->m_rdtask = tsk;
	pipe->m_nread = 0;
	tsk->context[1] = (int)tsk;
	tsk->context[2] = (int)pipe;
	// context[3] (dst) and context[4] (len) were set before we got here
	tsk->context[15] = (int)uread_syspipe;

	// If a writer is already attached, make sure it is awake and wait
	// for it; otherwise, if the pipe is empty, block until data arrives
	if (pipe->m_wrtask) {
		tsk->state = SCHED_WAITING;
		pipe->m_wrtask->state = SCHED_READY;
	} else if (pipe->m_head == pipe->m_tail)
		tsk->state = SCHED_WAITING;

	// On return, the task resumes in user space inside uread_syspipe
}
|
480 |
|
|
|
481 |
|
|
// Kernel-side entry point for writing a pipe.  Validates the request,
// attaches the calling task to the write end, and points the task's
// return PC (context[15]) at the user-space continuation,
// uwrite_syspipe.
void kwrite_syspipe(TASKP tsk, int dev, int *src, int len) {
	SYSPIPE	*pipe = (SYSPIPE *)dev;

	if (pipe->m_wrtask != NULL) {
		// Only one writer at a time: fail with EBUSY
		tsk->context[1] = -EBUSY;
		return;
	}

	if (len <= 0) {
		// Nothing (or nonsense) to write
		tsk->context[1] = -EFAULT;
		return;
	}

	if (!valid_mem_region(src, len)) {
		// Source buffer isn't valid, readable memory
		tsk->context[1] = -EFAULT;
		zip_halt();
		return;
	}

	// Request is good: take ownership of the write end of the pipe
	pipe->m_wrtask = tsk;
	pipe->m_nwritten = 0;
	tsk->context[1] = (int)tsk;
	tsk->context[2] = (int)pipe;
	// context[3] (src) and context[4] (len) were set before we got here
	tsk->context[15] = (int)uwrite_syspipe;

	// If a reader task currently exists, wake it and block until it
	// either finishes or releases us; otherwise, if the pipe is full,
	// block until space opens up
	if ((pipe->m_rdtask)&&(pipe->m_rdtask != INTERRUPT_READ_TASK)) {
		tsk->state = SCHED_WAITING;
		pipe->m_rdtask->state = SCHED_READY;
	} else if (((pipe->m_head+1)&pipe->m_mask) == (unsigned)pipe->m_tail)
		tsk->state = SCHED_WAITING;

	// On return, the task resumes in user space inside uwrite_syspipe
}
|