OpenCores Subversion repository: c0or1k
URL: https://opencores.org/ocsvn/c0or1k/c0or1k/trunk
File: trunk/src/api/ipc.c (rev 6)

/*
 * Inter-process communication
 *
 * Copyright (C) 2007-2009 Bahadir Bilgehan Balban
 */
#include <l4/generic/tcb.h>
#include <l4/lib/mutex.h>
#include <l4/api/ipc.h>
#include <l4/api/thread.h>
#include <l4/api/kip.h>
#include <l4/api/errno.h>
#include <l4/lib/bit.h>
#include <l4/lib/math.h>
#include INC_API(syscall.h)
#include INC_GLUE(message.h)
#include INC_GLUE(ipc.h)

int ipc_short_copy(struct ktcb *to, struct ktcb *from)
{
        unsigned int *mr0_src = KTCB_REF_MR0(from);
        unsigned int *mr0_dst = KTCB_REF_MR0(to);

        /* NOTE:
         * Make sure MR_TOTAL matches the number of registers saved on stack.
         */
        memcpy(mr0_dst, mr0_src, MR_TOTAL * sizeof(unsigned int));

        return 0;
}

/* Copy full utcb region from one task to another. */
int ipc_full_copy(struct ktcb *to, struct ktcb *from)
{
        struct utcb *from_utcb = (struct utcb *)from->utcb_address;
        struct utcb *to_utcb = (struct utcb *)to->utcb_address;
        int ret;

        /* First do the short copy of primary mrs */
        if ((ret = ipc_short_copy(to, from)) < 0)
                return ret;

        /* Check that utcb memory accesses won't fault us */
        if ((ret = tcb_check_and_lazy_map_utcb(to, 1)) < 0)
                return ret;
        if ((ret = tcb_check_and_lazy_map_utcb(from, 1)) < 0)
                return ret;

        /* Directly copy from one utcb to another */
        memcpy(to_utcb->mr_rest, from_utcb->mr_rest,
               MR_REST * sizeof(unsigned int));

        return 0;
}

/*
 * Extended copy is asymmetric in that the copying always occurs from
 * the sender's kernel stack to the receiver's userspace buffers.
 */
int ipc_extended_copy(struct ktcb *to, struct ktcb *from)
{
        unsigned long size = min(from->extended_ipc_size,
                                 to->extended_ipc_size);

        /*
         * Copy from sender's kernel stack buffer
         * to receiver's kernel stack buffer
         */
        memcpy(to->extended_ipc_buffer,
               from->extended_ipc_buffer, size);

        return 0;
}

/*
 * Copies message registers from one ktcb stack to another. During the return
 * from the system call, the registers are popped from the stack. In the future
 * this should be optimised so that they need not be pushed to the stack at all.
 *
 * This also copies the sender id into the primary message registers (MR_SENDER)
 * in case the receiver is receiving from L4_ANYTHREAD. This is done for
 * security, since the receiver cannot trust the sender info provided by the
 * sender task.
 */
int ipc_msg_copy(struct ktcb *to, struct ktcb *from)
{
        unsigned int recv_ipc_type;
        unsigned int send_ipc_type;
        unsigned int *mr0_dst;
        int ret = 0;

        recv_ipc_type = tcb_get_ipc_type(to);
        send_ipc_type = tcb_get_ipc_type(from);

        /*
         * Check ipc type flags of both parties and
         * use the following rules:
         *
         * SHORT        SHORT           -> SHORT IPC
         * FULL         FULL/SHORT      -> FULL IPC
         * EXTENDED     EXTENDED        -> EXTENDED IPC
         * EXTENDED     NON-EXTENDED    -> ENOIPC
         */

        switch (recv_ipc_type) {
        case IPC_FLAGS_SHORT:
                if (send_ipc_type == IPC_FLAGS_SHORT)
                        ret = ipc_short_copy(to, from);
                if (send_ipc_type == IPC_FLAGS_FULL)
                        ret = ipc_full_copy(to, from);
                if (send_ipc_type == IPC_FLAGS_EXTENDED)
                        ret = -ENOIPC;
                break;
        case IPC_FLAGS_FULL:
                if (send_ipc_type == IPC_FLAGS_SHORT)
                        ret = ipc_full_copy(to, from);
                if (send_ipc_type == IPC_FLAGS_FULL)
                        ret = ipc_full_copy(to, from);
                if (send_ipc_type == IPC_FLAGS_EXTENDED)
                        ret = -ENOIPC;
                break;
        case IPC_FLAGS_EXTENDED:
                if (send_ipc_type == IPC_FLAGS_EXTENDED) {
                        /* We do a short copy as well. */
                        ret = ipc_short_copy(to, from);
                        ret = ipc_extended_copy(to, from);
                }
                if (send_ipc_type == IPC_FLAGS_SHORT)
                        ret = -ENOIPC;
                if (send_ipc_type == IPC_FLAGS_FULL)
                        ret = -ENOIPC;
                break;
        }

        /* Save the sender id in case of an ANYTHREAD receiver */
        if (to->expected_sender == L4_ANYTHREAD) {
                mr0_dst = KTCB_REF_MR0(to);
                mr0_dst[MR_SENDER] = from->tid;
        }

        return ret;
}

int sys_ipc_control(void)
{
        return -ENOSYS;
}

/*
 * Upon an ipc error or exception, the sleeping task is
 * notified of it via flags set by this function.
 */
void ipc_signal_error(struct ktcb *sleeper, int retval)
{
        /*
         * Only EFAULT and ENOIPC are expected for now
         */
        BUG_ON(retval != -EFAULT && retval != -ENOIPC);

        /*
         * Set the ipc error flag for the sleeper.
         */
        if (retval == -EFAULT)
                sleeper->ipc_flags |= IPC_EFAULT;
        if (retval == -ENOIPC)
                sleeper->ipc_flags |= IPC_ENOIPC;
}

/*
 * After an ipc, if the current task was the sleeping party,
 * this checks whether errors were signalled, clears
 * the ipc flags and returns the appropriate error code.
 */
int ipc_handle_errors(void)
{
        /* Did we wake up normally or get interrupted? */
        if (current->flags & TASK_INTERRUPTED) {
                current->flags &= ~TASK_INTERRUPTED;
                return -EINTR;
        }

        /* Did ipc fail with a fault error? */
        if (current->ipc_flags & IPC_EFAULT) {
                current->ipc_flags &= ~IPC_EFAULT;
                return -EFAULT;
        }

        /* Did ipc fail with a general ipc error? */
        if (current->ipc_flags & IPC_ENOIPC) {
                current->ipc_flags &= ~IPC_ENOIPC;
                return -ENOIPC;
        }

        return 0;
}

/*
 * NOTE:
 * Why can we safely copy registers and resume the task
 * after we release the locks? Because even if someone
 * tried to interrupt and wake up the other party, they
 * won't be able to: all of the task's hooks to its
 * waitqueue have been removed at that stage.
 */

/* Interruptible ipc */
int ipc_send(l4id_t recv_tid, unsigned int flags)
{
        struct ktcb *receiver;
        struct waitqueue_head *wqhs, *wqhr;
        int ret = 0;

        if (!(receiver = tcb_find_lock(recv_tid)))
                return -ESRCH;

        wqhs = &receiver->wqh_send;
        wqhr = &receiver->wqh_recv;

        spin_lock(&wqhs->slock);
        spin_lock(&wqhr->slock);

        /* Ready to receive and expecting us? */
        if (receiver->state == TASK_SLEEPING &&
            receiver->waiting_on == wqhr &&
            (receiver->expected_sender == current->tid ||
             receiver->expected_sender == L4_ANYTHREAD)) {
                struct waitqueue *wq = receiver->wq;

                /* Remove from waitqueue */
                list_remove_init(&wq->task_list);
                wqhr->sleepers--;
                task_unset_wqh(receiver);

                /* Release locks */
                spin_unlock(&wqhr->slock);
                spin_unlock(&wqhs->slock);

                /* Copy message registers */
                if ((ret = ipc_msg_copy(receiver, current)) < 0)
                        ipc_signal_error(receiver, ret);

                // printk("%s: (%d) Waking up (%d)\n", __FUNCTION__,
                //       current->tid, receiver->tid);

                /* Wake it up asynchronously */
                sched_resume_async(receiver);

                /* Release thread lock (protects against deletion) */
                spin_unlock(&receiver->thread_lock);
                return ret;
        }

        /* The receiver is not ready and/or not expecting us */
        CREATE_WAITQUEUE_ON_STACK(wq, current);
        wqhs->sleepers++;
        list_insert_tail(&wq.task_list, &wqhs->task_list);
        task_set_wqh(current, wqhs, &wq);
        sched_prepare_sleep();
        spin_unlock(&wqhr->slock);
        spin_unlock(&wqhs->slock);
        spin_unlock(&receiver->thread_lock);
        // printk("%s: (%d) waiting for (%d)\n", __FUNCTION__,
        //       current->tid, recv_tid);
        schedule();

        return ipc_handle_errors();
}
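
/*
 * Descriptive note (derived from the code above): ipc_send() has two
 * outcomes. If the receiver is already sleeping on its receive queue and
 * expects either us or L4_ANYTHREAD, the message registers are copied
 * immediately and the receiver is woken asynchronously; otherwise the
 * sender enqueues itself on the receiver's send queue and sleeps until
 * the receiver's ipc_recv() completes the rendezvous.
 */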

int ipc_recv(l4id_t senderid, unsigned int flags)
{
        struct waitqueue_head *wqhs, *wqhr;
        int ret = 0;

        wqhs = &current->wqh_send;
        wqhr = &current->wqh_recv;

        /*
         * Indicate who we expect to receive from,
         * so senders know.
         */
        current->expected_sender = senderid;

        spin_lock(&wqhs->slock);
        spin_lock(&wqhr->slock);

        /* Are there senders? */
        if (wqhs->sleepers > 0) {
                struct waitqueue *wq, *n;
                struct ktcb *sleeper;

                BUG_ON(list_empty(&wqhs->task_list));

                /* Look for a sender we want to receive from */
                list_foreach_removable_struct(wq, n, &wqhs->task_list, task_list) {
                        sleeper = wq->task;

                        /* Found a sender that we wanted to receive from */
                        if ((sleeper->tid == current->expected_sender) ||
                            (current->expected_sender == L4_ANYTHREAD)) {
                                list_remove_init(&wq->task_list);
                                wqhs->sleepers--;
                                task_unset_wqh(sleeper);
                                spin_unlock(&wqhr->slock);
                                spin_unlock(&wqhs->slock);

                                /* Copy message registers */
                                if ((ret = ipc_msg_copy(current, sleeper)) < 0)
                                        ipc_signal_error(sleeper, ret);

                                // printk("%s: (%d) Waking up (%d)\n",
                                // __FUNCTION__,
                                //       current->tid, sleeper->tid);
                                sched_resume_sync(sleeper);
                                return ret;
                        }
                }
        }

        /* The sender is not ready */
        CREATE_WAITQUEUE_ON_STACK(wq, current);
        wqhr->sleepers++;
        list_insert_tail(&wq.task_list, &wqhr->task_list);
        task_set_wqh(current, wqhr, &wq);
        sched_prepare_sleep();
        // printk("%s: (%d) waiting for (%d)\n", __FUNCTION__,
        //       current->tid, current->expected_sender);
        spin_unlock(&wqhr->slock);
        spin_unlock(&wqhs->slock);
        schedule();

        return ipc_handle_errors();
}

/*
 * Sends and receives message registers in the same call. This is mainly
 * used by user tasks for client-server communication with system servers.
 *
 * Timeline of client/server communication using ipc_sendrecv():
 *
 * (1) User task (client) calls ipc_sendrecv();
 * (2) System task (server) calls ipc_recv() with from == ANYTHREAD.
 * (3) Rendezvous occurs. Both tasks exchange mrs and leave the rendezvous.
 * (4,5) User task immediately calls ipc_recv(), expecting a reply from the server.
 * (4,5) System task handles the request in userspace.
 * (6) System task calls ipc_send() sending the return result.
 * (7) Rendezvous occurs. Both tasks exchange mrs and leave the rendezvous.
 */
int ipc_sendrecv(l4id_t to, l4id_t from, unsigned int flags)
{
        int ret = 0;

        if (to == from) {
                /* Send the ipc request */
                if ((ret = ipc_send(to, flags)) < 0)
                        return ret;
                /*
                 * Get the reply. A client would block its server
                 * only very briefly between these calls.
                 */
                if ((ret = ipc_recv(from, flags)) < 0)
                        return ret;
        } else {
                printk("%s: Unsupported ipc operation.\n", __FUNCTION__);
                ret = -ENOSYS;
        }
        return ret;
}
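
/*
 * Illustrative sketch of the timeline above. The l4_ipc() wrapper name is
 * a hypothetical userspace convenience; only the to/from argument
 * semantics are taken from sys_ipc() in this file:
 *
 *      Client (steps 1, 4):
 *              l4_ipc(server_tid, server_tid, flags);     // send + receive
 *
 *      Server loop (steps 2, 5, 6):
 *              l4_ipc(L4_NILTHREAD, L4_ANYTHREAD, flags); // receive from anyone
 *              ...handle the request, fill reply mrs...
 *              l4_ipc(client_tid, L4_NILTHREAD, flags);   // send the reply
 */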

int ipc_sendrecv_extended(l4id_t to, l4id_t from, unsigned int flags)
{
        return -ENOSYS;
}

/*
 * In extended receive, the receiver's userspace buffer is page-faulted
 * in (if needed) before the message is copied out of the kernel buffer.
 */
int ipc_recv_extended(l4id_t sendertid, unsigned int flags)
{
        unsigned long msg_index;
        unsigned long ipc_address;
        unsigned int size;
        unsigned int *mr0_current;
        int err;

        /*
         * Obtain primary message register index
         * containing extended ipc buffer address
         */
        msg_index = extended_ipc_msg_index(flags);

        /* Get the pointer to primary message registers */
        mr0_current = KTCB_REF_MR0(current);

        /* Obtain extended ipc address */
        ipc_address = (unsigned long)mr0_current[msg_index];

        /* Obtain extended ipc size */
        size = extended_ipc_msg_size(flags);

        /* Check size is good */
        if (size > IPC_EXTENDED_MAX_SIZE)
                return -EINVAL;

        /* Set extended ipc copy size */
        current->extended_ipc_size = size;

        /* Engage in real ipc to copy to ktcb buffer */
        if ((err = ipc_recv(sendertid, flags)) < 0)
                return err;

        /* Page fault user pages if needed */
        if ((err = check_access(ipc_address, size,
                                MAP_USR_RW, 1)) < 0)
                return err;

        /*
         * Now copy from ktcb to user buffers
         */
        memcpy((void *)ipc_address,
               current->extended_ipc_buffer,
               current->extended_ipc_size);

        return 0;
}

/*
 * In extended IPC, userspace buffers are copied to the process's kernel
 * stack before engaging in the real ipc. If a page fault occurs, only the
 * current process's time is consumed.
 */
int ipc_send_extended(l4id_t recv_tid, unsigned int flags)
{
        unsigned long msg_index;
        unsigned long ipc_address;
        unsigned int size;
        unsigned int *mr0_current;
        int err;

        /*
         * Obtain primary message register index
         * containing extended ipc buffer address
         */
        msg_index = extended_ipc_msg_index(flags);

        /* Get the pointer to primary message registers */
        mr0_current = KTCB_REF_MR0(current);

        /* Obtain extended ipc address */
        ipc_address = (unsigned long)mr0_current[msg_index];

        /* Obtain extended ipc size */
        size = extended_ipc_msg_size(flags);

        /* Check size is good */
        if (size > IPC_EXTENDED_MAX_SIZE)
                return -EINVAL;

        /* Set extended ipc copy size */
        current->extended_ipc_size = size;

        /* Page fault those pages on the current task if needed */
        if ((err = check_access(ipc_address, size,
                                MAP_USR_RW, 1)) < 0)
                return err;

        /*
         * It is now safe to access user pages.
         * Copy message from user buffer into current kernel stack
         */
        memcpy(current->extended_ipc_buffer,
               (void *)ipc_address, size);

        /* Now we can engage in the real ipc */
        return ipc_send(recv_tid, flags);
}
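
/*
 * Note on the extended ipc path above: the caller places its userspace
 * buffer address in the primary message register selected by
 * extended_ipc_msg_index(flags) and encodes the copy size via
 * extended_ipc_msg_size(flags). The data is then bounced through the
 * per-thread kernel buffers (extended_ipc_buffer) by ipc_extended_copy(),
 * so a faulting user buffer only costs the faulting task its own time.
 */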

static inline int __sys_ipc(l4id_t to, l4id_t from,
                            unsigned int ipc_dir, unsigned int flags)
{
        int ret;

        if (ipc_flags_get_type(flags) == IPC_FLAGS_EXTENDED) {
                switch (ipc_dir) {
                case IPC_SEND:
                        ret = ipc_send_extended(to, flags);
                        break;
                case IPC_RECV:
                        ret = ipc_recv_extended(from, flags);
                        break;
                case IPC_SENDRECV:
                        ret = ipc_sendrecv_extended(to, from, flags);
                        break;
                case IPC_INVALID:
                default:
                        printk("Unsupported ipc operation.\n");
                        ret = -ENOSYS;
                }
        } else {
                switch (ipc_dir) {
                case IPC_SEND:
                        ret = ipc_send(to, flags);
                        break;
                case IPC_RECV:
                        ret = ipc_recv(from, flags);
                        break;
                case IPC_SENDRECV:
                        ret = ipc_sendrecv(to, from, flags);
                        break;
                case IPC_INVALID:
                default:
                        printk("Unsupported ipc operation.\n");
                        ret = -ENOSYS;
                }
        }
        return ret;
}

void printk_sysregs(syscall_context_t *regs)
{
        printk("System call registers for tid: %d\n", current->tid);
        printk("R0: %x\n", regs->r0);
        printk("R1: %x\n", regs->r1);
        printk("R2: %x\n", regs->r2);
        printk("R3: %x\n", regs->r3);
        printk("R4: %x\n", regs->r4);
        printk("R5: %x\n", regs->r5);
        printk("R6: %x\n", regs->r6);
        printk("R7: %x\n", regs->r7);
        printk("R8: %x\n", regs->r8);
}

/*
 * sys_ipc has multiple functions. In a nutshell:
 * - Copies message registers from one thread to another.
 * - Sends notification bits from one thread to another (not implemented yet).
 * - Synchronises the threads involved in ipc (i.e. a blocking rendezvous).
 * - Can propagate messages from third party threads.
 * - A thread can both send and receive on the same call.
 */
int sys_ipc(l4id_t to, l4id_t from, unsigned int flags)
{
        unsigned int ipc_dir = 0;
        int ret = 0;

        /* Check arguments */
        if (tid_special_value(from) &&
            from != L4_ANYTHREAD && from != L4_NILTHREAD) {
                ret = -EINVAL;
                goto error;
        }

        if (tid_special_value(to) &&
            to != L4_ANYTHREAD && to != L4_NILTHREAD) {
                ret = -EINVAL;
                goto error;
        }

        /* Cannot send to self, or receive from self */
        if (from == current->tid || to == current->tid) {
                ret = -EINVAL;
                goto error;
        }

        /* [0] for Send */
        ipc_dir |= (to != L4_NILTHREAD);

        /* [1] for Receive, [1:0] for both */
        ipc_dir |= ((from != L4_NILTHREAD) << 1);
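
        /*
         * For example: a non-nil 'to' with from == L4_NILTHREAD sets only
         * bit [0] (send-only); a non-nil 'from' with to == L4_NILTHREAD
         * sets only bit [1] (receive-only); both non-nil set bits [1:0]
         * and select the combined send+receive path.
         */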

        if (ipc_dir == IPC_INVALID) {
                ret = -EINVAL;
                goto error;
        }

        /* Everything in place, now check capability */
        if ((ret = cap_ipc_check(to, from, flags, ipc_dir)) < 0)
                return ret;

        /* Encode ipc type in task flags */
        tcb_set_ipc_flags(current, flags);

        if ((ret = __sys_ipc(to, from, ipc_dir, flags)) < 0)
                goto error;
        return ret;

error:
        /*
         * This is not always an error. For example a send/recv
         * thread may be suspended before its receive phase.
         */
        //printk("Erroneous ipc by: %d. from: %d, to: %d, Err: %d\n",
        //       current->tid, from, to, ret);
        ipc_dir = IPC_INVALID;
        return ret;
}