linux-2.4/fs/intermezzo/psdev.c (or1k Subversion repository, tag LINUX_2_4_26_OR32, rev 1765)

/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *              An implementation of a loadable kernel mode driver providing
 *              multiple kernel/user space bidirectional communications links.
 *
 *              Author:         Alan Cox <alan@cymru.net>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              version 2 as published by the Free Software Foundation.
 *
 *              Adapted to become the Linux 2.0 Coda pseudo device
 *              Peter  Braam  <braam@maths.ox.ac.uk>
 *              Michael Callahan <mjc@emmy.smith.edu>
 *
 *              Changes for Linux 2.1
 *              Copyright (c) 1997 Carnegie-Mellon University
 *
 *              Redone again for InterMezzo
 *              Copyright (c) 1998 Peter J. Braam
 *              Copyright (c) 2000 Mountain View Data, Inc.
 *              Copyright (c) 2000 Tacitus Systems, Inc.
 *              Copyright (c) 2001 Cluster File Systems, Inc.
 *
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/sched.h>
#include <linux/lp.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/fcntl.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/devfs_fs_kernel.h>
#include <asm/io.h>
#include <asm/segment.h>
#include <asm/system.h>
#include <asm/poll.h>
#include <asm/uaccess.h>
#include <linux/miscdevice.h>

#include <linux/intermezzo_fs.h>
#include <linux/intermezzo_psdev.h>

#ifdef PRESTO_DEVEL
int  presto_print_entry = 1;
int  presto_debug = 4095;
#else
int  presto_print_entry = 0;
int  presto_debug = 0;
#endif

/* Like inode.c (presto_sym_iops), the initializer is just to prevent
   izo_channels from appearing as a COMMON symbol (and therefore
   interfering with other modules that use the same variable name). */
struct upc_channel izo_channels[MAX_CHANNEL] = {{0}};

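/* Return the index of the first upcall channel with no caches attached
   (an empty uc_cache_list), or -1 if all MAX_CHANNEL channels are in use. */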
int izo_psdev_get_free_channel(void)
{
        int i, result = -1;

        for (i = 0 ; i < MAX_CHANNEL ; i++ ) {
                if (list_empty(&(izo_channels[i].uc_cache_list))) {
                        result = i;
                        break;
                }
        }
        return result;
}

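/* Record the calling Lento's pid on channel 'minor'.  Any requests still
   sitting on the processing list at this point are woken so their sleepers
   can clean up; the list is expected to be empty afterwards. */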
int izo_psdev_setpid(int minor)
{
        struct upc_channel *channel;
        if (minor < 0 || minor >= MAX_CHANNEL) {
                return -EINVAL;
        }

        channel = &(izo_channels[minor]);
        /*
         * This ioctl is performed by each Lento that starts up
         * and wants to do further communication with presto.
         */
        CDEBUG(D_PSDEV, "Setting current pid to %d channel %d\n",
               current->pid, minor);
        channel->uc_pid = current->pid;
        spin_lock(&channel->uc_lock);
        if ( !list_empty(&channel->uc_processing) ) {
                struct list_head *lh;
                struct upc_req *req;
                CERROR("WARNING: setpid & processing not empty!\n");
                lh = &channel->uc_processing;
                while ( (lh = lh->next) != &channel->uc_processing) {
                        req = list_entry(lh, struct upc_req, rq_chain);
                        /* freeing of req and data is done by the sleeper */
                        wake_up(&req->rq_sleep);
                }
        }
        if ( !list_empty(&channel->uc_processing) ) {
                CERROR("BAD: FAILED TO CLEAN PROCESSING LIST!\n");
        }
        spin_unlock(&channel->uc_lock);
        EXIT;
        return 0;
}

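/* Associate the psdev file referenced by 'fd' with the cache backing 'file'
   by pointing the psdev file's private_data at cache->cache_psdev.  Fails if
   the fd is bad, no cache is found, or a channel is already set. */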
int izo_psdev_setchannel(struct file *file, int fd)
{

        struct file *psdev_file = fget(fd);
        struct presto_cache *cache = presto_get_cache(file->f_dentry->d_inode);

        if (!psdev_file) {
                CERROR("%s: no psdev_file!\n", __FUNCTION__);
                return -EINVAL;
        }

        if (!cache) {
                CERROR("%s: no cache!\n", __FUNCTION__);
                fput(psdev_file);
                return -EINVAL;
        }

        if (psdev_file->private_data) {
                CERROR("%s: channel already set!\n", __FUNCTION__);
                fput(psdev_file);
                return -EINVAL;
        }

        psdev_file->private_data = cache->cache_psdev;
        fput(psdev_file);
        EXIT;
        return 0;
}

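/* Non-zero (the registered Lento pid) when a Lento daemon is attached to
   channel 'minor', zero otherwise. */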
inline int presto_lento_up(int minor)
{
        return izo_channels[minor].uc_pid;
}

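/* Poll method for the psdev: the device is always writable; it becomes
   readable when the channel's pending queue is non-empty. */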
static unsigned int presto_psdev_poll(struct file *file, poll_table * wait)
{
        struct upc_channel *channel = (struct upc_channel *)file->private_data;
        unsigned int mask = POLLOUT | POLLWRNORM;

        /* ENTRY; this will flood you */
        if ( ! channel ) {
                CERROR("%s: bad psdev file\n", __FUNCTION__);
                return -EBADF;
        }

        poll_wait(file, &(channel->uc_waitq), wait);

        spin_lock(&channel->uc_lock);
        if (!list_empty(&channel->uc_pending)) {
                CDEBUG(D_PSDEV, "Non-empty pending list.\n");
                mask |= POLLIN | POLLRDNORM;
        }
        spin_unlock(&channel->uc_lock);

        /* EXIT; will flood you */
        return mask;
}
 
/*
 *      Receive a message written by Lento to the psdev
 */
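/* A reply from Lento is expected to begin with a struct izo_upcall_resp
 * header; its 'unique' field selects the matching request on the channel's
 * processing queue, the written bytes are copied into that request's buffer,
 * and the sleeping caller is woken.  Illustrative userspace side (a sketch
 * only, not part of this driver; buffer and length names are made up):
 *
 *      struct izo_upcall_resp *hdr = (struct izo_upcall_resp *)reply_buf;
 *      hdr->unique = request_unique;   // echo the identifier read earlier
 *      hdr->opcode = request_opcode;
 *      hdr->result = 0;                // 0 on success; positive Lento errors
 *                                      // are negated by izo_upc_upcall()
 *      write(psdev_fd, reply_buf, reply_len);
 */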
static ssize_t presto_psdev_write(struct file *file, const char *buf,
                                  size_t count, loff_t *off)
{
        struct upc_channel *channel = (struct upc_channel *)file->private_data;
        struct upc_req *req = NULL;
        struct upc_req *tmp;
        struct list_head *lh;
        struct izo_upcall_resp hdr;
        int error;

        if ( ! channel ) {
                CERROR("%s: bad psdev file\n", __FUNCTION__);
                return -EBADF;
        }

        /* Peek at the opcode, uniquefier */
        if ( count < sizeof(hdr) ) {
                CERROR("presto_psdev_write: Lento didn't write full hdr.\n");
                return -EINVAL;
        }

        error = copy_from_user(&hdr, buf, sizeof(hdr));
        if ( error )
                return -EFAULT;

        CDEBUG(D_PSDEV, "(process,opc,uniq)=(%d,%d,%d)\n",
               current->pid, hdr.opcode, hdr.unique);

        spin_lock(&channel->uc_lock);
        /* Look for the message on the processing queue. */
        lh  = &channel->uc_processing;
        while ( (lh = lh->next) != &channel->uc_processing ) {
                tmp = list_entry(lh, struct upc_req , rq_chain);
                if (tmp->rq_unique == hdr.unique) {
                        req = tmp;
                        /* unlink here: keeps search length minimal */
                        list_del_init(&req->rq_chain);
                        CDEBUG(D_PSDEV,"Eureka opc %d uniq %d!\n",
                               hdr.opcode, hdr.unique);
                        break;
                }
        }
        spin_unlock(&channel->uc_lock);
        if (!req) {
                CERROR("psdev_write: msg (%d, %d) not found\n",
                       hdr.opcode, hdr.unique);
                return(-ESRCH);
        }

        /* move data into response buffer. */
        if (req->rq_bufsize < count) {
                CERROR("psdev_write: too much cnt: %d, cnt: %d, "
                       "opc: %d, uniq: %d.\n",
                       req->rq_bufsize, count, hdr.opcode, hdr.unique);
                count = req->rq_bufsize; /* don't have more space! */
        }
        error = copy_from_user(req->rq_data, buf, count);
        if ( error )
                return -EFAULT;

        /* adjust outsize: good upcalls can be aware of this */
        req->rq_rep_size = count;
        req->rq_flags |= REQ_WRITE;

        wake_up(&req->rq_sleep);
        return(count);
}
 
/*
 *      Read a message from the kernel to Lento
 */
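/* The oldest request on the pending queue is handed to Lento.  Synchronous
 * requests move to the processing queue to await a reply via write();
 * asynchronous requests are freed here once their data has been copied out. */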
static ssize_t presto_psdev_read(struct file * file, char * buf,
                                 size_t count, loff_t *off)
{
        struct upc_channel *channel = (struct upc_channel *)file->private_data;
        struct upc_req *req;
        int result = count;

        if ( ! channel ) {
                CERROR("%s: bad psdev file\n", __FUNCTION__);
                return -EBADF;
        }

        spin_lock(&channel->uc_lock);
        if (list_empty(&(channel->uc_pending))) {
                CDEBUG(D_UPCALL, "Empty pending list in read, not good\n");
                spin_unlock(&channel->uc_lock);
                return -EINVAL;
        }
        req = list_entry((channel->uc_pending.next), struct upc_req, rq_chain);
        list_del(&(req->rq_chain));
        if (! (req->rq_flags & REQ_ASYNC) ) {
                list_add(&(req->rq_chain), channel->uc_processing.prev);
        }
        spin_unlock(&channel->uc_lock);

        req->rq_flags |= REQ_READ;

        /* Move the input args into userspace */
        CDEBUG(D_PSDEV, "\n");
        if (req->rq_bufsize <= count) {
                result = req->rq_bufsize;
        }

        if (count < req->rq_bufsize) {
                CERROR ("psdev_read: buffer too small, read %d of %d bytes\n",
                        count, req->rq_bufsize);
        }

        if ( copy_to_user(buf, req->rq_data, result) ) {
                BUG();
                return -EFAULT;
        }

        /* If request was asynchronous don't enqueue, but free */
        if (req->rq_flags & REQ_ASYNC) {
                CDEBUG(D_PSDEV, "psdev_read: async msg (%d, %d), result %d\n",
                       req->rq_opcode, req->rq_unique, result);
                PRESTO_FREE(req->rq_data, req->rq_bufsize);
                PRESTO_FREE(req, sizeof(*req));
                return result;
        }

        return result;
}

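/* Open does not bind a channel: private_data is cleared here and set later
   (see izo_psdev_setchannel()). */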
static int presto_psdev_open(struct inode * inode, struct file * file)
{
        ENTRY;

        file->private_data = NULL;

        MOD_INC_USE_COUNT;

        CDEBUG(D_PSDEV, "Psdev_open: caller: %d, flags: %d\n", current->pid, file->f_flags);

        EXIT;
        return 0;
}

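/* Called when Lento closes the device: clear the registered pid and wake the
   sleepers on the pending and processing queues (async pending requests are
   left for a new Lento), marking their requests REQ_DEAD so the sleepers can
   free them. */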
static int presto_psdev_release(struct inode * inode, struct file * file)
{
        struct upc_channel *channel = (struct upc_channel *)file->private_data;
        struct upc_req *req;
        struct list_head *lh;
        ENTRY;

        if ( ! channel ) {
                CERROR("%s: bad psdev file\n", __FUNCTION__);
                return -EBADF;
        }

        MOD_DEC_USE_COUNT;
        CDEBUG(D_PSDEV, "Lento: pid %d\n", current->pid);
        channel->uc_pid = 0;

        /* Wake up clients so they can return. */
        CDEBUG(D_PSDEV, "Wake up clients sleeping for pending.\n");
        spin_lock(&channel->uc_lock);
        lh = &channel->uc_pending;
        while ( (lh = lh->next) != &channel->uc_pending) {
                req = list_entry(lh, struct upc_req, rq_chain);

                /* Async requests stay around for a new lento */
                if (req->rq_flags & REQ_ASYNC) {
                        continue;
                }
                /* the sleeper will free the req and data */
                req->rq_flags |= REQ_DEAD;
                wake_up(&req->rq_sleep);
        }

        CDEBUG(D_PSDEV, "Wake up clients sleeping for processing\n");
        lh = &channel->uc_processing;
        while ( (lh = lh->next) != &channel->uc_processing) {
                req = list_entry(lh, struct upc_req, rq_chain);
                /* freeing of req and data is done by the sleeper */
                req->rq_flags |= REQ_DEAD;
                wake_up(&req->rq_sleep);
        }
        spin_unlock(&channel->uc_lock);
        CDEBUG(D_PSDEV, "Done.\n");

        EXIT;
        return 0;
}

static struct file_operations presto_psdev_fops = {
        .read    = presto_psdev_read,
        .write   = presto_psdev_write,
        .poll    = presto_psdev_poll,
        .open    = presto_psdev_open,
        .release = presto_psdev_release
};

/* modules setup */
static struct miscdevice intermezzo_psdev = {
        INTERMEZZO_MINOR,
        "intermezzo",
        &presto_psdev_fops
};

int  presto_psdev_init(void)
{
        int i;
        int err;

        if ( (err = misc_register(&intermezzo_psdev)) ) {
                CERROR("%s: cannot register %d err %d\n",
                       __FUNCTION__, INTERMEZZO_MINOR, err);
                return -EIO;
        }

        memset(&izo_channels, 0, sizeof(izo_channels));
        for ( i = 0 ; i < MAX_CHANNEL ; i++ ) {
                struct upc_channel *channel = &(izo_channels[i]);
                INIT_LIST_HEAD(&channel->uc_pending);
                INIT_LIST_HEAD(&channel->uc_processing);
                INIT_LIST_HEAD(&channel->uc_cache_list);
                init_waitqueue_head(&channel->uc_waitq);
                channel->uc_lock = SPIN_LOCK_UNLOCKED;
                channel->uc_hard = 0;
                channel->uc_no_filter = 0;
                channel->uc_no_journal = 0;
                channel->uc_no_upcall = 0;
                channel->uc_timeout = 30;
                channel->uc_errorval = 0;
                channel->uc_minor = i;
        }
        return 0;
}

void presto_psdev_cleanup(void)
{
        int i;

        misc_deregister(&intermezzo_psdev);

        for ( i = 0 ; i < MAX_CHANNEL ; i++ ) {
                struct upc_channel *channel = &(izo_channels[i]);
                struct list_head *lh;

                spin_lock(&channel->uc_lock);
                if ( ! list_empty(&channel->uc_pending)) {
                        CERROR("Weird, tell Peter: module cleanup and pending list not empty dev %d\n", i);
                }
                if ( ! list_empty(&channel->uc_processing)) {
                        CERROR("Weird, tell Peter: module cleanup and processing list not empty dev %d\n", i);
                }
                if ( ! list_empty(&channel->uc_cache_list)) {
                        CERROR("Weird, tell Peter: module cleanup and cache list not empty dev %d\n", i);
                }
                lh = channel->uc_pending.next;
                while ( lh != &channel->uc_pending) {
                        struct upc_req *req;

                        req = list_entry(lh, struct upc_req, rq_chain);
                        lh = lh->next;
                        if ( req->rq_flags & REQ_ASYNC ) {
                                list_del(&(req->rq_chain));
                                CDEBUG(D_UPCALL, "free pending upcall type %d\n",
                                       req->rq_opcode);
                                PRESTO_FREE(req->rq_data, req->rq_bufsize);
                                PRESTO_FREE(req, sizeof(struct upc_req));
                        } else {
                                req->rq_flags |= REQ_DEAD;
                                wake_up(&req->rq_sleep);
                        }
                }
                lh = &channel->uc_processing;
                while ( (lh = lh->next) != &channel->uc_processing ) {
                        struct upc_req *req;
                        req = list_entry(lh, struct upc_req, rq_chain);
                        list_del(&(req->rq_chain));
                        req->rq_flags |= REQ_DEAD;
                        wake_up(&req->rq_sleep);
                }
                spin_unlock(&channel->uc_lock);
        }
}
 
/*
 * lento_upcall and lento_downcall routines
 */
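/* Sleep until the request is answered (REQ_WRITE) or abandoned (REQ_DEAD).
   The wait is uninterruptible when uc_hard is set; otherwise SIGKILL/SIGINT
   break out immediately and any other signal breaks out once uc_timeout
   seconds have passed since the request was posted.  Returns the elapsed
   time in jiffies. */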
static inline unsigned long lento_waitfor_upcall
            (struct upc_channel *channel, struct upc_req *req, int minor)
{
        DECLARE_WAITQUEUE(wait, current);
        unsigned long posttime;

        req->rq_posttime = posttime = jiffies;

        add_wait_queue(&req->rq_sleep, &wait);
        for (;;) {
                if ( izo_channels[minor].uc_hard == 0 )
                        set_current_state(TASK_INTERRUPTIBLE);
                else
                        set_current_state(TASK_UNINTERRUPTIBLE);

                /* got a reply */
                if ( req->rq_flags & (REQ_WRITE | REQ_DEAD) )
                        break;

                /* these cases only apply when TASK_INTERRUPTIBLE */
                if ( !izo_channels[minor].uc_hard && signal_pending(current) ) {
                        /* if this process really wants to die, let it go */
                        if (sigismember(&(current->pending.signal), SIGKILL)||
                            sigismember(&(current->pending.signal), SIGINT) )
                                break;
                        /* signal is present: after timeout always return
                           really smart idea, probably useless ... */
                        if ( time_after(jiffies, req->rq_posttime +
                             izo_channels[minor].uc_timeout * HZ) )
                                break;
                }
                schedule();
        }

        spin_lock(&channel->uc_lock);
        list_del_init(&req->rq_chain);
        spin_unlock(&channel->uc_lock);
        remove_wait_queue(&req->rq_sleep, &wait);
        set_current_state(TASK_RUNNING);

        CDEBUG(D_SPECIAL, "posttime: %ld, returned: %ld\n",
               posttime, jiffies-posttime);
        return  (jiffies - posttime);
}

/*
 * lento_upcall will return an error in the case of
 * failed communication with Lento _or_ will peek at the Lento
 * reply and return Lento's error.
 *
 * Lento has 2 types of errors: normal errors (positive) and internal
 * errors (negative).  Normal errors are negated, while internal errors
 * are all mapped to -EINVAL, with a warning message logged. (jh)
 *
 * lento_upcall will always free the buffer: directly, when an upcall
 * is read (in presto_psdev_read), when the filesystem is unmounted, or
 * when the module is unloaded.
 */
int izo_upc_upcall(int minor, int *size, struct izo_upcall_hdr *buffer,
                   int async)
{
        unsigned long runtime;
        struct upc_channel *channel;
        struct izo_upcall_resp *out;
        struct upc_req *req;
        int error = 0;

        ENTRY;
        channel = &(izo_channels[minor]);

        if (channel->uc_no_upcall) {
                EXIT;
                goto exit_buf;
        }
        if (!channel->uc_pid && !async) {
                EXIT;
                error = -ENXIO;
                goto exit_buf;
        }

        /* Format the request message. */
        PRESTO_ALLOC(req, sizeof(struct upc_req));
        if ( !req ) {
                EXIT;
                error = -ENOMEM;
                goto exit_buf;
        }
        req->rq_data = (void *)buffer;
        req->rq_flags = 0;
        req->rq_bufsize = *size;
        req->rq_rep_size = 0;
        req->rq_opcode = buffer->u_opc;
        req->rq_unique = ++channel->uc_seq;
        init_waitqueue_head(&req->rq_sleep);

        /* Fill in the common input args. */
        buffer->u_uniq = req->rq_unique;
        buffer->u_async = async;

        spin_lock(&channel->uc_lock);
        /* Append msg to pending queue and poke Lento. */
        list_add(&req->rq_chain, channel->uc_pending.prev);
        spin_unlock(&channel->uc_lock);
        CDEBUG(D_UPCALL,
               "Proc %d waking Lento %d for(opc,uniq) =(%d,%d) msg at %p.\n",
               current->pid, channel->uc_pid, req->rq_opcode,
               req->rq_unique, req);
        wake_up_interruptible(&channel->uc_waitq);

        if ( async ) {
                /* req, rq_data are freed in presto_psdev_read for async */
                req->rq_flags = REQ_ASYNC;
                EXIT;
                return 0;
        }

        /* We can be interrupted while we wait for Lento to process
         * our request.  If the interrupt occurs before Lento has read
         * the request, we dequeue and return. If it occurs after the
         * read but before the reply, we dequeue, send a signal
         * message, and return. If it occurs after the reply we ignore
         * it. In no case do we want to restart the syscall.  If it
         * was interrupted by a lento shutdown (psdev_close), return
         * ENODEV.  */

        /* Go to sleep.  Wake up on signals only after the timeout. */
        runtime = lento_waitfor_upcall(channel, req, minor);

        CDEBUG(D_TIMING, "opc: %d time: %ld uniq: %d size: %d\n",
               req->rq_opcode, jiffies - req->rq_posttime,
               req->rq_unique, req->rq_rep_size);
        CDEBUG(D_UPCALL,
               "..process %d woken up by Lento for req at 0x%x, data at %x\n",
               current->pid, (int)req, (int)req->rq_data);

        if (channel->uc_pid) {      /* i.e. Lento is still alive */
                /* Op went through, interrupt or not we go on */
                if (req->rq_flags & REQ_WRITE) {
                        out = (struct izo_upcall_resp *)req->rq_data;
                        /* here we map positive Lento errors to kernel errors */
                        if ( out->result < 0 ) {
                                CERROR("Tell Peter: Lento returns negative error %d, for oc %d!\n",
                                       out->result, out->opcode);
                                out->result = EINVAL;
                        }
                        error = -out->result;
                        CDEBUG(D_UPCALL, "upcall: (u,o,r) (%d, %d, %d) out at %p\n",
                               out->unique, out->opcode, out->result, out);
                        *size = req->rq_rep_size;
                        EXIT;
                        goto exit_req;
                }
                /* Interrupted before lento read it. */
                if ( !(req->rq_flags & REQ_READ) && signal_pending(current)) {
                        CDEBUG(D_UPCALL,
                               "Interrupt before read: (op,un)=(%d,%d), flags %x\n",
                               req->rq_opcode, req->rq_unique, req->rq_flags);
                        /* perhaps the best way to convince the app to give up? */
                        error = -EINTR;
                        EXIT;
                        goto exit_req;
                }

                /* interrupted after Lento did its read, send signal */
                if ( (req->rq_flags & REQ_READ) && signal_pending(current) ) {
                        CDEBUG(D_UPCALL, "Interrupt after read: op = %d.%d, flags = %x\n",
                               req->rq_opcode, req->rq_unique, req->rq_flags);

                        error = -EINTR;
                } else {
                        CERROR("Lento: Strange interruption - tell Peter.\n");
                        error = -EINTR;
                }
        } else {        /* If lento died i.e. !UC_OPEN(channel) */
                CERROR("lento_upcall: Lento dead on (op,un) (%d.%d) flags %d\n",
                       req->rq_opcode, req->rq_unique, req->rq_flags);
                error = -ENODEV;
        }

exit_req:
        PRESTO_FREE(req, sizeof(struct upc_req));
exit_buf:
        return error;
}
