/*
2
 * Copyright 2010 Joris van Rantwijk.
3
 *
4
 * This code is free software; you can redistribute it and/or modify
5
 * it under the terms of the GNU General Public License as published by
6
 * the Free Software Foundation; either version 2 of the License, or
7
 * (at your option) any later version.
8
 */
9
 
10
/**
11
 * @file    spacewirelight.c
12
 * @brief   SpaceWire Light driver for RTEMS 4.10 on LEON3.
13
 *
14
 * See spacewirelight.h for a description of the API.
15
 */
16
 
17
#include <stdlib.h>
18
#include <stdint.h>
19
#include <bsp.h>
20
#include <rtems/malloc.h>
21
 
22
#include "spacewirelight.h"
23
 
24
 
25
/**
26
 * Define SPWL_CHECK_CONCURRENT_CALL to explicitly guard against
27
 * invalid concurrent calls from multiple tasks.
28
 */
29
/* #define SPWL_CHECK_CONCURRENT_CALL */
30
 
31
/* Tell GCC that type-incompatible pointers may be aliases. */
32
#define __may_alias             __attribute__((may_alias))
33
 
34
/* Register addresses, relative to APB base address. */
35
#define SPWL_REG_CONTROL        0x00
36
#define SPWL_REG_STATUS         0x04
37
#define SPWL_REG_TXSCALER       0x08
38
#define SPWL_REG_TIMECODE       0x0c
39
#define SPWL_REG_RXDMA          0x10
40
#define SPWL_REG_TXDMA          0x14
41
 
42
/* Bit masks in registers */
43
#define SPWL_CONTROL_RESET      0x0001
44
#define SPWL_CONTROL_START      0x0004
45
#define SPWL_CONTROL_AUTOSTART  0x0008
46
#define SPWL_CONTROL_DISABLE    0x0010
47
#define SPWL_CONTROL_RXDMA      0x0040
48
#define SPWL_CONTROL_TXDMA      0x0080
49
#define SPWL_CONTROL_IESTATUS   0x0200
50
#define SPWL_CONTROL_IETICK     0x0400
51
#define SPWL_CONTROL_IERXDESC   0x0800
52
#define SPWL_CONTROL_IETXDESC   0x1000
53
#define SPWL_STATUS_TICK        0x0400
54
#define SPWL_STATUS_RXDESC      0x0800
55
#define SPWL_STATUS_TXDESC      0x1000
56
 
57
#define SPWL_ERROR_MASK         ((SPWL_ERR_DISCONNECT) | (SPWL_ERR_PARITY) | \
58
                                 (SPWL_ERR_ESCAPE) | (SPWL_ERR_CREDIT))
59
 
60
/* Descriptor flag bits */
61
#define SPWL_DESC_LENMASK       0x0000ffff
62
#define SPWL_DESC_EN            0x00010000
63
#define SPWL_DESC_IE            0x00040000
64
#define SPWL_DESC_DONE          0x00080000
65
#define SPWL_DESC_EOP           0x00100000
66
#define SPWL_DESC_EEP           0x00200000
67
 
68
/* Convert EOP bits from descriptor flags to library API.
69
   This depends on the specific values of the EOP flags. */
70
#define SPWL_EOP_DESC_TO_FLAG(f)    (((f) & (SPWL_DESC_EOP | SPWL_DESC_EEP)) >> 16)
71
#define SPWL_EOP_FLAG_TO_DESC(f)    (((f) & (SPWL_EOP | SPWL_EEP)) << 16)
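
/* Note (assumption, values taken from the macro arithmetic rather than from
   spacewirelight.h): for these conversions to hold, the API flags must be the
   descriptor bits shifted right by 16, i.e. presumably
   SPWL_EOP == (SPWL_DESC_EOP >> 16) == 0x0010 and
   SPWL_EEP == (SPWL_DESC_EEP >> 16) == 0x0020. That is what the remark about
   depending on the specific flag values refers to. */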
72
 
73
 
74
/* Frame descriptor. */
75
struct descriptor_struct {
76
    volatile uint32_t flags;
77
    volatile uint32_t ptr;
78
};
79
 
80
 
81
/* Structure describing an open SpaceWire Light device. */
82
struct spwl_context {
83
    struct spwl_context *next;          /* Link to next context */
84
    unsigned long   devaddr;            /* Base address of APB registers */
85
    unsigned int    devirq;             /* Device IRQ */
86
    unsigned int    ndesc;              /* Size of descriptor tables */
87
    unsigned int    rxbufs;             /* Number of RX buffers */
88
    unsigned int    txbufs;             /* Number of allocated TX buffers */
89
    unsigned int    rxbufsize;          /* Size of each receive buffer */
90
    unsigned int    txbufsize;          /* Size of each transmit buffer */
91
    volatile struct descriptor_struct *rxdesc;  /* RX descriptor table */
92
    volatile struct descriptor_struct *txdesc;  /* TX descriptor table */
93
    unsigned char   *rxdata;            /* RX data buffers */
94
    unsigned char   *txdata;            /* Internal TX data buffers */
95
    unsigned int    rxdescqh, rxdescqlen;   /* RX descriptor ring */
96
    unsigned int    txdescqh, txdescqlen;   /* TX descriptor ring */
97
    unsigned int    txdataqh;           /* Next internal TX buffer to use */
98
    spwl_txbuf_t    *txappqh, *txappqt; /* List of application TX buffers */
99
    unsigned int    txappnact;          /* Nr of active app buffers */
100
    unsigned int    txappnrcl;          /* Nr of reclaimable app buffers */
101
    unsigned int    currxpos;           /* Position in partial RX frame */
102
    unsigned int    curtxpos;           /* Position in partial TX frame */
103
    unsigned int    deftxscaler;        /* Default (10 Mbit) TX scaler */
104
    unsigned int    pendingerrors;      /* Pending error bits */
105
    unsigned int    errorcnt;           /* Count link error detections */
106
    rtems_id        seminterrupt;       /* Semaphore to wait for interrupt */
107
    rtems_isr_entry saved_isr;          /* Old interrupt handler */
108
#ifdef SPWL_CHECK_CONCURRENT_CALL
109
    int             recvbusy;           /* Inside receive function */
110
    int             sendbusy;           /* Inside send function */
111
#endif
112
};
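
/*
 * Descriptor ring convention: rxdescqh/txdescqh hold the index of the next
 * descriptor to be filled (the head), rxdescqlen/txdescqlen count the
 * occupied descriptors, and the tail index is derived as
 * (head - len) & (ndesc - 1). The mask works because ndesc is always a
 * power of two (1 << desctablesize).
 */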
113
 
114
 
115
/* Global linked list of spwl_context structures. */
116
static struct spwl_context *spwl_context_list = NULL;
117
 
118
/* Default options used by spwl_open_xxx() functions. */
119
static const struct spwl_options spwl_default_options = SPWL_OPTIONS_DEFAULT;
120
 
121
 
122
/*
123
 * == Locking ==
124
 *
125
 * The "spwl_context" structure may be accessed concurrently by one or more
126
 * tasks through the API, as well as by the interrupt handler. The following
127
 * synchronization rules apply:
128
 *
129
 *  o Atomic access to hardware registers (i.e. read-modify-write control reg)
130
 *    is done with interrupts disabled. The only exception is inside the
131
 *    interrupt handler itself.
132
 *
133
 *  o Exclusive access to the context structure is ensured by disabling
134
 *    interrupts. Also the global linked list of context structures is
135
 *    accessed with interrupts disabled.
136
 *
137
 *  o During data copying in spwl_recv() and spwl_send(), interrupts are
138
 *    restored to the previous setting to avoid keeping interrupts disabled
139
 *    for unlimited periods of time.
140
 *
141
 *  o A binary semaphore is used to let tasks wait until an interrupt occurs.
142
 *    One issue here is to avoid races where the interrupt occurs before
143
 *    the task starts waiting. Another issue is that multiple tasks may be
144
 *    waiting for the same interrupt, in which case the interrupt should
145
 *    wake them all up.
146
 */
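
/*
 * Illustration of these rules (a minimal sketch, not used by the driver):
 * every read-modify-write of the control register is wrapped in
 * rtems_interrupt_disable()/rtems_interrupt_enable(), exactly as
 * spwl_ctrl_setbits() and spwl_set_linkmode() below do it.
 */
#if 0
static void example_ctrl_rmw(spwl_handle h, uint32_t setbits)
{
    rtems_interrupt_level level;
    uint32_t ctrl;

    rtems_interrupt_disable(level);                 /* enter critical section */
    ctrl = readreg(h->devaddr + SPWL_REG_CONTROL);  /* read ...                */
    ctrl |= setbits;                                /* ... modify ...          */
    writereg(h->devaddr + SPWL_REG_CONTROL, ctrl);  /* ... write back          */
    rtems_interrupt_enable(level);                  /* leave critical section  */
}
#endif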
147
 
148
 
149
/* Read from a 32-bit register. */
150
static inline uint32_t readreg(unsigned long addr)
151
{
152
    uint32_t ret;
153
    asm volatile (
154
        "lda [%1] 1, %0"
155
        : "=r" (ret)
156
        : "r" (addr) );
157
    return ret;
158
}
159
 
160
 
161
/* Write to a 32-bit register. */
162
static inline void writereg(unsigned long addr, uint32_t v)
163
{
164
    *((volatile uint32_t *)addr) = v;
165
}
166
 
167
 
168
/* Read a 32-bit word from memory, bypassing the data cache. */
169
static inline uint32_t readmem_nocache(const volatile uint32_t *addr)
170
{
171
    uint32_t ret;
172
    asm volatile (
173
        "lda [%1] 1, %0"
174
        : "=r" (ret)
175
        : "r" (addr) );
176
    return ret;
177
}
178
 
179
 
180
/* Read a byte from memory, bypassing the data cache. */
181
static inline char readmem_byte_nocache(const volatile char *addr)
182
{
183
    char ret;
184
    asm volatile (
185
        "lduba [%1] 1, %0"
186
        : "=r" (ret)
187
        : "r" (addr) );
188
    return ret;
189
}
190
 
191
 
192
/* Write a 32-bit word to memory. */
193
static inline void writemem(volatile uint32_t *addr, uint32_t v)
194
{
195
    *addr = v;
196
}
197
 
198
 
199
/* Copy data, bypassing the CPU data cache. */
200
static void memcpy_nocache(void *dest, const volatile void *src, size_t n)
201
{
202
    char __may_alias                *cdst = dest;
203
    const volatile char __may_alias *csrc = src;
204
 
205
    /* Copy word-at-a-time if both pointers are word-aligned. */
206
    if (((((unsigned long)dest) | ((unsigned long)src)) & 3) == 0) {
207
 
208
        /* Copy words. */
209
        uint32_t __may_alias                *wdst = (uint32_t *)dest;
210
        const volatile uint32_t __may_alias *wsrc = (const volatile uint32_t *)src;
211
        while (n >= 4) {
212
            *wdst = readmem_nocache(wsrc);
213
            wdst++;
214
            wsrc++;
215
            n -= 4;
216
        }
217
 
218
        /* Copy any remaining bytes with the byte-loop below. */
219
        cdst = (char *)wdst;
220
        csrc = (const volatile char *)wsrc;
221
    }
222
 
223
    /* Copy bytes. */
224
    while (n > 0) {
225
        *cdst = readmem_byte_nocache(csrc);
226
        cdst++;
227
        csrc++;
228
        n--;
229
    }
230
 
231
}
232
 
233
 
234
/* Enable bits in the control register. Called with interrupts disabled. */
235
static inline void spwl_ctrl_setbits(spwl_handle h, uint32_t setbits)
236
{
237
    uint32_t value;
238
    value = readreg(h->devaddr + SPWL_REG_CONTROL);
239
    value |= setbits;
240
    writereg(h->devaddr + SPWL_REG_CONTROL, value);
241
}
242
 
243
 
244
/*
245
 * Wait until the interrupt handler releases the specified semaphore.
246
 * Called with interrupts disabled but returns with interrupts enabled.
247
 */
248
static rtems_status_code wait_for_interrupt(rtems_id sem,
249
                                            rtems_interrupt_level level,
250
                                            rtems_interval timeout)
251
{
252
    rtems_status_code ret;
253
 
254
    /*
255
     * The interrupt has been enabled in the SpaceWire core, but interrupts
256
     * are disabled at the CPU level. Therefore nothing can happen until
257
     * we are safely sleeping inside rtems_semaphore_obtain().
258
     *
259
     * Blocking the task with interrupts disabled is apparently handled
260
     * correctly by RTEMS, i.e. the kernel updates the CPU interrupt status
261
     * as part of the context switch.
262
     */
263
    ret = rtems_semaphore_obtain(sem, RTEMS_WAIT, timeout);
264
 
265
    /* Restore interrupts. */
266
    rtems_interrupt_enable(level);
267
 
268
    /* If we got the semaphore, flush it to wake the other waiting tasks.
269
       rtems_semaphore_flush() can be pretty slow, which is why we call it
270
       with interrupts enabled. */
271
    if (ret == RTEMS_SUCCESSFUL)
272
        rtems_semaphore_flush(sem);
273
 
274
    return ret;
275
}
276
 
277
 
278
/* Reap (some) completed TX descriptors. Called with interrupts disabled. */
279
static void reap_tx_descriptors(spwl_handle h)
280
{
281
    unsigned int    txdescqt, txdescqp, nreap;
282
    uint32_t        descf;
283
 
284
    /* Stop if the TX ring is empty. */
285
    if (h->txdescqlen == 0)
286
        return;
287
 
288
    /* Stop if the tail descriptor in the TX ring is not yet complete. */
289
    txdescqt = (h->txdescqh - h->txdescqlen) & (h->ndesc - 1);
290
    descf = readmem_nocache(&h->txdesc[txdescqt].flags);
291
    if ((descf & SPWL_DESC_DONE) == 0)
292
        return;
293
 
294
    /* Check if the entire TX ring is complete;
295
       in that case reap everything, otherwise reap just one buffer. */
296
    txdescqp = (h->txdescqh - 1) & (h->ndesc - 1);
297
    descf = readmem_nocache(&h->txdesc[txdescqp].flags);
298
    if ((descf & SPWL_DESC_DONE) != 0)
299
        nreap = h->txdescqlen;
300
    else
301
        nreap = 1;
302
 
303
    /* Remove reaped buffers from TX ring. */
304
    h->txdescqlen -= nreap;
305
 
306
    /* If the reaped buffers are application buffers, move them to the
307
       list of reclaimable buffers. */
308
    if (h->txappnact > 0) {
309
        h->txappnact -= nreap;
310
        h->txappnrcl += nreap;
311
    }
312
}
313
 
314
 
315
/*
316
 * Interrupt handler.
317
 *
318
 * The interrupt handler does not do any data handling itself.
319
 * It simply releases a semaphore to wake up tasks that do the actual work.
320
 */
321
static void spwl_interrupt_handler(rtems_vector_number vec)
322
{
323
    struct spwl_context *ctx;
324
    uint32_t ctrl;
325
 
326
    /* Scan list of device contexts for a matching IRQ vector. */
327
    for (ctx = spwl_context_list; ctx != NULL; ctx = ctx->next) {
328
        if (ctx->devirq + 0x10 == vec) {
329
 
330
            /* Disable device interrupts. */
331
            ctrl   = readreg(ctx->devaddr + SPWL_REG_CONTROL);
332
            ctrl &= ~ (SPWL_CONTROL_IERXDESC | SPWL_CONTROL_IETXDESC |
333
                       SPWL_CONTROL_IETICK | SPWL_CONTROL_IESTATUS);
334
            writereg(ctx->devaddr + SPWL_REG_CONTROL, ctrl);
335
 
336
            /* Notify waiting tasks. */
337
            rtems_semaphore_release(ctx->seminterrupt);
338
        }
339
    }
340
}
341
 
342
 
343
#ifdef LEON3
344
/* Open a SpaceWire Light device. */
345
rtems_status_code spwl_open(spwl_handle *h,
346
                            unsigned int index,
347
                            const struct spwl_options *opt)
348
{
349
    amba_apb_device apbdev;
350
 
351
    /* Find device in APB plug&play configuration. */
352
    if (!amba_find_next_apbslv(&amba_conf,
353
                               VENDOR_OPENCORES, DEVICE_SPACEWIRELIGHT,
354
                               &apbdev, index)) {
355
        return RTEMS_INVALID_NUMBER;
356
    }
357
 
358
    return spwl_open_hwaddr(h, apbdev.start, apbdev.irq, opt);
359
}
360
#endif
361
 
362
 
363
/* Open a SpaceWire Light device. */
364
rtems_status_code spwl_open_hwaddr(spwl_handle *h,
365
                                   unsigned long addr, unsigned int irq,
366
                                   const struct spwl_options *opt)
367
{
368
    struct spwl_context *ctx;
369
    uint32_t t, desctablesize;
370
    rtems_status_code ret;
371
    rtems_interrupt_level level;
372
    unsigned int i;
373
    void *vp;
374
 
375
    /* Use default options if no options specified. */
376
    if (opt == NULL) {
377
        opt = &spwl_default_options;
378
    }
379
 
380
    /* Read configuration of SpaceWire Light core. */
381
    t = readreg(addr + SPWL_REG_CONTROL);
382
    desctablesize = (t >> 24);
383
    if (desctablesize < 4 || desctablesize > 14) {
384
        ret = RTEMS_IO_ERROR;
385
        goto errout;
386
    }
387
 
388
    if (opt->rxbufsize < 32 || opt->rxbufsize > 65532 ||
389
        opt->txbufsize < 32 || opt->txbufsize > 65532) {
390
        ret = RTEMS_INVALID_SIZE;
391
        goto errout;
392
    }
393
 
394
    /* Allocate context structure. */
395
    ctx = malloc(sizeof(struct spwl_context));
396
    if (ctx == NULL) {
397
        ret = RTEMS_NO_MEMORY;
398
        goto errout;
399
    }
400
 
401
    /* Initialize context structure. */
402
    ctx->devaddr    = addr;
403
    ctx->devirq     = irq;
404
    ctx->ndesc      = 1 << desctablesize;
405
    ctx->rxbufs     = opt->rxbufs;
406
    ctx->txbufs     = opt->txbufs;
407
    ctx->rxbufsize  = (opt->rxbufsize + 3) & (~3U);
408
    ctx->txbufsize  = (opt->txbufsize + 3) & (~3U);
409
    ctx->rxdescqh   = ctx->rxdescqlen = 0;
410
    ctx->txdescqh   = ctx->txdescqlen = 0;
411
    ctx->txdataqh   = 0;
412
    ctx->txappqt    = ctx->txappqh = NULL;
413
    ctx->txappnact  = 0;
414
    ctx->txappnrcl  = 0;
415
    ctx->currxpos   = 0;
416
    ctx->curtxpos   = 0;
417
#ifdef SPWL_CHECK_CONCURRENT_CALL
418
    ctx->recvbusy   = 0;
419
    ctx->sendbusy   = 0;
420
#endif
421
    ctx->pendingerrors = 0;
422
    ctx->errorcnt   = 0;
423
 
424
    /* Do not allocate more buffers than the size of the descriptor table. */
425
    if (ctx->rxbufs > ctx->ndesc)
426
        ctx->rxbufs = ctx->ndesc;
427
    if (ctx->txbufs > ctx->ndesc)
428
        ctx->txbufs = ctx->ndesc;
429
 
430
    /* Allocate RX/TX descriptor tables. */
431
    if (rtems_memalign(&vp, 8 * ctx->ndesc, 2 * 8 * ctx->ndesc)) {
432
        ret = RTEMS_NO_MEMORY;
433
        goto errout_desc;
434
    }
435
    ctx->rxdesc = ((struct descriptor_struct *)vp);
436
    ctx->txdesc = ((struct descriptor_struct *)vp) + ctx->ndesc;
437
 
438
    /* Allocate RX/TX data buffers. */
439
    if (rtems_memalign(&vp, 32, ctx->rxbufs * ctx->rxbufsize)) {
440
        ret = RTEMS_NO_MEMORY;
441
        goto errout_rxdata;
442
    }
443
    ctx->rxdata = vp;
444
    if (rtems_memalign(&vp, 32, ctx->txbufs * ctx->txbufsize)) {
445
        ret = RTEMS_NO_MEMORY;
446
        goto errout_txdata;
447
    }
448
    ctx->txdata = vp;
449
 
450
    /* Initialize semaphore. */
451
    ret = rtems_semaphore_create(
452
        rtems_build_name('S','P','W','L'),
453
        0,
454
        RTEMS_FIFO | RTEMS_SIMPLE_BINARY_SEMAPHORE,
455
        RTEMS_NO_PRIORITY,
456
        &ctx->seminterrupt);
457
    if (ret != RTEMS_SUCCESSFUL)
458
        goto errout_sem;
459
 
460
    /* Clear descriptor tables. */
461
    for (i = 0; i < ctx->ndesc; i++) {
462
        writemem(&ctx->rxdesc[i].flags, 0);
463
        writemem(&ctx->txdesc[i].flags, 0);
464
    }
465
 
466
    /* Fill RX descriptor table. */
467
    for (i = 0; i < ctx->rxbufs; i++) {
468
        unsigned char *pbuf = ctx->rxdata + i * ctx->rxbufsize;
469
        writemem(&ctx->rxdesc[i].ptr, (uint32_t)pbuf);
470
        writemem(&ctx->rxdesc[i].flags,
471
                 ctx->rxbufsize | SPWL_DESC_EN | SPWL_DESC_IE);
472
    }
473
    ctx->rxdescqh   = ctx->rxbufs & (ctx->ndesc - 1);
474
    ctx->rxdescqlen = ctx->rxbufs;
475
 
476
    /* Reset device. */
477
    writereg(ctx->devaddr + SPWL_REG_CONTROL, SPWL_CONTROL_RESET);
478
 
479
    /* Store initial TX scaler. */
480
    ctx->deftxscaler = readreg(ctx->devaddr + SPWL_REG_TXSCALER);
481
 
482
    /* Add context structure to linked list. */
483
    rtems_interrupt_disable(level);
484
    ctx->next = spwl_context_list;
485
    spwl_context_list = ctx;
486
    rtems_interrupt_enable(level);
487
 
488
    /* Register interrupt handler. */
489
    rtems_interrupt_catch(spwl_interrupt_handler, ctx->devirq + 0x10,
490
                          &ctx->saved_isr);
491
    LEON_Clear_interrupt(ctx->devirq);
492
    LEON_Unmask_interrupt(ctx->devirq);
493
 
494
    /* Initialize descriptor pointers. */
495
    writereg(ctx->devaddr + SPWL_REG_RXDMA, (uint32_t)(ctx->rxdesc));
496
    writereg(ctx->devaddr + SPWL_REG_TXDMA, (uint32_t)(ctx->txdesc));
497
 
498
    /* Start RX DMA. */
499
    writereg(ctx->devaddr + SPWL_REG_CONTROL, SPWL_CONTROL_RXDMA);
500
 
501
    *h = ctx;
502
    return RTEMS_SUCCESSFUL;
503
 
504
    /* Cleanup after error. */
505
errout_sem:
506
    free(ctx->txdata);
507
errout_txdata:
508
    free(ctx->rxdata);
509
errout_rxdata:
510
    free((void*)ctx->rxdesc);
511
errout_desc:
512
    free(ctx);
513
errout:
514
    return ret;
515
}
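
/*
 * Usage sketch (hypothetical application code, not part of the driver):
 * open core 0 via the APB plug&play lookup (LEON3 only, see spwl_open()
 * above) with a larger RX buffer size than the default. Options not set
 * here keep their SPWL_OPTIONS_DEFAULT values.
 */
#if 0
static spwl_handle example_open(void)
{
    struct spwl_options opt = SPWL_OPTIONS_DEFAULT;
    spwl_handle h;
    rtems_status_code ret;

    opt.rxbufs    = 8;          /* number of receive frame buffers */
    opt.rxbufsize = 4096;       /* bytes per buffer, range 32 .. 65532 */

    ret = spwl_open(&h, 0, &opt);       /* index 0 = first core found */
    if (ret != RTEMS_SUCCESSFUL)
        return NULL;

    return h;
}
#endif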
516
 
517
 
518
/* Close an open SpaceWire Light device. */
519
void spwl_close(spwl_handle h)
520
{
521
    struct spwl_context **ctxp;
522
    rtems_interrupt_level level;
523
 
524
    /* Reset device. */
525
    writereg(h->devaddr + SPWL_REG_CONTROL, SPWL_CONTROL_RESET);
526
 
527
    /* Unregister interrupt handler.
528
       NOTE: This is incorrect in case of shared interrupts. */
529
    LEON_Mask_interrupt(h->devirq);
530
    LEON_Clear_interrupt(h->devirq);
531
    rtems_interrupt_catch(h->saved_isr, h->devirq + 0x10, &h->saved_isr);
532
 
533
    /* Unlink context structure. */
534
    rtems_interrupt_disable(level);
535
    ctxp = &spwl_context_list;
536
    for (ctxp = &spwl_context_list; *ctxp != NULL; ctxp = &(*ctxp)->next) {
537
        if (*ctxp == h) {
538
            *ctxp = h->next;
539
            break;
540
        }
541
    }
542
    rtems_interrupt_enable(level);
543
 
544
    /* Delete semaphore. */
545
    rtems_semaphore_delete(h->seminterrupt);
546
 
547
    /* Release memory. */
548
    free(h->txdata);
549
    free(h->rxdata);
550
    free((void*)h->rxdesc);
551
    free(h);
552
}
553
 
554
 
555
/* Set the TX clock scaler for the link. */
556
rtems_status_code spwl_set_linkspeed(spwl_handle h, unsigned int scaler)
557
{
558
    writereg(h->devaddr + SPWL_REG_TXSCALER, scaler);
559
    return RTEMS_SUCCESSFUL;
560
}
561
 
562
 
563
/* Return the currently configured TX clock scaler. */
564
unsigned int spwl_get_linkspeed(spwl_handle h)
565
{
566
    return readreg(h->devaddr + SPWL_REG_TXSCALER);
567
}
568
 
569
 
570
/* Return the default TX scaler value. */
571
unsigned int spwl_get_default_linkspeed(spwl_handle h)
572
{
573
    return h->deftxscaler;
574
}
575
 
576
 
577
/* Change the mode of the SpaceWire link. */
578
rtems_status_code spwl_set_linkmode(spwl_handle h, spwl_linkmode mode)
579
{
580
    rtems_interrupt_level level;
581
    uint32_t ctrl, m;
582
 
583
    /* Convert link mode to bits in control register. */
584
    switch (mode) {
585
        case SPWL_LINKMODE_START:
586
            m = SPWL_CONTROL_START;
587
            break;
588
        case SPWL_LINKMODE_AUTOSTART:
589
            m = SPWL_CONTROL_AUTOSTART;
590
            break;
591
        case SPWL_LINKMODE_DISABLE:
592
            m = SPWL_CONTROL_DISABLE;
593
            break;
594
        default:
595
            m = 0;
596
            break;
597
    }
598
 
599
    /* Update control register. */
600
    rtems_interrupt_disable(level);
601
    ctrl = readreg(h->devaddr + SPWL_REG_CONTROL);
602
    ctrl &=  ~ (SPWL_CONTROL_START |
603
                SPWL_CONTROL_AUTOSTART |
604
                SPWL_CONTROL_DISABLE);
605
    ctrl |= m;
606
    writereg(h->devaddr + SPWL_REG_CONTROL, ctrl);
607
    rtems_interrupt_enable(level);
608
 
609
    return RTEMS_SUCCESSFUL;
610
}
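
/*
 * Usage sketch (hypothetical, not part of the driver): bring the link up
 * at the initial 10 Mbit rate captured at reset time and start the
 * link handshake.
 */
#if 0
static void example_start_link(spwl_handle h)
{
    /* Default (10 Mbit) TX scaler, as stored by spwl_open_hwaddr(). */
    spwl_set_linkspeed(h, spwl_get_default_linkspeed(h));

    /* Actively start the link (use SPWL_LINKMODE_AUTOSTART to wait for
       the remote side to initiate instead). */
    spwl_set_linkmode(h, SPWL_LINKMODE_START);
}
#endif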
611
 
612
 
613
/* Get status and pending errors of SpaceWire link. */
614
rtems_status_code spwl_get_linkstatus(spwl_handle h,
615
                                      spwl_linkstatus *linkstatus,
616
                                      unsigned int *errors)
617
{
618
    rtems_interrupt_level level;
619
    uint32_t status;
620
 
621
    rtems_interrupt_disable(level);
622
 
623
    /* Read status word and clear error flags. */
624
    status = readreg(h->devaddr + SPWL_REG_STATUS);
625
    writereg(h->devaddr + SPWL_REG_STATUS, status & SPWL_ERROR_MASK);
626
 
627
    /* Update error counter (needed in case spwl_wait() is in progress). */
628
    if ((status & SPWL_ERROR_MASK) != 0)
629
        h->errorcnt++;
630
 
631
    /* Accumulate error flags. */
632
    h->pendingerrors |= status;
633
 
634
    /* Clear pending errors if error status is requested. */
635
    if (errors) {
636
        status |= h->pendingerrors & SPWL_ERROR_MASK;
637
        h->pendingerrors = 0;
638
    }
639
 
640
    rtems_interrupt_enable(level);
641
 
642
    if (linkstatus)
643
        *linkstatus = status & 3;
644
    if (errors)
645
        *errors = status & (SPWL_ERROR_MASK | SPWL_ERR_AHB);
646
 
647
    return RTEMS_SUCCESSFUL;
648
}
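
/*
 * Usage sketch (hypothetical): poll the link and inspect accumulated error
 * flags. The error bits follow the SPWL_ERR_* definitions from
 * spacewirelight.h.
 */
#if 0
static void example_check_link(spwl_handle h)
{
    spwl_linkstatus status;
    unsigned int errors;

    spwl_get_linkstatus(h, &status, &errors);
    if (errors & (SPWL_ERR_DISCONNECT | SPWL_ERR_PARITY |
                  SPWL_ERR_ESCAPE | SPWL_ERR_CREDIT | SPWL_ERR_AHB)) {
        /* A link or DMA error occurred since the previous call; passing a
           non-NULL errors pointer has also cleared the driver's pending
           error accumulator. */
    }
}
#endif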
649
 
650
 
651
/* Wait for specified condition with timeout. */
652
rtems_status_code spwl_wait(spwl_handle h,
653
                            unsigned int *cond, rtems_interval timeout)
654
{
655
    rtems_status_code   ret;
656
    rtems_interrupt_level level;
657
    unsigned int        i_cond = *cond;
658
    unsigned int        r_cond = 0;
659
    unsigned int        rxdescqt;
660
    unsigned int        prev_errorcnt = 0;
661
    rtems_interval      endtime = RTEMS_NO_TIMEOUT;
662
    rtems_interval      timeleft = timeout;
663
    uint32_t            status, ctrl, descf;
664
    int                 first = 1;
665
 
666
    /* Determine maximum wait time. */
667
    if (timeout != RTEMS_NO_TIMEOUT)
668
        endtime = rtems_clock_get_ticks_since_boot() + timeout;
669
 
670
    /* Wait until condition satisfied or timeout. */
671
    do {
672
 
673
        /* Disable global interrupts. */
674
        rtems_interrupt_disable(level);
675
 
676
        /* Store initial link error count to detect upcoming link events. */
677
        if (first)
678
            prev_errorcnt = h->errorcnt;
679
        first = 0;
680
 
681
        /* Enable relevant device interrupts. */
682
        ctrl = readreg(h->devaddr + SPWL_REG_CONTROL);
683
        if ((i_cond & SPWL_COND_RDYRECV) != 0)
684
            ctrl |= SPWL_CONTROL_IERXDESC;
685
        if ((i_cond & (SPWL_COND_RDYSEND |
686
                     SPWL_COND_RDYSENDBUF |
687
                     SPWL_COND_RECLAIM)) != 0)
688
            ctrl |= SPWL_CONTROL_IETXDESC;
689
        if ((i_cond & SPWL_COND_TIMECODE) != 0)
690
            ctrl |= SPWL_CONTROL_IETICK;
691
        if ((i_cond & (SPWL_COND_LINKUP | SPWL_COND_LINKDOWN)) != 0)
692
            ctrl |= SPWL_CONTROL_IESTATUS;
693
        writereg(h->devaddr + SPWL_REG_CONTROL, ctrl);
694
 
695
        /* Read status register and clear error flags. */
696
        status = readreg(h->devaddr + SPWL_REG_STATUS);
697
        writereg(h->devaddr + SPWL_REG_STATUS, status & SPWL_ERROR_MASK);
698
 
699
        /* Update error counter. */
700
        if ((status & SPWL_ERROR_MASK) != 0)
701
            h->errorcnt++;
702
 
703
        /* Accumulate error flags for spwl_get_linkstatus(). */
704
        h->pendingerrors |= status;
705
 
706
        /* Check for link up condition. */
707
        if ((i_cond & SPWL_COND_LINKUP) != 0 &&
708
            ((status & 3) == 3 || h->errorcnt > prev_errorcnt)) {
709
            /* Either the link is currently up, or a link error occurred
710
               since entering spwl_wait(), indicating that the link has
711
               been up even if it is already down again. */
712
            r_cond |= SPWL_COND_LINKUP;
713
        }
714
 
715
        /* Check for link down condition. */
716
        if ((i_cond & SPWL_COND_LINKDOWN) != 0 &&
717
            ((status & 3) != 3 || h->errorcnt > prev_errorcnt)) {
718
            /* Either the link is currently down, or a link error occurred
719
               since entering spwl_wait(), indicating that the link has
720
               been down even if it is already up again. */
721
            r_cond |= SPWL_COND_LINKDOWN;
722
        }
723
 
724
        /* Check receive condition. */
725
        if ((i_cond & SPWL_COND_RDYRECV) != 0) {
726
            /* Check for received data in RX ring. */
727
            rxdescqt = (h->rxdescqh - h->rxdescqlen) & (h->ndesc - 1);
728
            descf = readmem_nocache(&h->rxdesc[rxdescqt].flags);
729
            if ((descf & SPWL_DESC_DONE) != 0)
730
                r_cond |= SPWL_COND_RDYRECV;
731
        }
732
 
733
        /* Check send/reclaim conditions. */
734
        if ((i_cond & (SPWL_COND_RDYSEND |
735
                       SPWL_COND_RDYSENDBUF |
736
                       SPWL_COND_RECLAIM)) != 0) {
737
 
738
            /* Reap completed TX descriptors. */
739
            reap_tx_descriptors(h);
740
 
741
            /* Check for room in TX ring and room in TX internal buffers
742
               and no application buffers in TX ring. */
743
            if ((i_cond & SPWL_COND_RDYSEND) != 0 &&
744
                h->txdescqlen < h->ndesc &&
745
                h->txdescqlen < h->txbufs &&
746
                h->txappnact == 0)
747
                r_cond |= SPWL_COND_RDYSEND;
748
 
749
            /* Check for room in TX ring and no internal buffers in TX ring. */
750
            if ((i_cond & SPWL_COND_RDYSENDBUF) != 0 &&
751
                h->txdescqlen < h->ndesc &&
752
                (h->txdescqlen == 0 || h->txappnact > 0))
753
                r_cond |= SPWL_COND_RDYSENDBUF;
754
 
755
            /* Check for non-empty reclaim list. */
756
            if ((i_cond & SPWL_COND_RECLAIM) != 0 &&
757
                h->txappnrcl > 0)
758
                r_cond |= SPWL_COND_RECLAIM;
759
        }
760
 
761
        /* Check for received time code. */
762
        if ((i_cond & SPWL_COND_TIMECODE) != 0 &&
763
            (status & SPWL_STATUS_TICK) != 0) {
764
            /* There is a pending timecode. */
765
            r_cond |= SPWL_COND_TIMECODE;
766
        }
767
 
768
        /* Stop waiting if any of the conditions has been satisfied. */
769
        if (r_cond != 0) {
770
            rtems_interrupt_enable(level);
771
            ret = RTEMS_SUCCESSFUL;
772
            break;
773
        }
774
 
775
        /* Wait for interrupt (returns with interrupts enabled). */
776
        ret = wait_for_interrupt(h->seminterrupt, level, timeleft);
777
 
778
        /* Recalculate the time left to wait. */
779
        if (timeout != RTEMS_NO_TIMEOUT) {
780
            rtems_interval tnow = rtems_clock_get_ticks_since_boot();
781
            if (tnow >= endtime) {
782
                ret = RTEMS_TIMEOUT;
783
                break;
784
            }
785
            timeleft = endtime - tnow;
786
        }
787
 
788
        /* Stop if the interrupt timed out. */
789
    } while (ret != RTEMS_TIMEOUT);
790
 
791
    /* Return */
792
    *cond = r_cond;
793
    return ret;
794
}
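
/*
 * Usage sketch (hypothetical): block until the link reaches the run state,
 * giving up after one second. On return, cond holds the subset of the
 * requested conditions that was actually satisfied.
 */
#if 0
static int example_wait_linkup(spwl_handle h)
{
    unsigned int cond = SPWL_COND_LINKUP;
    rtems_status_code ret;

    ret = spwl_wait(h, &cond, rtems_clock_get_ticks_per_second());
    return (ret == RTEMS_SUCCESSFUL) && ((cond & SPWL_COND_LINKUP) != 0);
}
#endif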
795
 
796
 
797
/* Transfer received data to the specified application buffer. */
798
rtems_status_code spwl_recv(spwl_handle h,
799
                            void *buf, size_t maxlen, size_t *ntrans,
800
                            unsigned int *eop, unsigned int flags)
801
{
802
    rtems_status_code   ret;
803
    rtems_interrupt_level level;
804
    size_t              r_ntrans = 0;
805
    unsigned int        r_eop = 0;
806
    unsigned int        rxdescqt;
807
    uint32_t            descf, descp, framelen, ncopy;
808
 
809
    /* Disable interrupts. */
810
    rtems_interrupt_disable(level);
811
 
812
#ifdef SPWL_CHECK_CONCURRENT_CALL
813
    /* Limit damage in case of concurrent calls. */
814
    if (h->recvbusy) {
815
        rtems_interrupt_enable(level);
816
        return RTEMS_RESOURCE_IN_USE;
817
    }
818
    h->recvbusy = 1;
819
#endif
820
 
821
    /* Transfer data until request satisfied. */
822
    while (1) {
823
 
824
        /* Transfer data until request satisfied or no more data available. */
825
        while (r_ntrans < maxlen && r_eop == 0) {
826
 
827
            /* Check that the RX ring is nonempty. */
828
            if (h->rxdescqlen == 0)
829
                break;
830
 
831
            /* Check that the frame at the tail of the RX ring is ready. */
832
            rxdescqt = (h->rxdescqh - h->rxdescqlen) & (h->ndesc - 1);
833
            descf = readmem_nocache(&h->rxdesc[rxdescqt].flags);
834
            descp = h->rxdesc[rxdescqt].ptr;
835
            if ((descf & SPWL_DESC_DONE) == 0) {
836
                /* No more received frames available. */
837
                break;
838
            }
839
 
840
            /* Re-enable interrupts during copying. */
841
            rtems_interrupt_enable(level);
842
 
843
            /* Copy data from current frame to application buffer. */
844
            framelen = descf & SPWL_DESC_LENMASK;
845
            ncopy    = framelen - h->currxpos;
846
            if (ncopy > maxlen - r_ntrans)
847
                ncopy = maxlen - r_ntrans;
848
            memcpy_nocache((unsigned char *)buf + r_ntrans,
849
                           (unsigned char *)descp + h->currxpos,
850
                           ncopy);
851
            r_ntrans    += ncopy;
852
            h->currxpos += ncopy;
853
 
854
            /* Re-disable interrupts. */
855
            rtems_interrupt_disable(level);
856
 
857
            /* Handle end of frame. */
858
            if (h->currxpos >= framelen) {
859
 
860
                /* Pass EOP flags to application. */
861
                r_eop = SPWL_EOP_DESC_TO_FLAG(descf);
862
 
863
                /* Reset partial frame position. */
864
                h->currxpos = 0;
865
 
866
                /* Resubmit buffer to head of RX ring. */
867
                writemem(&h->rxdesc[h->rxdescqh].ptr, descp);
868
                writemem(&h->rxdesc[h->rxdescqh].flags,
869
                         h->rxbufsize | SPWL_DESC_EN | SPWL_DESC_IE);
870
                h->rxdescqh = (h->rxdescqh + 1) & (h->ndesc - 1);
871
 
872
                /* Restart RX DMA. */
873
                spwl_ctrl_setbits(h, SPWL_CONTROL_RXDMA);
874
            }
875
        }
876
 
877
        /* Stop if request satisfied. */
878
        if (r_ntrans == maxlen || r_eop != 0) {
879
            ret = RTEMS_SUCCESSFUL;
880
            break;
881
        }
882
 
883
        /* No more received frames available.
884
           Stop if application does not want to wait. */
885
        if ((flags & SPWL_NO_WAIT) != 0) {
886
            ret = (r_ntrans > 0) ? RTEMS_SUCCESSFUL : RTEMS_UNSATISFIED;
887
            break;
888
        }
889
 
890
        /* Enable interrupt on data received. */
891
        spwl_ctrl_setbits(h, SPWL_CONTROL_IERXDESC);
892
 
893
        /* Final check for received data (avoid race condition). */
894
        rxdescqt = (h->rxdescqh - h->rxdescqlen) & (h->ndesc - 1);
895
        if (h->rxdescqlen == 0 ||
896
            (readmem_nocache(&h->rxdesc[rxdescqt].flags) & SPWL_DESC_DONE) == 0) {
897
            /* Wait until RX interrupt. */
898
            wait_for_interrupt(h->seminterrupt, level, RTEMS_NO_TIMEOUT);
899
            rtems_interrupt_disable(level);
900
        }
901
    }
902
 
903
    /* Restore interrupts. */
904
#ifdef SPWL_CHECK_CONCURRENT_CALL
905
    h->recvbusy = 0;
906
#endif
907
    rtems_interrupt_enable(level);
908
 
909
    /* Return */
910
    *ntrans = r_ntrans;
911
    *eop    = r_eop;
912
    return ret;
913
}
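
/*
 * Usage sketch (hypothetical): read one complete SpaceWire packet into a
 * caller-supplied buffer. A packet may be delivered in several pieces; it is
 * complete when a non-zero EOP indication is returned. With flags == 0 the
 * call blocks until data arrives.
 */
#if 0
static size_t example_recv_packet(spwl_handle h, void *pkt, size_t maxlen)
{
    size_t pos = 0, ntrans;
    unsigned int eop = 0;

    while (pos < maxlen && eop == 0) {
        if (spwl_recv(h, (unsigned char *)pkt + pos, maxlen - pos,
                      &ntrans, &eop, 0) != RTEMS_SUCCESSFUL)
            break;
        pos += ntrans;
    }
    /* eop == SPWL_EOP: normal end of packet;
       eop == SPWL_EEP: packet terminated by an error end-of-packet marker. */
    return pos;
}
#endif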
914
 
915
 
916
/* Send data to the SpaceWire link. */
917
rtems_status_code spwl_send(spwl_handle h,
918
                            const void *buf, size_t maxlen, size_t *ntrans,
919
                            unsigned int flags)
920
{
921
    rtems_status_code   ret;
922
    rtems_interrupt_level level;
923
    size_t              r_ntrans = 0;
924
    unsigned char *     bufp;
925
    unsigned int        txdescqp;
926
    uint32_t            descf, ncopy;
927
 
928
    /* Disable interrupts. */
929
    rtems_interrupt_disable(level);
930
 
931
#ifdef SPWL_CHECK_CONCURRENT_CALL
932
    /* Limit damage in case of concurrent calls. */
933
    if (h->sendbusy) {
934
        rtems_interrupt_enable(level);
935
        return RTEMS_RESOURCE_IN_USE;
936
    }
937
    h->sendbusy = 1;
938
#endif
939
 
940
    /* Transfer data until request satisfied. */
941
    while (1) {
942
 
943
        /* Reap completed TX descriptors if possible. */
944
        reap_tx_descriptors(h);
945
 
946
        /* Transfer data until request satisfied or no more room in TX bufs. */
947
        do {
948
 
949
            /* Check that there is a buffer available and that
950
               there are no application buffers in the TX ring. */
951
            if (h->txdescqlen >= h->ndesc ||
952
                h->txdescqlen >= h->txbufs ||
953
                h->txappnact > 0)
954
                break;
955
 
956
            /* Re-enable interrupts during copying. */
957
            rtems_interrupt_enable(level);
958
 
959
            /* Copy data from application buffer to internal TX buffer. */
960
            bufp  = h->txdata + h->txdataqh * h->txbufsize;
961
            ncopy = h->txbufsize - h->curtxpos;
962
            if (ncopy > maxlen - r_ntrans)
963
                ncopy = maxlen - r_ntrans;
964
            memcpy(bufp + h->curtxpos, (unsigned char *)buf + r_ntrans, ncopy);
965
            r_ntrans    += ncopy;
966
            h->curtxpos += ncopy;
967
 
968
            /* Re-disable interrupts. */
969
            rtems_interrupt_disable(level);
970
 
971
            /* Handle end of frame. */
972
            if (h->curtxpos >= h->txbufsize ||
973
                (flags & (SPWL_EOP | SPWL_EEP)) != 0) {
974
 
975
                /* Insert buffer in TX descriptor ring. */
976
                descf = h->curtxpos | SPWL_DESC_EN | SPWL_DESC_IE;
977
                if (r_ntrans == maxlen) {
978
                    /* Handle EOP. */
979
                    descf |= SPWL_EOP_FLAG_TO_DESC(flags);
980
                    flags &= ~(SPWL_EOP | SPWL_EEP);
981
                }
982
                writemem(&h->txdesc[h->txdescqh].ptr, (uint32_t)bufp);
983
                writemem(&h->txdesc[h->txdescqh].flags, descf);
984
                h->txdescqh = (h->txdescqh + 1) & (h->ndesc - 1);
985
                h->txdescqlen++;
986
 
987
                /* Advance internal TX buffer pointer. */
988
                h->curtxpos = 0;
989
                h->txdataqh++;
990
                if (h->txdataqh == h->txbufs)
991
                    h->txdataqh = 0;
992
 
993
                /* Restart TX DMA. */
994
                spwl_ctrl_setbits(h, SPWL_CONTROL_TXDMA);
995
            }
996
 
997
        } while (r_ntrans < maxlen);
998
 
999
        /* Stop when request satisfied. */
1000
        if (r_ntrans == maxlen && (flags & (SPWL_EOP | SPWL_EEP)) == 0) {
1001
            ret = RTEMS_SUCCESSFUL;
1002
            break;
1003
        }
1004
 
1005
        /* No more room in TX queue, but application wants to send more.
1006
           Stop if application does not want to wait. */
1007
        if ((flags & SPWL_NO_WAIT) != 0) {
1008
            ret = (r_ntrans > 0) ? RTEMS_SUCCESSFUL : RTEMS_UNSATISFIED;
1009
            break;
1010
        }
1011
 
1012
        /* Enable interrupt on frame transmitted */
1013
        spwl_ctrl_setbits(h, SPWL_CONTROL_IETXDESC);
1014
 
1015
        /* Final check for TX room (avoid race condition). */
1016
        if (h->txappnact > 0) {
1017
            /* Wait until all app buffers can be removed from the TX ring. */
1018
            txdescqp = (h->txdescqh - 1) & (h->ndesc - 1);
1019
        } else {
1020
            /* Wait until one buffer can be removed from the TX ring. */
1021
            txdescqp = (h->txdescqh - h->txdescqlen) & (h->ndesc - 1);
1022
        }
1023
        descf = readmem_nocache(&h->txdesc[txdescqp].flags);
1024
        if ((descf & SPWL_DESC_DONE) == 0) {
1025
            /* Wait until TX interrupt. */
1026
            wait_for_interrupt(h->seminterrupt, level, RTEMS_NO_TIMEOUT);
1027
            rtems_interrupt_disable(level);
1028
        }
1029
    }
1030
 
1031
    /* Restore interrupts. */
1032
#ifdef SPWL_CHECK_CONCURRENT_CALL
1033
    h->sendbusy = 0;
1034
#endif
1035
    rtems_interrupt_enable(level);
1036
 
1037
    /* Return */
1038
    *ntrans = r_ntrans;
1039
    return ret;
1040
}
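
/*
 * Usage sketch (hypothetical): transmit one complete packet. The SPWL_EOP
 * flag asks the driver to append a normal end-of-packet marker after the
 * last byte; without SPWL_NO_WAIT the call blocks until all data has been
 * queued for transmission.
 */
#if 0
static rtems_status_code example_send_packet(spwl_handle h,
                                             const void *pkt, size_t len)
{
    size_t ntrans = 0;

    return spwl_send(h, pkt, len, &ntrans, SPWL_EOP);
}
#endif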
1041
 
1042
 
1043
/* Receive data from the SpaceWire link without copying. */
1044
rtems_status_code spwl_recv_rxbuf(spwl_handle h,
1045
                                  void **buf,
1046
                                  uint16_t *nbytes, unsigned int *eop,
1047
                                  unsigned int flags)
1048
{
1049
    rtems_status_code   ret;
1050
    rtems_interrupt_level level;
1051
    unsigned int        rxdescqt;
1052
    uint32_t            descf;
1053
    void                *r_buf = NULL;
1054
    uint16_t            r_nbytes = 0;
1055
    unsigned int        r_eop = 0;
1056
 
1057
    /* Disable interrupts. */
1058
    rtems_interrupt_disable(level);
1059
 
1060
#ifdef SPWL_CHECK_CONCURRENT_CALL
1061
    /* Limit damage in case of concurrent calls. */
1062
    if (h->recvbusy) {
1063
        rtems_interrupt_enable(level);
1064
        return RTEMS_RESOURCE_IN_USE;
1065
    }
1066
    h->recvbusy = 1;
1067
#endif
1068
 
1069
    /* Make sure there is received data available. */
1070
    while (1) {
1071
 
1072
        /* Determine tail of RX ring. */
1073
        rxdescqt = (h->rxdescqh - h->rxdescqlen) & (h->ndesc - 1);
1074
 
1075
        /* Check if there is at least one received frame available. */
1076
        if (h->rxdescqlen > 0) {
1077
            descf = readmem_nocache(&h->rxdesc[rxdescqt].flags);
1078
            if ((descf & SPWL_DESC_DONE) != 0)
1079
                break;
1080
        }
1081
 
1082
        /* There is no data available.
1083
           Stop if the application does not want to wait. */
1084
        if ((flags & SPWL_NO_WAIT) != 0) {
1085
            ret = RTEMS_UNSATISFIED;
1086
            goto out_unlock;
1087
        }
1088
 
1089
        /* Enable interrupt on data received. */
1090
        spwl_ctrl_setbits(h, SPWL_CONTROL_IERXDESC);
1091
 
1092
        /* Final check for received data (avoid race condition). */
1093
        if (h->rxdescqlen > 0) {
1094
            descf = readmem_nocache(&h->rxdesc[rxdescqt].flags);
1095
            if ((descf & SPWL_DESC_DONE) != 0)
1096
                break;
1097
        }
1098
 
1099
        /* Wait until RX interrupt. */
1100
        wait_for_interrupt(h->seminterrupt, level, RTEMS_NO_TIMEOUT);
1101
        rtems_interrupt_disable(level);
1102
    }
1103
 
1104
    /* At least one received frame is available.
1105
       Remove buffer from RX ring and give it to the application. */
1106
    r_buf    = (void *)(h->rxdesc[rxdescqt].ptr);
1107
    r_nbytes = descf & SPWL_DESC_LENMASK;
1108
    r_eop    = SPWL_EOP_DESC_TO_FLAG(descf);
1109
    h->rxdescqlen--;
1110
 
1111
    /* Reset partial frame position.
1112
       Mixing calls to spwl_recv() and spwl_recv_rxbuf() is not supported. */
1113
    h->currxpos = 0;
1114
 
1115
    ret = RTEMS_SUCCESSFUL;
1116
 
1117
out_unlock:
1118
    /* Restore interrupts. */
1119
#ifdef SPWL_CHECK_CONCURRENT_CALL
1120
    h->recvbusy = 0;
1121
#endif
1122
    rtems_interrupt_enable(level);
1123
 
1124
    *buf    = r_buf;
1125
    *nbytes = r_nbytes;
1126
    *eop    = r_eop;
1127
    return ret;
1128
}
1129
 
1130
 
1131
/* Release receive buffers back to the driver. */
1132
rtems_status_code spwl_release_rxbuf(spwl_handle h, void *buf)
1133
{
1134
    rtems_interrupt_level level;
1135
 
1136
    /* Disable interrupts. */
1137
    rtems_interrupt_disable(level);
1138
 
1139
    /* Insert buffer at head of RX ring. */
1140
    writemem(&h->rxdesc[h->rxdescqh].ptr, (uint32_t)buf);
1141
    writemem(&h->rxdesc[h->rxdescqh].flags,
1142
             h->rxbufsize | SPWL_DESC_EN | SPWL_DESC_IE);
1143
    h->rxdescqh = (h->rxdescqh + 1) & (h->ndesc - 1);
1144
    h->rxdescqlen++;
1145
 
1146
    /* Restart RX DMA. */
1147
    spwl_ctrl_setbits(h, SPWL_CONTROL_RXDMA);
1148
 
1149
    /* Restore interrupts. */
1150
    rtems_interrupt_enable(level);
1151
 
1152
    return RTEMS_SUCCESSFUL;
1153
}
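
/*
 * Usage sketch (hypothetical): zero-copy receive. The driver hands out one
 * of its own receive buffers; the application must hand it back with
 * spwl_release_rxbuf() when done, otherwise the RX ring slowly drains.
 */
#if 0
static void example_recv_zero_copy(spwl_handle h)
{
    void *buf;
    uint16_t nbytes;
    unsigned int eop;

    if (spwl_recv_rxbuf(h, &buf, &nbytes, &eop, 0) == RTEMS_SUCCESSFUL) {
        /* ... process nbytes bytes at buf (one frame or frame fragment;
           eop != 0 marks the end of the packet) ... */
        spwl_release_rxbuf(h, buf);
    }
}
#endif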
1154
 
1155
 
1156
/* Submit data for transmission to the SpaceWire link without copying. */
1157
rtems_status_code spwl_send_txbuf(spwl_handle h,
1158
                                  struct spwl_txbuf *buf, unsigned int flags)
1159
{
1160
    rtems_status_code   ret;
1161
    rtems_interrupt_level level;
1162
    unsigned int        txdescqp;
1163
    uint32_t            descf;
1164
 
1165
    /* Disable interrupts. */
1166
    rtems_interrupt_disable(level);
1167
 
1168
#ifdef SPWL_CHECK_CONCURRENT_CALL
1169
    /* Limit damage in case of concurrent calls. */
1170
    if (h->sendbusy) {
1171
        rtems_interrupt_enable(level);
1172
        return RTEMS_RESOURCE_IN_USE;
1173
    }
1174
    h->sendbusy = 1;
1175
#endif
1176
 
1177
    /* Make sure there is room in the TX ring. */
1178
    while (1) {
1179
 
1180
        /* Reap completed TX descriptors if possible. */
1181
        reap_tx_descriptors(h);
1182
 
1183
        if (h->txdescqlen > 0 && h->txappnact == 0) {
1184
            /* Internal buffers in the TX ring; wait until they are gone. */
1185
            txdescqp = (h->txdescqh - 1) & (h->ndesc - 1);
1186
        } else if (h->txdescqlen >= h->ndesc) {
1187
            /* TX ring is full; wait until at least one buffer is gone. */
1188
            txdescqp = (h->txdescqh - h->txdescqlen) & (h->ndesc - 1);
1189
        } else {
1190
            /* Good to go. */
1191
            break;
1192
        }
1193
 
1194
        /* There is currently no room.
1195
           Stop if the application does not want to wait. */
1196
        if ((flags & SPWL_NO_WAIT) != 0) {
1197
            ret = RTEMS_UNSATISFIED;
1198
            goto out_unlock;
1199
        }
1200
 
1201
        /* Enable interrupt on data transmitted. */
1202
        spwl_ctrl_setbits(h, SPWL_CONTROL_IETXDESC);
1203
 
1204
        /* Final check for completed TX descriptor (avoid race condition). */
1205
        descf = readmem_nocache(&h->txdesc[txdescqp].flags);
1206
        if ((descf & SPWL_DESC_DONE) == 0) {
1207
            /* Wait until TX interrupt. */
1208
            wait_for_interrupt(h->seminterrupt, level, RTEMS_NO_TIMEOUT);
1209
            rtems_interrupt_disable(level);
1210
        }
1211
    }
1212
 
1213
    /* There is room for at least one frame.
1214
       Insert buffer at head of application-buffer list. */
1215
    buf->next = NULL;
1216
    if (h->txappqh != NULL)
1217
        h->txappqh->next = buf;
1218
    else
1219
        h->txappqt = buf;
1220
    h->txappqh = buf;
1221
    h->txappnact++;
1222
 
1223
    /* Insert buffer at head of TX descriptor ring. */
1224
    descf = buf->nbytes |
1225
            SPWL_EOP_FLAG_TO_DESC(buf->eop) | SPWL_DESC_EN | SPWL_DESC_IE;
1226
    writemem(&h->txdesc[h->txdescqh].ptr, (uint32_t)(buf->data));
1227
    writemem(&h->txdesc[h->txdescqh].flags, descf);
1228
    h->txdescqh = (h->txdescqh + 1) & (h->ndesc - 1);
1229
    h->txdescqlen++;
1230
 
1231
    /* Restart TX DMA. */
1232
    spwl_ctrl_setbits(h, SPWL_CONTROL_TXDMA);
1233
 
1234
    ret = RTEMS_SUCCESSFUL;
1235
 
1236
out_unlock:
1237
    /* Restore interrupts. */
1238
#ifdef SPWL_CHECK_CONCURRENT_CALL
1239
    h->sendbusy = 0;
1240
#endif
1241
    rtems_interrupt_enable(level);
1242
 
1243
    return ret;
1244
}
1245
 
1246
 
1247
/* Reclaim transmit buffers after completion of transmission. */
1248
rtems_status_code spwl_reclaim_txbuf(spwl_handle h,
1249
                                     struct spwl_txbuf **buf, unsigned flags)
1250
{
1251
    rtems_status_code   ret;
1252
    rtems_interrupt_level level;
1253
    struct spwl_txbuf  *r_buf = NULL;
1254
    unsigned int        txdescqt;
1255
    uint32_t            descf;
1256
 
1257
    /* Disable interrupts. */
1258
    rtems_interrupt_disable(level);
1259
 
1260
    /* Make sure the reclaim list is not empty. */
1261
    while (1) {
1262
 
1263
        /* Reap completed TX descriptors if possible. */
1264
        reap_tx_descriptors(h);
1265
 
1266
        /* Check that the reclaim list is non-empty. */
1267
        if (h->txappnrcl > 0)
1268
            break;
1269
 
1270
        /* No buffers ready to reclaim.
1271
           Stop if the application does not want to wait. */
1272
        if ((flags & SPWL_NO_WAIT) != 0) {
1273
            ret = RTEMS_UNSATISFIED;
1274
            goto out_unlock;
1275
        }
1276
 
1277
        /* Enable interrupt on data transmitted. */
1278
        spwl_ctrl_setbits(h, SPWL_CONTROL_IETXDESC);
1279
 
1280
        /* Final check for completed TX descriptors (avoid race condition). */
1281
        if (h->txappnact > 0) {
1282
            /* There are application buffers in the TX ring.
1283
               Maybe one has completed in the meantime. */
1284
            txdescqt = (h->txdescqh - h->txdescqlen) & (h->ndesc - 1);
1285
            descf = readmem_nocache(&h->txdesc[txdescqt].flags);
1286
            if ((descf & SPWL_DESC_DONE) != 0)
1287
                continue;
1288
        }
1289
 
1290
        /* Wait until TX interrupt. */
1291
        wait_for_interrupt(h->seminterrupt, level, RTEMS_NO_TIMEOUT);
1292
        rtems_interrupt_disable(level);
1293
    }
1294
 
1295
    /* The reclaim list is non-empty.
1296
       Pass one reclaimable buffer to the application. */
1297
    r_buf = h->txappqt;
1298
    h->txappqt = h->txappqt->next;
1299
    if (h->txappqt == NULL)
1300
        h->txappqh = NULL;
1301
    h->txappnrcl--;
1302
    r_buf->next = NULL;
1303
 
1304
    ret = RTEMS_SUCCESSFUL;
1305
 
1306
out_unlock:
1307
    /* Restore interrupts. */
1308
    rtems_interrupt_enable(level);
1309
 
1310
    /* Return */
1311
    *buf = r_buf;
1312
    return ret;
1313
}
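
/*
 * Usage sketch (hypothetical): zero-copy transmit. The spwl_txbuf structure
 * and the data it points to must stay valid (the core fetches the data by
 * DMA) until spwl_reclaim_txbuf() has returned the buffer. Only the fields
 * used by spwl_send_txbuf() above are filled in.
 */
#if 0
static void example_send_zero_copy(spwl_handle h,
                                   struct spwl_txbuf *tx,
                                   void *data, uint16_t nbytes)
{
    struct spwl_txbuf *done;

    tx->data   = data;
    tx->nbytes = nbytes;
    tx->eop    = SPWL_EOP;          /* terminate the packet normally */

    if (spwl_send_txbuf(h, tx, 0) == RTEMS_SUCCESSFUL) {
        /* Block until the frame has been transmitted, then take the buffer
           back; 'done' equals 'tx' when only one buffer is queued. */
        spwl_reclaim_txbuf(h, &done, 0);
    }
}
#endif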
1314
 
1315
 
1316
/* Return last received timecode. */
1317
uint8_t spwl_get_timecode(spwl_handle h)
1318
{
1319
    uint32_t v;
1320
 
1321
    /* Clear "tick" bit in status register. */
1322
    writereg(h->devaddr + SPWL_REG_STATUS, SPWL_STATUS_TICK);
1323
 
1324
    /* Read last received timecode. */
1325
    v = readreg(h->devaddr + SPWL_REG_TIMECODE);
1326
    return v & 0xff;
1327
}
1328
 
1329
 
1330
/* Send a timecode to the SpaceWire link. */
1331
rtems_status_code spwl_send_timecode(spwl_handle h, uint8_t timecode)
1332
{
1333
    writereg(h->devaddr + SPWL_REG_TIMECODE, 0x10000 | (timecode << 8));
1334
    return RTEMS_SUCCESSFUL;
1335
}
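
/*
 * Usage sketch (hypothetical): send an incrementing time code as time
 * master, or wait for and read a time code received from the remote end.
 */
#if 0
static void example_timecodes(spwl_handle h)
{
    static uint8_t tc = 0;
    unsigned int cond = SPWL_COND_TIMECODE;

    /* Time master: broadcast the next time code value. */
    spwl_send_timecode(h, tc++);

    /* Time slave: wait up to one second for a tick and read it. */
    if (spwl_wait(h, &cond, rtems_clock_get_ticks_per_second()) ==
            RTEMS_SUCCESSFUL && (cond & SPWL_COND_TIMECODE) != 0) {
        uint8_t rxtc = spwl_get_timecode(h);
        (void)rxtc;
    }
}
#endif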
1336
 
1337
/* vim: expandtab softtabstop=4
1338
*/
1339
/* end */
