OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

[/] [or1k/] [tags/] [stable_0_2_0/] [or1ksim/] [peripheral/] [dma.c] - Blame information for rev 212

Go to most recent revision | Details | Compare with Previous | View Log

Line No. Rev Author Line
1 212 erez
/* dma.c -- Simulation of DMA
2
   Copyright (C) 2001 by Erez Volk, erez@mailandnews.com
3
 
4
   This file is part of OpenRISC 1000 Architectural Simulator.
5
 
6
   This program is free software; you can redistribute it and/or modify
7
   it under the terms of the GNU General Public License as published by
8
   the Free Software Foundation; either version 2 of the License, or
9
   (at your option) any later version.
10
 
11
   This program is distributed in the hope that it will be useful,
12
   but WITHOUT ANY WARRANTY; without even the implied warranty of
13
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14
   GNU General Public License for more details.
15
 
16
   You should have received a copy of the GNU General Public License
17
   along with this program; if not, write to the Free Software
18
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19
   */
20
 
21
/*
22
 * This simulation of the DMA core is not meant to be full.
23
 * It is written only to allow simulating the Ethernet core.
24
 * Of course, if anyone feels like perfecting it, feel free...
25
 */
26
 
27
#include "dma.h"
28
#include "sim-config.h"
29
#include "trace.h"
30
#include "pic.h"
31
#include "fields.h"
32
 
33
/* TODO List:
34
 * - "Restarting DMA Transfers"
35
 */
36
 
37
/* The representation of the DMA controllers */
static struct dma_controller dmas[NR_DMAS];

/* Forward declarations of internal helpers (all file-local) */
static unsigned long dma_read_ch_csr( struct dma_channel *channel );
static void dma_write_ch_csr( struct dma_channel *channel, unsigned long value );
static void dma_controller_clock( struct dma_controller *dma );
static void dma_load_descriptor( struct dma_channel *channel );
static void dma_init_transfer( struct dma_channel *channel );
static void dma_channel_terminate_transfer( struct dma_channel *channel, int generate_interrupt );

/* Add 4 to *value, changing only the bits selected by mask */
static void masked_increase( unsigned long *value, unsigned long mask );

/* True when a channel in HW-handshake mode with external descriptors sees dma_nd_i asserted */
#define CHANNEL_ND_I(ch) (TEST_FLAG(ch->regs.csr,DMA_CH_CSR,MODE) && TEST_FLAG(ch->regs.csr,DMA_CH_CSR,USE_ED) && ch->dma_nd_i)
50
 
51
 
52
/* Reset. Initializes all registers to default and places devices in memory address space. */
53
void dma_reset()
54
{
55
    unsigned i;
56
 
57
    memset( dmas, 0, sizeof(dmas) );
58
 
59
    for ( i = 0; i < NR_DMAS; ++ i )
60
    {
61
        struct dma_controller *dma = &(dmas[i]);
62
        unsigned channel_number;
63
 
64
        dma->baseaddr = config.dmas[i].baseaddr;
65
        for ( channel_number = 0; channel_number < DMA_NUM_CHANNELS; ++ channel_number )
66
        {
67
            dma->ch[channel_number].controller = &(dmas[i]);
68
            dma->ch[channel_number].channel_number = channel_number;
69
            dma->ch[channel_number].channel_mask = 1LU << channel_number;
70
            dma->ch[channel_number].regs.am0 = dma->ch[channel_number].regs.am1 = 0xFFFFFFFC;
71
        }
72
        if ( dma->baseaddr != 0 )
73
            register_memoryarea( dma->baseaddr, DMA_ADDR_SPACE, dma_read, dma_write );
74
    }
75
}
76
 
77
/* Print register values on stdout */
78
void dma_status( void )
79
{
80
    unsigned i, j;
81
 
82
    for ( i = 0; i < NR_DMAS; ++ i )
83
    {
84
        struct dma_controller *dma = &(dmas[i]);
85
 
86
        if ( dma->baseaddr == 0 )
87
            continue;
88
 
89
        printf( "\nDMA controller %u at 0x%08X:\n", i, dma->baseaddr );
90
        printf( "CSR       : 0x%08lX\n", dma->regs.csr );
91
        printf( "INT_MSK_A : 0x%08lX\n", dma->regs.int_msk_a );
92
        printf( "INT_MSK_B : 0x%08lX\n", dma->regs.int_msk_b );
93
        printf( "INT_SRC_A : 0x%08lX\n", dma->regs.int_src_a );
94
        printf( "INT_SRC_B : 0x%08lX\n", dma->regs.int_src_b );
95
 
96
        for ( j = 0; j < DMA_NUM_CHANNELS; ++ j )
97
        {
98
            struct dma_channel *channel = &(dma->ch[j]);
99
            if ( !channel->referenced )
100
                continue;
101
            printf( "CH%u_CSR   : 0x%08lX\n", j, channel->regs.csr );
102
            printf( "CH%u_SZ    : 0x%08lX\n", j, channel->regs.sz );
103
            printf( "CH%u_A0    : 0x%08lX\n", j, channel->regs.a0 );
104
            printf( "CH%u_AM0   : 0x%08lX\n", j, channel->regs.am0 );
105
            printf( "CH%u_A1    : 0x%08lX\n", j, channel->regs.a1 );
106
            printf( "CH%u_AM1   : 0x%08lX\n", j, channel->regs.am1 );
107
            printf( "CH%u_DESC  : 0x%08lX\n", j, channel->regs.desc );
108
            printf( "CH%u_SWPTR : 0x%08lX\n", j, channel->regs.swptr );
109
        }
110
    }
111
}
112
 
113
 
114
/* Read a register */
115
unsigned long dma_read( unsigned long addr )
116
{
117
    unsigned i;
118
    struct dma_controller *dma = NULL;
119
 
120
    for ( i = 0; i < NR_DMAS && dma == NULL; ++ i )
121
    {
122
        if ( addr >= dmas[i].baseaddr && addr < dmas[i].baseaddr + DMA_ADDR_SPACE )
123
            dma = &(dmas[i]);
124
    }
125
 
126
    /* verify we found a controller */
127
    if ( dma == NULL )
128
    {
129
        debug( "dma_read( 0x%08lX ): Out of range\n", addr );
130
        cont_run = 0;
131
        return 0;
132
    }
133
 
134
    addr -= dma->baseaddr;
135
 
136
    if ( addr % 4 != 0 )
137
    {
138
        debug( "dma_read( 0x%08lX ): Not register-aligned\n", addr + dma->baseaddr );
139
        cont_run = 0;
140
        return 0;
141
    }
142
 
143
    /* case of global (not per-channel) registers */
144
    if ( addr < DMA_CH_BASE )
145
    {
146
        switch( addr )
147
        {
148
        case DMA_CSR: return dma->regs.csr;
149
        case DMA_INT_MSK_A: return dma->regs.int_msk_a;
150
        case DMA_INT_MSK_B: return dma->regs.int_msk_b;
151
        case DMA_INT_SRC_A: {
152
            /* TODO: Doc doesn't say clear the bits, but this looks right. Check it */
153
            unsigned long result = dma->regs.int_src_a;
154
            dma->regs.int_src_a = 0;
155
            return result;
156
        }
157
        case DMA_INT_SRC_B: {
158
            unsigned long result = dma->regs.int_src_b;
159
            dma->regs.int_src_b = 0;
160
            return result;
161
        }
162
        default:
163
            debug( "dma_read( 0x%08lX ): Illegal register\n", addr + dma->baseaddr );
164
            cont_run = 0;
165
            return 0;
166
        }
167
    }
168
    else
169
    {
170
        /* case of per-channel registers */
171
        unsigned chno = (addr - DMA_CH_BASE) / DMA_CH_SIZE;
172
        addr = (addr - DMA_CH_BASE) % DMA_CH_SIZE;
173
        switch( addr )
174
        {
175
        case DMA_CH_CSR: return dma_read_ch_csr( &(dma->ch[chno]) );
176
        case DMA_CH_SZ: return dma->ch[chno].regs.sz;
177
        case DMA_CH_A0: return dma->ch[chno].regs.a0;
178
        case DMA_CH_AM0: return dma->ch[chno].regs.am0;
179
        case DMA_CH_A1: return dma->ch[chno].regs.a1;
180
        case DMA_CH_AM1: return dma->ch[chno].regs.am1;
181
        case DMA_CH_DESC: return dma->ch[chno].regs.desc;
182
        case DMA_CH_SWPTR: return dma->ch[chno].regs.swptr;
183
        }
184
    }
185
}
186
 
187
 
188
/* Handle read from a channel CSR */
189
unsigned long dma_read_ch_csr( struct dma_channel *channel )
190
{
191
    unsigned long result = channel->regs.csr;
192
 
193
    /* before returning, clear all relevant bits */
194
    CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, INT_CHUNK_DONE );
195
    CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, INT_DONE );
196
    CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, INT_ERR );
197
    CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, ERR );
198
 
199
    return result;
200
}
201
 
202
 
203
 
204
/* Write a register */
205
void dma_write( unsigned long addr, unsigned long value )
206
{
207
    unsigned i;
208
    struct dma_controller *dma = NULL;
209
 
210
    /* Find which controller this is */
211
    for ( i = 0; i < NR_DMAS && dma == NULL; ++ i )
212
    {
213
        if ( (addr >= dmas[i].baseaddr) && (addr < dmas[i].baseaddr + DMA_ADDR_SPACE) )
214
            dma = &(dmas[i]);
215
    }
216
 
217
    /* verify we found a controller */
218
    if ( dma == NULL )
219
    {
220
        debug( "dma_write( 0x%08lX ): Out of range\n", addr );
221
        cont_run = 0;
222
        return;
223
    }
224
 
225
    addr -= dma->baseaddr;
226
 
227
    if ( addr % 4 != 0 )
228
    {
229
        debug( "dma_write( 0x%08lX ): Not register-aligned\n", addr + dma->baseaddr );
230
        cont_run = 0;
231
        return;
232
    }
233
 
234
    /* case of global (not per-channel) registers */
235
    if ( addr < DMA_CH_BASE )
236
    {
237
        switch( addr )
238
        {
239
        case DMA_CSR:
240
            if ( TEST_FLAG( value, DMA_CSR, PAUSE ) )
241
                debug( "dma: PAUSE not implemented\n" );
242
            break;
243
 
244
        case DMA_INT_MSK_A: dma->regs.int_msk_a = value; break;
245
        case DMA_INT_MSK_B: dma->regs.int_msk_b = value; break;
246
        case DMA_INT_SRC_A: dma->regs.int_src_a = value; break;
247
        case DMA_INT_SRC_B: dma->regs.int_src_b = value; break;
248
        default:
249
            debug( "dma_write( 0x%08lX ): Illegal register\n", addr + dma->baseaddr );
250
            cont_run = 0;
251
            return;
252
        }
253
    }
254
    else
255
    {
256
        /* case of per-channel registers */
257
        unsigned chno = (addr - DMA_CH_BASE) / DMA_CH_SIZE;
258
        struct dma_channel *channel = &(dma->ch[chno]);
259
        channel->referenced = 1;
260
        addr = (addr - DMA_CH_BASE) % DMA_CH_SIZE;
261
        switch( addr )
262
        {
263
        case DMA_CSR: dma_write_ch_csr( &(dma->ch[chno]), value ); break;
264
        case DMA_CH_SZ: channel->regs.sz = value; break;
265
        case DMA_CH_A0: channel->regs.a0 = value; break;
266
        case DMA_CH_AM0: channel->regs.am0 = value; break;
267
        case DMA_CH_A1: channel->regs.a1 = value; break;
268
        case DMA_CH_AM1: channel->regs.am1 = value; break;
269
        case DMA_CH_DESC: channel->regs.desc = value; break;
270
        case DMA_CH_SWPTR: channel->regs.swptr = value; break;
271
        }
272
    }
273
}
274
 
275
 
276
/* Write a channel CSR
277
 * This ensures only the writable bits are modified.
278
 */
279
void dma_write_ch_csr( struct dma_channel *channel, unsigned long value )
280
{
281
    /* Copy the writable bits to the channel CSR */
282
    channel->regs.csr &= ~DMA_CH_CSR_WRITE_MASK;
283
    channel->regs.csr |= value & DMA_CH_CSR_WRITE_MASK;
284
}
285
 
286
 
287
/*
288
 * Simulation of control signals
289
 * To be used by simulations for other devices, e.g. ethernet
290
 */
291
 
292
void set_dma_req_i( unsigned dma_controller, unsigned channel )
293
{
294
    dmas[dma_controller].ch[channel].dma_req_i = 1;
295
}
296
 
297
void clear_dma_req_i( unsigned dma_controller, unsigned channel )
298
{
299
    dmas[dma_controller].ch[channel].dma_req_i = 0;
300
}
301
 
302
void set_dma_nd_i( unsigned dma_controller, unsigned channel )
303
{
304
    dmas[dma_controller].ch[channel].dma_nd_i = 1;
305
}
306
 
307
void clear_dma_nd_i( unsigned dma_controller, unsigned channel )
308
{
309
    dmas[dma_controller].ch[channel].dma_nd_i = 0;
310
}
311
 
312
unsigned check_dma_acq_o( unsigned dma_controller, unsigned channel )
313
{
314
    return dmas[dma_controller].ch[channel].dma_acq_o;
315
}
316
 
317
 
318
 
319
/* Simulation hook. Must be called every clock cycle to simulate DMA. */
320
void dma_clock()
321
{
322
    unsigned i;
323
    for ( i = 0; i < NR_DMAS; ++ i )
324
    {
325
        if ( dmas[i].baseaddr != 0 )
326
            dma_controller_clock( &(dmas[i]) );
327
    }
328
}
329
 
330
 
331
/* Clock tick for one DMA controller.
332
 * This does the actual "DMA" operation.
333
 * One chunk is transferred per clock.
334
 */
335
void dma_controller_clock( struct dma_controller *dma )
336
{
337
    unsigned chno, i;
338
    int breakpoint = 0;
339
 
340
    for ( chno = 0; chno < DMA_NUM_CHANNELS; ++ chno )
341
    {
342
        struct dma_channel *channel = &(dma->ch[chno]);
343
 
344
        /* check if this channel is enabled */
345
        if ( !TEST_FLAG( channel->regs.csr, DMA_CH_CSR, CH_EN ) )
346
            continue;
347
 
348
        /* Do we need to abort? */
349
        if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, STOP ) )
350
        {
351
            debug( "DMA: STOP requested\n" );
352
            CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, CH_EN );
353
            CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, BUSY );
354
            SET_FLAG( channel->regs.csr, DMA_CH_CSR, ERR );
355
 
356
            if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, INE_ERR ) &&
357
                 (channel->controller->regs.int_msk_a & channel->channel_mask) )
358
            {
359
                SET_FLAG( channel->regs.csr, DMA_CH_CSR, INT_ERR );
360
                channel->controller->regs.int_src_a = channel->channel_mask;
361
                report_interrupt( INT_DMA );
362
            }
363
 
364
            continue;
365
        }
366
 
367
        /* In HW Handshake mode, only work when dma_req_i asserted */
368
        if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, MODE ) &&
369
             !channel->dma_req_i )
370
        {
371
            debug( "DMA: Waiting for HW handshake\n" );
372
            continue;
373
        }
374
 
375
        /* If this is the first cycle of the transfer, initialize our state */
376
        if ( !TEST_FLAG( channel->regs.csr, DMA_CH_CSR, BUSY ) )
377
        {
378
            CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, DONE );
379
            CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, ERR );
380
            SET_FLAG( channel->regs.csr, DMA_CH_CSR, BUSY );
381
 
382
            /* If using linked lists, copy the appropriate fields to our registers */
383
            if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, USE_ED ) )
384
                dma_load_descriptor( channel );
385
            else
386
                channel->load_next_descriptor_when_done = 0;
387
 
388
            /* Set our internal status */
389
            dma_init_transfer( channel );
390
 
391
            /* Might need to skip descriptor */
392
            if ( CHANNEL_ND_I( channel ) )
393
            {
394
                debug( "DMA: dma_nd_i asserted before dma_req_i, skipping descriptor\n" );
395
                dma_channel_terminate_transfer( channel, 0 );
396
                continue;
397
            }
398
        }
399
 
400
        /* Transfer one word */
401
        set_mem32( channel->destination, eval_mem32( channel->source, &breakpoint ), &breakpoint );
402
 
403
        /* Advance the source and destionation pointers */
404
        masked_increase( &(channel->source), channel->source_mask );
405
        masked_increase( &(channel->destination), channel->destination_mask );
406
        ++ channel->words_transferred;
407
 
408
        /* Have we finished a whole chunk? */
409
        channel->dma_acq_o = (channel->words_transferred % channel->chunk_size == 0);
410
 
411
        /* When done with a chunk, check for dma_nd_i */
412
        if ( CHANNEL_ND_I( channel ) )
413
        {
414
            debug( "DMA: dma_nd_i asserted, \n" );
415
            dma_channel_terminate_transfer( channel, 0 );
416
            continue;
417
        }
418
 
419
        /* Are we done? */
420
        if ( channel->words_transferred >= channel->total_size )
421
            dma_channel_terminate_transfer( channel, 1 );
422
    }
423
}
424
 
425
 
426
/* Copy relevant valued from linked list descriptor to channel registers */
427
void dma_load_descriptor( struct dma_channel *channel )
428
{
429
    int breakpoint = 0;
430
    unsigned long desc_csr = eval_mem32( channel->regs.desc + DMA_DESC_CSR, &breakpoint );
431
 
432
    channel->load_next_descriptor_when_done = !TEST_FLAG( desc_csr, DMA_DESC_CSR, EOL );
433
 
434
    ASSIGN_FLAG( channel->regs.csr, DMA_CH_CSR, INC_SRC, TEST_FLAG( desc_csr, DMA_DESC_CSR, INC_SRC ) );
435
    ASSIGN_FLAG( channel->regs.csr, DMA_CH_CSR, INC_DST, TEST_FLAG( desc_csr, DMA_DESC_CSR, INC_DST ) );
436
    ASSIGN_FLAG( channel->regs.csr, DMA_CH_CSR, SRC_SEL, TEST_FLAG( desc_csr, DMA_DESC_CSR, SRC_SEL ) );
437
    ASSIGN_FLAG( channel->regs.csr, DMA_CH_CSR, DST_SEL, TEST_FLAG( desc_csr, DMA_DESC_CSR, DST_SEL ) );
438
 
439
    SET_FIELD( channel->regs.sz, DMA_CH_SZ, TOT_SZ,  GET_FIELD( desc_csr, DMA_DESC_CSR, TOT_SZ ) );
440
 
441
    channel->regs.a0 = eval_mem32( channel->regs.desc + DMA_DESC_ADR0, &breakpoint );
442
    channel->regs.a1 = eval_mem32( channel->regs.desc + DMA_DESC_ADR1, &breakpoint );
443
 
444
    channel->current_descriptor = channel->regs.desc;
445
    channel->regs.desc = eval_mem32( channel->regs.desc + DMA_DESC_NEXT, &breakpoint );
446
}
447
 
448
 
449
/* Initialize internal parameters used to implement transfers */
450
void dma_init_transfer( struct dma_channel *channel )
451
{
452
    channel->source = channel->regs.a0;
453
    channel->destination = channel->regs.a1;
454
    channel->source_mask = TEST_FLAG( channel->regs.csr, DMA_CH_CSR, INC_SRC ) ? channel->regs.am0 : 0;
455
    channel->destination_mask = TEST_FLAG( channel->regs.csr, DMA_CH_CSR, INC_DST ) ? channel->regs.am1 : 0;
456
    channel->total_size = GET_FIELD( channel->regs.sz, DMA_CH_SZ, TOT_SZ );
457
    channel->chunk_size = GET_FIELD( channel->regs.sz, DMA_CH_SZ, CHK_SZ );
458
    if ( !channel->chunk_size || (channel->chunk_size > channel->total_size) )
459
        channel->chunk_size = channel->total_size;
460
    channel->words_transferred = 0;
461
}
462
 
463
 
464
/* Take care of transfer termination */
465
void dma_channel_terminate_transfer( struct dma_channel *channel, int generate_interrupt )
466
{
467
    /* Might be working in a linked list */
468
    if ( channel->load_next_descriptor_when_done )
469
    {
470
        dma_load_descriptor( channel );
471
        dma_init_transfer( channel );
472
        return;
473
    }
474
 
475
    /* Might be in auto-restart mode */
476
    if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, ARS ) )
477
    {
478
        dma_init_transfer( channel );
479
        return;
480
    }
481
 
482
    /* If needed, write amount of data transferred back to memory */
483
    if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, SZ_WB ) &&
484
         TEST_FLAG( channel->regs.csr, DMA_CH_CSR, USE_ED ) )
485
    {
486
        int breakpoint = 0;
487
        unsigned long desc_csr = eval_mem32( channel->regs.desc + DMA_DESC_CSR, &breakpoint );
488
        /* TODO: What should we write back? Doc says "total number of remaining bytes" !? */
489
        unsigned long remaining_words = channel->total_size - channel->words_transferred;
490
        SET_FIELD( channel->regs.sz, DMA_DESC_CSR, TOT_SZ, remaining_words );
491
    }
492
 
493
    /* Mark end of transfer */
494
    CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, CH_EN );
495
    SET_FLAG( channel->regs.csr, DMA_CH_CSR, DONE );
496
    CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, ERR );
497
    CLEAR_FLAG( channel->regs.csr, DMA_CH_CSR, BUSY );
498
 
499
    /* If needed, generate interrupt */
500
    if ( generate_interrupt )
501
    {
502
        /* TODO: Which channel should we interrupt? */
503
        if ( TEST_FLAG( channel->regs.csr, DMA_CH_CSR, INE_DONE ) &&
504
             (channel->controller->regs.int_msk_a & channel->channel_mask) )
505
        {
506
            SET_FLAG( channel->regs.csr, DMA_CH_CSR, INT_DONE );
507
            channel->controller->regs.int_src_a = channel->channel_mask;
508
            report_interrupt( INT_DMA );
509
        }
510
    }
511
}
512
 
513
/* Utility function: Add 4 to a value with a mask.
 * Only the bits selected by mask change; the increment wraps around
 * inside the masked field, leaving all other bits untouched.
 * BUG FIX: the original did not mask the sum, so a carry out of the
 * masked field could spill into the preserved bits (visible when
 * unsigned long is wider than the 32-bit mask, e.g. on 64-bit hosts). */
void masked_increase( unsigned long *value, unsigned long mask )
{
    *value = (*value & ~mask) | (((*value & mask) + 4) & mask);
}

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.