OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

[/] [or1k/] [trunk/] [rc203soc/] [sw/] [uClinux/] [include/] [asm-m68knommu/] [dma.h] - Blame information for rev 1765

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 1633 jcastillo
#ifndef _M68K_DMA_H
2
#define _M68K_DMA_H 1
3
 
4
#include <linux/config.h>
5
 
6
#ifdef CONFIG_COLDFIRE
7
/*
8
 * ColdFire DMA Model:
9
 *   ColdFire DMA supports two forms of DMA: Single and Dual address. Single
10
 * address mode emits a source address, and expects that the device will either
11
 * pick up the data (DMA READ) or source data (DMA WRITE). This implies that
12
 * the device will place data on the correct byte(s) of the data bus, as the
13
 * memory transactions are always 32 bits. This implies that only 32 bit
14
 * devices will find single mode transfers useful. Dual address DMA mode
15
 * performs two cycles: source read and destination write. ColdFire will
16
 * align the data so that the device will always get the correct bytes, thus
17
 * is useful for 8 and 16 bit devices. This is the mode that is supported
18
 * below.
19
 */
20
 
21
#include <asm/coldfire.h>
22
#include <asm/mcfsim.h>
23
#include <asm/mcfdma.h>
24
 
25
/*
 * Set number of channels of DMA on ColdFire for different implementations.
 * This header selects 4 channels for the 5307, 2 for the other supported
 * parts.
 */
#if defined(CONFIG_M5307)
#define MAX_DMA_CHANNELS 4
#else
#define MAX_DMA_CHANNELS 2
#endif

/* Per-channel base addresses of the DMA register banks; defined in
   platform setup code elsewhere in the tree. */
extern unsigned int dma_base_addr[];

/* Storage for where to write/read DMA data to/from */
/* NOTE(review): this is a tentative definition (not 'extern') in a header,
   so every translation unit including this file gets its own copy and the
   values set via set_dma_device_addr() are only visible within that unit.
   Presumably this should be 'extern' here with a single definition in a .c
   file -- confirm against the rest of the tree before changing. */
unsigned int dma_device_address[MAX_DMA_CHANNELS];
38
 
39
/* Mode word layout: bit 0 selects direction, bit 1 selects transfer width.
   The four DMA_MODE_* values below are the valid combinations. */
#define DMA_MODE_WRITE_BIT 0x01  /* Memory/IO to IO/Memory select */
#define DMA_MODE_WORD_BIT  0x02  /* 8 or 16 bit transfers */

/* I/O to memory, 8 bits, mode */
#define DMA_MODE_READ            0
/* memory to I/O, 8 bits, mode */
#define DMA_MODE_WRITE           1
/* I/O to memory, 16 bits, mode */
#define DMA_MODE_READ_WORD       2
/* memory to I/O, 16 bits, mode */
#define DMA_MODE_WRITE_WORD      3
50
 
51
/* enable/disable a specific DMA channel */
52
static __inline__ void enable_dma(unsigned int dmanr)
53
{
54
  volatile unsigned short *dmawp;
55
 
56
  dmawp = (unsigned short *) dma_base_addr[dmanr];
57
  dmawp[MCFDMA_DCR] |= MCFDMA_DCR_EEXT;
58
 
59
}
60
 
61
static __inline__ void disable_dma(unsigned int dmanr)
62
{
63
  volatile unsigned short *dmawp;
64
  volatile unsigned char  *dmapb;
65
 
66
  dmawp = (unsigned short *) dma_base_addr[dmanr];
67
  dmapb = (unsigned char *) dma_base_addr[dmanr];
68
 
69
  /* Turn off external requests, and stop any DMA in progress */
70
  dmawp[MCFDMA_DCR] &= ~MCFDMA_DCR_EEXT;
71
  dmapb[MCFDMA_DSR] = MCFDMA_DSR_DONE;
72
}
73
 
74
/* Clear the 'DMA Pointer Flip Flop'.
 * Write 0 for LSB/MSB, 1 for MSB/LSB access.
 * Use this once to initialize the FF to a known state, then track it
 * yourself -- which requires that the routines below only be used with
 * interrupts disabled.
 *
 * The ColdFire DMA engine has no such flip-flop, so this is a no-op
 * stub kept purely for interface compatibility with the ISA DMA API.
 */
static __inline__ void clear_dma_ff(unsigned int dmanr)
{
  /* intentionally empty: nothing to clear on ColdFire */
}
87
 
88
/* set mode (above) for a specific DMA channel */
89
static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
90
{
91
  volatile unsigned char  *dmabp;
92
  volatile unsigned short *dmawp;
93
 
94
  dmabp = (unsigned char *) dma_base_addr[dmanr];
95
  dmawp = (unsigned short *) dma_base_addr[dmanr];
96
 
97
  // Clear config errors
98
  dmabp[MCFDMA_DSR] = MCFDMA_DSR_DONE;
99
 
100
  // Set command register
101
  dmawp[MCFDMA_DCR] =
102
    MCFDMA_DCR_INT |         // Enable completion irq
103
    MCFDMA_DCR_CS |          // Force one xfer per request
104
    MCFDMA_DCR_AA |          // Enable auto alignment
105
    // Memory to I/O or I/O to Memory
106
    ((mode & DMA_MODE_WRITE_BIT) ? MCFDMA_DCR_SINC : MCFDMA_DCR_DINC) |
107
    // 16 bit or 8 bit transfers
108
    ((mode & DMA_MODE_WORD_BIT)  ? MCFDMA_DCR_SSIZE_WORD :
109
                                   MCFDMA_DCR_SSIZE_BYTE) |
110
    ((mode & DMA_MODE_WORD_BIT)  ? MCFDMA_DCR_DSIZE_WORD :
111
                                   MCFDMA_DCR_DSIZE_BYTE) ;
112
 
113
#ifdef DMA_DEBUG
114
  printk("%s: Setting stat %x: %x ctrl %x: %x regs for chan %d\n",
115
         __FUNCTION__,
116
         &dmabp[MCFDMA_DSR], dmabp[MCFDMA_DSR],
117
         &dmawp[MCFDMA_DCR], dmawp[MCFDMA_DCR],
118
         dmanr);
119
#endif
120
}
121
/* Set transfer address for specific DMA channel */
122
static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a) {
123
  volatile unsigned short *dmawp;
124
  volatile unsigned int   *dmalp;
125
 
126
  dmawp = (unsigned short *) dma_base_addr[dmanr];
127
  dmalp = (unsigned int *) dma_base_addr[dmanr];
128
 
129
  // Determine which address registers are used for memory/device accesses
130
  if (dmawp[MCFDMA_DCR] & MCFDMA_DCR_SINC) {
131
    // Source incrementing, must be memory
132
    dmalp[MCFDMA_SAR] = a;
133
    // Set dest address, must be device
134
    dmalp[MCFDMA_DAR] = dma_device_address[dmanr];
135
  }
136
  else {
137
    // Destination incrementing, must be memory
138
    dmalp[MCFDMA_DAR] = a;
139
    // Set source address, must be device
140
    dmalp[MCFDMA_SAR] = dma_device_address[dmanr];
141
  }
142
 
143
#ifdef DMA_DEBUG
144
  printk("%s: Setting src %x dest %x addr for chan %d\n",
145
         __FUNCTION__, dmalp[MCFDMA_SAR], dmalp[MCFDMA_DAR], dmanr);
146
#endif
147
}
148
 
149
/*
150
 * Specific for Coldfire - sets device address.
151
 * Should be called after the mode set call, and before set DMA address.
152
 */
153
static __inline__ void set_dma_device_addr(unsigned int dmanr, unsigned int a) {
154
  dma_device_address[dmanr] = a;
155
#ifdef DMA_DEBUG
156
  printk("%s: Setting device addr %x for chan %d\n",
157
         __FUNCTION__, dma_device_address[dmanr], dmanr);
158
#endif DMA_DEBUG
159
}
160
 
161
/*
162
 * NOTE 2: "count" represents _bytes_.
163
 */
164
static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
165
{
166
  volatile unsigned short *dmawp;
167
 
168
  dmawp = (unsigned short *) dma_base_addr[dmanr];
169
  dmawp[MCFDMA_BCR] = (unsigned short)count;
170
 
171
#ifdef DMA_DEBUG
172
  printk("%s: Setting count %x for chan %d\n",
173
         __FUNCTION__, (unsigned short)count , dmanr);
174
#endif DMA_DEBUG
175
 
176
}
177
 
178
/* Get DMA residue count. After a DMA transfer, this
179
 * should return zero. Reading this while a DMA transfer is
180
 * still in progress will return unpredictable results.
181
 * Otherwise, it returns the number of _bytes_ left to transfer.
182
 *
183
 */
184
static __inline__ int get_dma_residue(unsigned int dmanr)
185
{
186
  volatile unsigned short *dmawp;
187
  unsigned short count;
188
 
189
  dmawp = (unsigned short *) dma_base_addr[dmanr];
190
  count = dmawp[MCFDMA_BCR];
191
  return((int) count);
192
}
193
 
194
#else /* !CONFIG_COLDFIRE */

/* Plain (MMU-less) m68k without ColdFire DMA: only the channel count is
   provided here. */
 #define MAX_DMA_CHANNELS 8

#endif /* CONFIG_COLDFIRE */

/* Don't define MAX_DMA_ADDRESS; it's useless on the m68k/coldfire and any
   occurrence should be flagged as an error.  */

/* These are in kernel/dma.c: */
 extern int request_dma(unsigned int dmanr, const char * device_id);    /* reserve a DMA channel */
 extern void free_dma(unsigned int dmanr);      /* release it again */

#endif /* _M68K_DMA_H */

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.