/* $Id: dma.h,v 1.1.1.1 2004-04-15 02:38:36 phoenix Exp $
 * linux/include/asm/dma.h: Defines for using and allocating dma channels.
 * Written by Hennus Bergman, 1992.
 * High DMA channel support & info by Hannu Savolainen
 * and John Boyd, Nov. 1992.
 * (c) Copyright 2000, Grant Grundler
 */
|
8 |
|
|
|
9 |
|
|
#ifndef _ASM_DMA_H
|
10 |
|
|
#define _ASM_DMA_H
|
11 |
|
|
|
12 |
|
|
#include <linux/config.h>
|
13 |
|
|
#include <asm/io.h> /* need byte IO */
|
14 |
|
|
#include <asm/system.h>
|
15 |
|
|
|
16 |
|
|
/* The 8237 lives behind ordinary port I/O on parisc (SuperIO), so the
** DMA register accessors are just the generic byte I/O helpers. */
#define dma_outb outb
#define dma_inb inb
|
18 |
|
|
|
19 |
|
|
/*
** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up
** (or rather not merge) DMAs into manageable chunks.
** On parisc, this is more of a software/tuning constraint
** than a HW one. I/O MMU allocation algorithms can be
** faster with smaller sizes (to some degree).
*/
#define DMA_CHUNK_SIZE (BITS_PER_LONG*PAGE_SIZE)
|
27 |
|
|
|
28 |
|
|
/* The maximum address that we can perform a DMA transfer to on this platform.
** No addressing limit on parisc, hence all-ones.
** New dynamic DMA interfaces should obsolete this....
*/
#define MAX_DMA_ADDRESS (~0UL)
|
32 |
|
|
|
33 |
|
|
|
34 |
|
|
/*
** We don't have DMA channels... well V-class does but the
** Dynamic DMA Mapping interface will support them... right? :^)
** Note: this is not relevant right now for PA-RISC, but we cannot
** leave this as undefined because some things (e.g. sound)
** won't compile :-(
*/
#define MAX_DMA_CHANNELS 8
#define DMA_MODE_READ 1	/* 8237 mode bits: device -> memory */
#define DMA_MODE_WRITE 2	/* memory -> device */
#define DMA_AUTOINIT 0x10	/* auto-reinitialize after terminal count */
|
45 |
|
|
|
46 |
|
|
/* 8237 DMA controllers (standard PC/AT register layout) */
#define IO_DMA1_BASE	0x00	/* 8 bit slave DMA, channels 0..3 */
#define IO_DMA2_BASE	0xC0	/* 16 bit master DMA, ch 4(=slave input)..7 */

/* DMA controller registers */
#define DMA1_CMD_REG	0x08	/* command register (w) */
#define DMA1_STAT_REG	0x08	/* status register (r) */
#define DMA1_REQ_REG	0x09	/* request register (w) */
#define DMA1_MASK_REG	0x0A	/* single-channel mask (w) */
#define DMA1_MODE_REG	0x0B	/* mode register (w) */
#define DMA1_CLEAR_FF_REG	0x0C	/* clear pointer flip-flop (w) */
#define DMA1_TEMP_REG	0x0D	/* Temporary Register (r) */
#define DMA1_RESET_REG	0x0D	/* Master Clear (w) */
#define DMA1_CLR_MASK_REG	0x0E	/* Clear Mask */
#define DMA1_MASK_ALL_REG	0x0F	/* all-channels mask (w) */
#define DMA1_EXT_MODE_REG	(0x400 | DMA1_MODE_REG)

#define DMA2_CMD_REG	0xD0	/* command register (w) */
#define DMA2_STAT_REG	0xD0	/* status register (r) */
#define DMA2_REQ_REG	0xD2	/* request register (w) */
#define DMA2_MASK_REG	0xD4	/* single-channel mask (w) */
#define DMA2_MODE_REG	0xD6	/* mode register (w) */
#define DMA2_CLEAR_FF_REG	0xD8	/* clear pointer flip-flop (w) */
#define DMA2_TEMP_REG	0xDA	/* Temporary Register (r) */
#define DMA2_RESET_REG	0xDA	/* Master Clear (w) */
#define DMA2_CLR_MASK_REG	0xDC	/* Clear Mask */
#define DMA2_MASK_ALL_REG	0xDE	/* all-channels mask (w) */
#define DMA2_EXT_MODE_REG	(0x400 | DMA2_MODE_REG)
|
74 |
|
|
|
75 |
|
|
/* Serializes access to the 8237 registers; take it via claim_dma_lock(). */
extern spinlock_t dma_spin_lock;
|
76 |
|
|
|
77 |
|
|
/*
** Acquire the global DMA spinlock with local interrupts disabled.
** Returns the saved interrupt flags; hand them back to
** release_dma_lock() when done.
*/
static __inline__ unsigned long claim_dma_lock(void)
{
	unsigned long flags;
	spin_lock_irqsave(&dma_spin_lock, flags);
	return flags;
}
|
83 |
|
|
|
84 |
|
|
/*
** Drop the DMA spinlock and restore the interrupt flags previously
** returned by claim_dma_lock().
*/
static __inline__ void release_dma_lock(unsigned long flags)
{
	spin_unlock_irqrestore(&dma_spin_lock, flags);
}
|
88 |
|
|
|
89 |
|
|
|
90 |
|
|
/* Get DMA residue count. After a DMA transfer, this
|
91 |
|
|
* should return zero. Reading this while a DMA transfer is
|
92 |
|
|
* still in progress will return unpredictable results.
|
93 |
|
|
* If called before the channel has been used, it may return 1.
|
94 |
|
|
* Otherwise, it returns the number of _bytes_ left to transfer.
|
95 |
|
|
*
|
96 |
|
|
* Assumes DMA flip-flop is clear.
|
97 |
|
|
*/
|
98 |
|
|
static __inline__ int get_dma_residue(unsigned int dmanr)
|
99 |
|
|
{
|
100 |
|
|
unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE
|
101 |
|
|
: ((dmanr&3)<<2) + 2 + IO_DMA2_BASE;
|
102 |
|
|
|
103 |
|
|
/* using short to get 16-bit wrap around */
|
104 |
|
|
unsigned short count;
|
105 |
|
|
|
106 |
|
|
count = 1 + dma_inb(io_port);
|
107 |
|
|
count += dma_inb(io_port) << 8;
|
108 |
|
|
|
109 |
|
|
return (dmanr<=3)? count : (count<<1);
|
110 |
|
|
}
|
111 |
|
|
|
112 |
|
|
/* Unmask (enable) a specific DMA channel.
 * Only SuperIO-equipped machines actually have an 8237; elsewhere
 * this is a no-op.
 */
static __inline__ void enable_dma(unsigned int dmanr)
{
#ifdef CONFIG_SUPERIO
	unsigned int mask_reg = (dmanr <= 3) ? DMA1_MASK_REG : DMA2_MASK_REG;

	/* low two bits select the channel within its controller */
	dma_outb(dmanr & 3, mask_reg);
#endif
}
|
122 |
|
|
|
123 |
|
|
/* Mask (disable) a specific DMA channel; no-op without SuperIO. */
static __inline__ void disable_dma(unsigned int dmanr)
{
#ifdef CONFIG_SUPERIO
	unsigned int mask_reg = (dmanr <= 3) ? DMA1_MASK_REG : DMA2_MASK_REG;

	/* bit 2 set = mask the channel selected by the low two bits */
	dma_outb((dmanr & 3) | 4, mask_reg);
#endif
}
|
132 |
|
|
|
133 |
|
|
/* Clear the 'DMA Pointer Flip Flop'.
 * Write 0 for LSB/MSB, 1 for MSB/LSB access.
 * Use this once to initialize the FF to a known state.
 * After that, keep track of it. :-)
 * --- In order to do that, the DMA routines below should ---
 * --- only be used while holding the DMA lock ! ---
 */
static __inline__ void clear_dma_ff(unsigned int dmanr)
{
	/* intentionally a no-op on PA-RISC */
}
|
143 |
|
|
|
144 |
|
|
/* Set the transfer mode (DMA_MODE_* above) for a specific DMA channel.
 * Intentionally a no-op on PA-RISC; kept so ISA-style drivers compile.
 */
static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
{
}
|
148 |
|
|
|
149 |
|
|
/* Set only the page register bits of the transfer address.
 * This is used for successive transfers when we know the contents of
 * the lower 16 bits of the DMA current address register, but a 64k boundary
 * may have been crossed.
 * Intentionally a no-op on PA-RISC.
 */
static __inline__ void set_dma_page(unsigned int dmanr, char pagenr)
{
}
|
157 |
|
|
|
158 |
|
|
|
159 |
|
|
/* Set transfer address & page bits for specific DMA channel.
 * Assumes dma flipflop is clear.
 * Intentionally a no-op on PA-RISC.
 */
static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
{
}
|
165 |
|
|
|
166 |
|
|
|
167 |
|
|
/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for
 * a specific DMA channel.
 * You must ensure the parameters are valid.
 * NOTE: from a manual: "the number of transfers is one more
 * than the initial word count"! This is taken into account.
 * Assumes dma flip-flop is clear.
 * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
 * Intentionally a no-op on PA-RISC.
 */
static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
{
}
|
178 |
|
|
|
179 |
|
|
|
180 |
|
|
|
181 |
|
|
/* These are in kernel/dma.c: */
extern int request_dma(unsigned int dmanr, const char * device_id);	/* reserve a DMA channel */
extern void free_dma(unsigned int dmanr);	/* release it again */
extern int get_dma_list(char *buf);	/* proc/dma support */
|
185 |
|
|
|
186 |
|
|
/* Quirk flag for broken ISA DMA bridges; a real variable only when
** PCI support is built, otherwise a constant 0. */
#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy (0)
#endif
|
191 |
|
|
|
192 |
|
|
#endif /* _ASM_DMA_H */
|