1 |
2 |
dsmv |
|
2 |
|
|
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/ioport.h>
#include <linux/ioport.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/interrupt.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <linux/types.h>
#include <linux/err.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/io.h>
#include <asm/uaccess.h>

#include "pexmodule.h"
#include "hardware.h"
#include "pexioctl.h"
#include "ioctlrw.h"
#include "ambpexregs.h"
#include "pexproc.h"
|
29 |
|
|
|
30 |
|
|
//-----------------------------------------------------------------------------
|
31 |
|
|
|
32 |
|
|
MODULE_AUTHOR("Vladimir Karakozov. karakozov@gmail.com");
|
33 |
|
|
MODULE_LICENSE("GPL");
|
34 |
|
|
|
35 |
|
|
//-----------------------------------------------------------------------------
|
36 |
|
|
|
37 |
|
|
static dev_t devno = MKDEV(0, 0);
|
38 |
|
|
static struct class *pex_class = NULL;
|
39 |
|
|
static LIST_HEAD(device_list);
|
40 |
|
|
static int boards_count = 0;
|
41 |
|
|
static struct mutex pex_mutex;
|
42 |
6 |
v.karak |
int dbg_trace = 1;
|
43 |
2 |
dsmv |
int err_trace = 1;
|
44 |
|
|
|
45 |
|
|
//-----------------------------------------------------------------------------
|
46 |
|
|
|
47 |
|
|
static int free_memory(struct pex_device *brd)
|
48 |
|
|
{
|
49 |
|
|
struct list_head *pos, *n;
|
50 |
|
|
struct mem_t *m = NULL;
|
51 |
|
|
int unlocked = 0;
|
52 |
|
|
|
53 |
|
|
spin_lock(&brd->m_MemListLock);
|
54 |
|
|
|
55 |
|
|
list_for_each_safe(pos, n, &brd->m_MemList) {
|
56 |
|
|
|
57 |
|
|
m = list_entry(pos, struct mem_t, list);
|
58 |
|
|
|
59 |
|
|
unlocked = unlock_pages(m->cpu_addr, m->size);
|
60 |
|
|
|
61 |
|
|
dma_free_coherent(&brd->m_pci->dev, m->size, m->cpu_addr, m->dma_handle);
|
62 |
|
|
|
63 |
|
|
dbg_msg(dbg_trace, "%s(): %d: PA = 0x%zx, VA = %p, SZ = 0x%zx, PAGES = %d\n",
|
64 |
|
|
__FUNCTION__, atomic_read(&brd->m_MemListCount), (size_t)m->dma_handle, m->cpu_addr, m->size, unlocked );
|
65 |
|
|
|
66 |
|
|
list_del(pos);
|
67 |
|
|
|
68 |
|
|
atomic_dec(&brd->m_MemListCount);
|
69 |
|
|
|
70 |
|
|
kfree(m);
|
71 |
|
|
}
|
72 |
|
|
|
73 |
|
|
spin_unlock(&brd->m_MemListLock);
|
74 |
|
|
|
75 |
|
|
return 0;
|
76 |
|
|
}
|
77 |
|
|
|
78 |
|
|
//-----------------------------------------------------------------------------
|
79 |
|
|
|
80 |
|
|
static struct pex_device *file_to_device( struct file *file )
|
81 |
|
|
{
|
82 |
|
|
return (struct pex_device*)file->private_data;
|
83 |
|
|
}
|
84 |
|
|
|
85 |
|
|
//-----------------------------------------------------------------------------
|
86 |
|
|
|
87 |
|
|
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37)
|
88 |
|
|
static struct pex_device *inode_to_device( struct list_head *head, struct inode *inode )
|
89 |
|
|
{
|
90 |
|
|
struct list_head *p;
|
91 |
|
|
struct pex_device *entry;
|
92 |
|
|
unsigned int minor = MINOR(inode->i_rdev);
|
93 |
|
|
|
94 |
|
|
list_for_each(p, head) {
|
95 |
|
|
entry = list_entry(p, struct pex_device, m_list);
|
96 |
|
|
if(entry->m_BoardIndex == minor)
|
97 |
|
|
return entry;
|
98 |
|
|
}
|
99 |
|
|
|
100 |
|
|
return NULL;
|
101 |
|
|
}
|
102 |
|
|
#endif
|
103 |
|
|
|
104 |
|
|
//-----------------------------------------------------------------------------
|
105 |
|
|
|
106 |
|
|
static int pex_device_fasync(int fd, struct file *file, int mode)
|
107 |
|
|
{
|
108 |
|
|
struct pex_device *pDevice = file->private_data;
|
109 |
|
|
if(!pDevice)
|
110 |
|
|
return -ENODEV;
|
111 |
|
|
|
112 |
|
|
return 0;
|
113 |
|
|
}
|
114 |
|
|
|
115 |
|
|
//-----------------------------------------------------------------------------
|
116 |
|
|
|
117 |
|
|
static unsigned int pex_device_poll(struct file *filp, poll_table *wait)
|
118 |
|
|
{
|
119 |
|
|
unsigned int mask = 0;
|
120 |
|
|
|
121 |
|
|
struct pex_device *pDevice = file_to_device(filp);
|
122 |
|
|
if(!pDevice)
|
123 |
|
|
return -ENODEV;
|
124 |
|
|
|
125 |
|
|
return mask;
|
126 |
|
|
}
|
127 |
|
|
|
128 |
|
|
//-----------------------------------------------------------------------------
|
129 |
|
|
|
130 |
|
|
static int pex_device_open( struct inode *inode, struct file *file )
|
131 |
|
|
{
|
132 |
|
|
struct pex_device *pDevice = container_of(inode->i_cdev, struct pex_device, m_cdev);
|
133 |
|
|
if(!pDevice) {
|
134 |
|
|
err_msg(err_trace, "%s(): Open device failed\n", __FUNCTION__);
|
135 |
|
|
return -ENODEV;
|
136 |
|
|
}
|
137 |
|
|
|
138 |
|
|
file->private_data = (void*)pDevice;
|
139 |
|
|
|
140 |
|
|
dbg_msg(dbg_trace, "%s(): Open device %s\n", __FUNCTION__, pDevice->m_name);
|
141 |
|
|
|
142 |
|
|
return 0;
|
143 |
|
|
}
|
144 |
|
|
|
145 |
|
|
//-----------------------------------------------------------------------------
|
146 |
|
|
|
147 |
|
|
static int pex_device_close( struct inode *inode, struct file *file )
|
148 |
|
|
{
|
149 |
|
|
struct pex_device *pDevice = container_of(inode->i_cdev, struct pex_device, m_cdev);
|
150 |
|
|
if(!pDevice) {
|
151 |
|
|
err_msg(err_trace, "%s(): Close device failed\n", __FUNCTION__);
|
152 |
|
|
return -ENODEV;
|
153 |
|
|
}
|
154 |
|
|
|
155 |
|
|
file->private_data = NULL;
|
156 |
|
|
|
157 |
|
|
dbg_msg(dbg_trace, "%s(): Close device %s\n", __FUNCTION__, pDevice->m_name);
|
158 |
|
|
|
159 |
|
|
return 0;
|
160 |
|
|
}
|
161 |
|
|
|
162 |
|
|
//-----------------------------------------------------------------------------
|
163 |
|
|
|
164 |
|
|
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)
|
165 |
|
|
static long pex_device_ioctl( struct file *file, unsigned int cmd, unsigned long arg )
|
166 |
|
|
#else
|
167 |
|
|
static int pex_device_ioctl( struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg )
|
168 |
|
|
#endif
|
169 |
|
|
{
|
170 |
|
|
int error = 0;
|
171 |
|
|
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)
|
172 |
|
|
struct pex_device *pDevice = file_to_device(file);
|
173 |
|
|
#else
|
174 |
|
|
struct pex_device *pDevice = inode_to_device(&device_list, inode);
|
175 |
|
|
#endif
|
176 |
|
|
if(!pDevice) {
|
177 |
|
|
err_msg(err_trace, "%s(): ioctl device failed\n", __FUNCTION__);
|
178 |
|
|
return -ENODEV;
|
179 |
|
|
}
|
180 |
|
|
|
181 |
|
|
mutex_lock(&pDevice->m_BoardMutex);
|
182 |
|
|
|
183 |
|
|
switch(cmd) {
|
184 |
|
|
case IOCTL_PEX_BOARD_INFO:
|
185 |
|
|
error = ioctl_board_info(pDevice, arg);
|
186 |
|
|
break;
|
187 |
|
|
case IOCTL_PEX_MEM_ALLOC:
|
188 |
|
|
error = ioctl_memory_alloc(pDevice, arg);
|
189 |
|
|
break;
|
190 |
|
|
case IOCTL_PEX_MEM_FREE:
|
191 |
|
|
error = ioctl_memory_free(pDevice, arg);
|
192 |
|
|
break;
|
193 |
|
|
case IOCTL_PEX_STUB_ALLOC:
|
194 |
|
|
error = ioctl_stub_alloc(pDevice, arg);
|
195 |
|
|
break;
|
196 |
|
|
case IOCTL_PEX_STUB_FREE:
|
197 |
|
|
error = ioctl_stub_free(pDevice, arg);
|
198 |
|
|
break;
|
199 |
|
|
case IOCTL_AMB_SET_MEMIO:
|
200 |
|
|
error = ioctl_set_mem(pDevice, arg);
|
201 |
|
|
break;
|
202 |
|
|
case IOCTL_AMB_FREE_MEMIO:
|
203 |
|
|
error = ioctl_free_mem(pDevice, arg);
|
204 |
|
|
break;
|
205 |
|
|
case IOCTL_AMB_START_MEMIO:
|
206 |
|
|
error = ioctl_start_mem(pDevice, arg);
|
207 |
|
|
break;
|
208 |
|
|
case IOCTL_AMB_STOP_MEMIO:
|
209 |
|
|
error = ioctl_stop_mem(pDevice, arg);
|
210 |
|
|
break;
|
211 |
|
|
case IOCTL_AMB_STATE_MEMIO:
|
212 |
|
|
error = ioctl_state_mem(pDevice, arg);
|
213 |
|
|
break;
|
214 |
|
|
case IOCTL_AMB_WAIT_DMA_BUFFER:
|
215 |
|
|
error = ioctl_wait_dma_buffer(pDevice, arg);
|
216 |
|
|
break;
|
217 |
|
|
case IOCTL_AMB_WAIT_DMA_BLOCK:
|
218 |
|
|
error = ioctl_wait_dma_block(pDevice, arg);
|
219 |
|
|
break;
|
220 |
|
|
case IOCTL_AMB_SET_SRC_MEM:
|
221 |
|
|
error = ioctl_set_src_mem(pDevice, arg);
|
222 |
|
|
break;
|
223 |
|
|
case IOCTL_AMB_SET_DIR_MEM:
|
224 |
|
|
error = ioctl_set_dir_mem(pDevice, arg);
|
225 |
|
|
break;
|
226 |
|
|
case IOCTL_AMB_SET_DRQ_MEM:
|
227 |
|
|
error = ioctl_set_drq_mem(pDevice, arg);
|
228 |
|
|
break;
|
229 |
|
|
case IOCTL_AMB_RESET_FIFO:
|
230 |
|
|
error = ioctl_reset_fifo(pDevice, arg);
|
231 |
|
|
break;
|
232 |
|
|
case IOCTL_AMB_DONE:
|
233 |
|
|
error = ioctl_done(pDevice, arg);
|
234 |
|
|
break;
|
235 |
|
|
case IOCTL_AMB_ADJUST:
|
236 |
|
|
error = ioctl_adjust(pDevice, arg);
|
237 |
|
|
break;
|
238 |
|
|
|
239 |
|
|
default:
|
240 |
|
|
dbg_msg(dbg_trace, "%s(): Unknown command\n", __FUNCTION__);
|
241 |
|
|
error = -EINVAL;
|
242 |
|
|
break;
|
243 |
|
|
}
|
244 |
|
|
|
245 |
|
|
mutex_unlock(&pDevice->m_BoardMutex);
|
246 |
|
|
|
247 |
|
|
return error;
|
248 |
|
|
}
|
249 |
|
|
|
250 |
|
|
//-----------------------------------------------------------------------------
|
251 |
|
|
|
252 |
|
|
static inline int private_mapping_ok(struct vm_area_struct *vma)
|
253 |
|
|
{
|
254 |
|
|
return vma->vm_flags & VM_MAYSHARE;
|
255 |
|
|
}
|
256 |
|
|
|
257 |
|
|
//-----------------------------------------------------------------------------
|
258 |
|
|
|
259 |
|
|
static int pex_device_mmap(struct file *file, struct vm_area_struct *vma)
|
260 |
|
|
{
|
261 |
|
|
size_t size = vma->vm_end - vma->vm_start;
|
262 |
|
|
|
263 |
|
|
if (!private_mapping_ok(vma))
|
264 |
|
|
return -ENOSYS;
|
265 |
|
|
|
266 |
|
|
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
|
267 |
|
|
|
268 |
|
|
if (remap_pfn_range(vma,
|
269 |
|
|
vma->vm_start,
|
270 |
|
|
vma->vm_pgoff,
|
271 |
|
|
size,
|
272 |
|
|
vma->vm_page_prot)) {
|
273 |
|
|
err_msg(err_trace, "%s(): error in remap_page_range.\n", __FUNCTION__ );
|
274 |
|
|
return -EAGAIN;
|
275 |
|
|
}
|
276 |
|
|
return 0;
|
277 |
|
|
}
|
278 |
|
|
|
279 |
|
|
//-----------------------------------------------------------------------------
|
280 |
|
|
|
281 |
|
|
static ssize_t pex_device_aio_read(struct kiocb *iocb, const struct iovec *iov, unsigned long count, loff_t off)
|
282 |
|
|
{
|
283 |
|
|
struct pex_device *pDevice = file_to_device(iocb->ki_filp);
|
284 |
|
|
if(!pDevice) {
|
285 |
|
|
err_msg(err_trace, "%s(): ioctl device failed\n", __FUNCTION__);
|
286 |
|
|
return -ENODEV;
|
287 |
|
|
}
|
288 |
|
|
return -ENOSYS;
|
289 |
|
|
}
|
290 |
|
|
|
291 |
|
|
//-----------------------------------------------------------------------------
|
292 |
|
|
|
293 |
|
|
static ssize_t pex_device_aio_write(struct kiocb *iocb, const struct iovec *iov, unsigned long count, loff_t off)
|
294 |
|
|
{
|
295 |
|
|
struct pex_device *pDevice = file_to_device(iocb->ki_filp);
|
296 |
|
|
if(!pDevice) {
|
297 |
|
|
err_msg(err_trace, "%s(): ioctl device failed\n", __FUNCTION__);
|
298 |
|
|
return -ENODEV;
|
299 |
|
|
}
|
300 |
|
|
return -ENOSYS;
|
301 |
|
|
}
|
302 |
|
|
|
303 |
|
|
//-----------------------------------------------------------------------------
|
304 |
|
|
|
305 |
|
|
static irqreturn_t pex_device_isr( int irq, void *pContext )
|
306 |
|
|
{
|
307 |
|
|
FIFO_STATUS FifoStatus; //
|
308 |
|
|
|
309 |
|
|
struct pex_device* pDevice = (struct pex_device*)pContext; // our device
|
310 |
|
|
|
311 |
|
|
if(!pDevice->m_DmaIrqEnbl && !pDevice->m_FlgIrqEnbl)
|
312 |
|
|
return IRQ_NONE;
|
313 |
|
|
|
314 |
|
|
if(pDevice->m_FlgIrqEnbl)
|
315 |
|
|
{ // прерывание от флагов состояния
|
316 |
|
|
/*
|
317 |
|
|
u32 status = ReadOperationWordReg(pDevice, PEMAINadr_BRD_STATUS);
|
318 |
|
|
err_msg(err_trace, "%s(): BRD_STATUS = 0x%X.\n", __FUNCTION__, status);
|
319 |
|
|
if(status & 0x4000)
|
320 |
|
|
{
|
321 |
|
|
for(int i = 0; i < NUM_TETR_IRQ; i++)
|
322 |
|
|
if(pDevice->m_TetrIrq[i] != 0)
|
323 |
|
|
{
|
324 |
|
|
u32 status = ReadAmbMainReg(pDevice, pDevice->m_TetrIrq[i].Address);
|
325 |
|
|
KdPrint(("CWambpex::WambpexIsr: TetrIrq = %d, Address = 0x%X, IrqInv = 0x%X, IrqMask = 0x%X, Status = 0x%X.\n",
|
326 |
|
|
i, pDevice->m_TetrIrq[i].Address, pDevice->m_TetrIrq[i].IrqInv, pDevice->m_TetrIrq[i].IrqMask, status));
|
327 |
|
|
status ^= pDevice->m_TetrIrq[i].IrqInv;
|
328 |
|
|
status &= pDevice->m_TetrIrq[i].IrqMask;
|
329 |
|
|
KdPrint(("CWambpex::WambpexIsr: TetrIrq = %d, Address = 0x%X, IrqInv = 0x%X, IrqMask = 0x%X, Status = 0x%X.\n",
|
330 |
|
|
i, pDevice->m_TetrIrq[i].Address, pDevice->m_TetrIrq[i].IrqInv, pDevice->m_TetrIrq[i].IrqMask, status));
|
331 |
|
|
if(status)
|
332 |
|
|
{
|
333 |
|
|
KeInsertQueueDpc(&pDevice->m_TetrIrq[i].Dpc, NULL, NULL);
|
334 |
|
|
KdPrint(("CWambpex::WambpexIsr - Tetrad IRQ address = %d\n", pDevice->m_TetrIrq[i].Address));
|
335 |
|
|
// сброс статусного бита, вызвавшего прерывание
|
336 |
|
|
//pDevice->WriteAmbMainReg(pDevice->m_TetrIrq[i].Address + 0x200);
|
337 |
|
|
ULONG CmdAddress = pDevice->m_TetrIrq[i].Address + TRDadr_CMD_ADR * REG_SIZE;
|
338 |
|
|
pDevice->WriteAmbMainReg(CmdAddress, 0);
|
339 |
|
|
ULONG DataAddress = pDevice->m_TetrIrq[i].Address + TRDadr_CMD_DATA * REG_SIZE;
|
340 |
|
|
ULONG Mode0Value = pDevice->ReadAmbMainReg(DataAddress);
|
341 |
|
|
Mode0Value &= 0xFFFB;
|
342 |
|
|
//pDevice->WriteAmbMainReg(CmdAddress, 0);
|
343 |
|
|
pDevice->WriteAmbMainReg(DataAddress, Mode0Value);
|
344 |
|
|
break;
|
345 |
|
|
}
|
346 |
|
|
}
|
347 |
|
|
return IRQ_HANDLED;
|
348 |
|
|
}
|
349 |
|
|
else // вообще не наше прерывание !!!
|
350 |
|
|
return IRQ_NONE; // we did not interrupt
|
351 |
|
|
*/
|
352 |
|
|
}
|
353 |
|
|
|
354 |
|
|
if(pDevice->m_DmaIrqEnbl)
|
355 |
|
|
{ // прерывание от каналов ПДП
|
356 |
|
|
u32 i=0;
|
357 |
|
|
u32 FifoAddr = 0;
|
358 |
|
|
u32 iChan = pDevice->m_primChan;
|
359 |
|
|
u32 NumberOfChannel = -1;
|
360 |
|
|
|
361 |
|
|
for(i = 0; i < MAX_NUMBER_OF_DMACHANNELS; i++)
|
362 |
|
|
{
|
363 |
|
|
if(pDevice->m_DmaChanMask & (1 << iChan))
|
364 |
|
|
{
|
365 |
|
|
FifoAddr = pDevice->m_FifoAddr[iChan];
|
366 |
|
|
FifoStatus.AsWhole = ReadOperationWordReg(pDevice, PEFIFOadr_FIFO_STATUS + FifoAddr);
|
367 |
|
|
if(FifoStatus.ByBits.IntRql)
|
368 |
|
|
{
|
369 |
|
|
//err_msg(err_trace, "%s(): - Channel = %d, Fifo Status = 0x%X\n", __FUNCTION__, iChan, FifoStatus.AsWhole);
|
370 |
|
|
NumberOfChannel = iChan;
|
371 |
|
|
pDevice->m_primChan = ((pDevice->m_primChan+1) >= MAX_NUMBER_OF_DMACHANNELS) ? 0 : pDevice->m_primChan+1;
|
372 |
|
|
break;
|
373 |
|
|
}
|
374 |
|
|
}
|
375 |
|
|
iChan = ((iChan+1) >= MAX_NUMBER_OF_DMACHANNELS) ? 0 : iChan+1;
|
376 |
|
|
}
|
377 |
|
|
|
378 |
|
|
if(NumberOfChannel != -1)
|
379 |
|
|
{
|
380 |
|
|
u32 flag = 0;
|
381 |
|
|
|
382 |
|
|
//err_msg(err_trace, "%s(%d)\n", __FUNCTION__, atomic_read(&pDevice->m_TotalIRQ));
|
383 |
|
|
|
384 |
|
|
flag = NextDmaTransfer(pDevice->m_DmaChannel[NumberOfChannel]);
|
385 |
30 |
dsmv |
//if(!flag)
|
386 |
|
|
if( 0 )
|
387 |
2 |
dsmv |
{
|
388 |
|
|
DMA_CTRL_EXT CtrlExt;
|
389 |
|
|
CtrlExt.AsWhole = 0;
|
390 |
|
|
CtrlExt.ByBits.Pause = 1;
|
391 |
|
|
CtrlExt.ByBits.Start = 1;
|
392 |
|
|
WriteOperationWordReg(pDevice, PEFIFOadr_DMA_CTRL + FifoAddr, CtrlExt.AsWhole);
|
393 |
|
|
//err_msg(err_trace, "%s(): - Pause (%d) - m_CurBlockNum = %d, m_DoneBlock = %d\n", __FUNCTION__, atomic_read(&pDevice->m_TotalIRQ),
|
394 |
|
|
// pDevice->m_DmaChannel[NumberOfChannel]->m_CurBlockNum,
|
395 |
|
|
// pDevice->m_DmaChannel[NumberOfChannel]->m_DoneBlock);
|
396 |
|
|
}
|
397 |
|
|
|
398 |
|
|
//err_msg(err_trace, "%s(): - Flag Clear\n", __FUNCTION__);
|
399 |
|
|
WriteOperationWordReg(pDevice, PEFIFOadr_FLAG_CLR + FifoAddr, 0x10);
|
400 |
|
|
WriteOperationWordReg(pDevice, PEFIFOadr_FLAG_CLR + FifoAddr, 0x00);
|
401 |
|
|
//err_msg(err_trace, "%s(): - Complete\n", __FUNCTION__);
|
402 |
|
|
|
403 |
|
|
atomic_inc(&pDevice->m_TotalIRQ);
|
404 |
|
|
|
405 |
|
|
return IRQ_HANDLED;
|
406 |
|
|
}
|
407 |
|
|
}
|
408 |
|
|
return IRQ_NONE; // we did not interrupt
|
409 |
|
|
}
|
410 |
|
|
|
411 |
|
|
//-----------------------------------------------------------------------------
|
412 |
|
|
|
413 |
|
|
struct file_operations pex_fops = {
|
414 |
|
|
|
415 |
|
|
.owner = THIS_MODULE,
|
416 |
|
|
.read = NULL,
|
417 |
|
|
.write = NULL,
|
418 |
|
|
|
419 |
|
|
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)
|
420 |
|
|
.unlocked_ioctl = pex_device_ioctl,
|
421 |
|
|
.compat_ioctl = pex_device_ioctl,
|
422 |
|
|
#else
|
423 |
|
|
.ioctl = pex_device_ioctl,
|
424 |
|
|
#endif
|
425 |
|
|
|
426 |
|
|
.mmap = pex_device_mmap,
|
427 |
|
|
.open = pex_device_open,
|
428 |
|
|
.release = pex_device_close,
|
429 |
|
|
.fasync = pex_device_fasync,
|
430 |
|
|
.poll = pex_device_poll,
|
431 |
|
|
.aio_read = pex_device_aio_read,
|
432 |
|
|
.aio_write = pex_device_aio_write,
|
433 |
|
|
};
|
434 |
|
|
|
435 |
|
|
//-----------------------------------------------------------------------------
|
436 |
|
|
//-----------------------------------------------------------------------------
|
437 |
|
|
//-----------------------------------------------------------------------------
|
438 |
|
|
//-----------------------------------------------------------------------------
|
439 |
|
|
|
440 |
|
|
static const struct pci_device_id pex_device_id[] = {
|
441 |
|
|
{
|
442 |
|
|
.vendor = INSYS_VENDOR_ID,
|
443 |
|
|
.device = AMBPEX5_DEVID,
|
444 |
|
|
.subvendor = PCI_ANY_ID,
|
445 |
|
|
.subdevice = PCI_ANY_ID,
|
446 |
|
|
},
|
447 |
30 |
dsmv |
|
448 |
2 |
dsmv |
{ },
|
449 |
|
|
};
|
450 |
|
|
|
451 |
|
|
MODULE_DEVICE_TABLE(pci, pex_device_id);
|
452 |
|
|
|
453 |
|
|
//-----------------------------------------------------------------------------
|
454 |
|
|
|
455 |
|
|
static int __devinit pex_device_probe(struct pci_dev *dev, const struct pci_device_id *id)
|
456 |
|
|
{
|
457 |
|
|
int error = 0;
|
458 |
|
|
int i = 0;
|
459 |
|
|
struct pex_device *brd = NULL;
|
460 |
|
|
|
461 |
|
|
mutex_lock(&pex_mutex);
|
462 |
|
|
|
463 |
|
|
brd = kzalloc(sizeof(struct pex_device), GFP_KERNEL);
|
464 |
|
|
if(!brd) {
|
465 |
|
|
error = -ENOMEM;
|
466 |
|
|
goto do_out;
|
467 |
|
|
}
|
468 |
|
|
|
469 |
|
|
INIT_LIST_HEAD(&brd->m_list);
|
470 |
|
|
mutex_init(&brd->m_BoardMutex);
|
471 |
|
|
sema_init(&brd->m_BoardSem, 1);
|
472 |
|
|
spin_lock_init(&brd->m_BoardLock);
|
473 |
|
|
atomic_set(&brd->m_TotalIRQ, 0);
|
474 |
|
|
init_waitqueue_head(&brd->m_WaitQueue);
|
475 |
|
|
init_timer(&brd->m_TimeoutTimer);
|
476 |
|
|
spin_lock_init(&brd->m_MemListLock);
|
477 |
|
|
atomic_set(&brd->m_MemListCount, 0);
|
478 |
|
|
INIT_LIST_HEAD(&brd->m_MemList);
|
479 |
|
|
brd->m_pci = dev;
|
480 |
|
|
brd->m_Interrupt = -1;
|
481 |
|
|
brd->m_DmaIrqEnbl = 0;
|
482 |
|
|
brd->m_FlgIrqEnbl = 0;
|
483 |
|
|
brd->m_class = pex_class;
|
484 |
|
|
|
485 |
|
|
set_device_name(brd, dev->device, boards_count);
|
486 |
|
|
|
487 |
|
|
dbg_msg(dbg_trace, "%s(): device_id = %x, vendor_id = %x, board name %s\n", __FUNCTION__, dev->device, dev->vendor, brd->m_name);
|
488 |
|
|
|
489 |
|
|
error = pci_enable_device(dev);
|
490 |
|
|
if(error) {
|
491 |
|
|
err_msg(err_trace, "%s(): error enabling pci device\n", __FUNCTION__);
|
492 |
|
|
goto do_free_memory;
|
493 |
|
|
}
|
494 |
|
|
|
495 |
|
|
if (pci_set_dma_mask(dev, DMA_BIT_MASK(64)) || pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(64))) {
|
496 |
|
|
printk("%s(): error set pci dma mask\n", __FUNCTION__);
|
497 |
|
|
goto do_disable_device;
|
498 |
|
|
}
|
499 |
|
|
|
500 |
|
|
pci_set_master(dev);
|
501 |
|
|
|
502 |
|
|
brd->m_BAR0.physical_address = pci_resource_start(dev, 0);
|
503 |
|
|
brd->m_BAR0.size = pci_resource_len(dev, 0);
|
504 |
|
|
brd->m_BAR0.virtual_address = ioremap_nocache(brd->m_BAR0.physical_address, brd->m_BAR0.size);
|
505 |
|
|
if(!brd->m_BAR0.virtual_address) {
|
506 |
|
|
error = -ENOMEM;
|
507 |
|
|
err_msg(err_trace, "%s(): error map device memory at bar%d\n", __FUNCTION__, 0);
|
508 |
|
|
goto do_disable_device;
|
509 |
|
|
}
|
510 |
|
|
|
511 |
|
|
dbg_msg(dbg_trace, "%s(): map bar0 %zx -> %p\n", __FUNCTION__, brd->m_BAR0.physical_address, brd->m_BAR0.virtual_address);
|
512 |
|
|
|
513 |
|
|
brd->m_BAR1.physical_address = pci_resource_start(dev, 1);
|
514 |
|
|
brd->m_BAR1.size = pci_resource_len(dev, 1);
|
515 |
|
|
brd->m_BAR1.virtual_address = ioremap_nocache(brd->m_BAR1.physical_address, brd->m_BAR1.size);
|
516 |
|
|
if(!brd->m_BAR1.virtual_address) {
|
517 |
|
|
error = -ENOMEM;
|
518 |
|
|
err_msg(err_trace, "%s(): error map device memory at bar%d\n", __FUNCTION__, 0);
|
519 |
|
|
goto do_unmap_bar0;
|
520 |
|
|
}
|
521 |
|
|
|
522 |
|
|
dbg_msg(dbg_trace, "%s(): map bar1 %zx -> %p\n", __FUNCTION__, brd->m_BAR1.physical_address, brd->m_BAR1.virtual_address);
|
523 |
|
|
|
524 |
|
|
error = request_irq(dev->irq, pex_device_isr, IRQF_SHARED, brd->m_name, brd);
|
525 |
|
|
if( error < 0) {
|
526 |
|
|
error = -EBUSY;
|
527 |
|
|
err_msg( err_trace, "%s(): error in request_irq()\n", __FUNCTION__ );
|
528 |
|
|
goto do_unmap_bar1;
|
529 |
|
|
}
|
530 |
|
|
|
531 |
|
|
brd->m_Interrupt = dev->irq;
|
532 |
|
|
|
533 |
|
|
cdev_init(&brd->m_cdev, &pex_fops);
|
534 |
|
|
brd->m_cdev.owner = THIS_MODULE;
|
535 |
|
|
brd->m_cdev.ops = &pex_fops;
|
536 |
|
|
brd->m_devno = MKDEV(MAJOR(devno), boards_count);
|
537 |
|
|
|
538 |
|
|
error = cdev_add(&brd->m_cdev, brd->m_devno, 1);
|
539 |
|
|
if(error) {
|
540 |
|
|
err_msg(err_trace, "%s(): Error add char device %d\n", __FUNCTION__, boards_count);
|
541 |
|
|
error = -EINVAL;
|
542 |
|
|
goto do_free_irq;
|
543 |
|
|
}
|
544 |
|
|
|
545 |
|
|
dbg_msg(dbg_trace, "%s(): Add cdev %d\n", __FUNCTION__, boards_count);
|
546 |
|
|
|
547 |
|
|
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)
|
548 |
6 |
v.karak |
brd->m_device = device_create(pex_class, NULL, brd->m_devno, "%s%d", "pexdrv", boards_count);
|
549 |
2 |
dsmv |
#else
|
550 |
6 |
v.karak |
brd->m_device = device_create(pex_class, NULL, brd->m_devno, NULL, "%s%d", "pexdrv", boards_count);
|
551 |
2 |
dsmv |
#endif
|
552 |
|
|
if(!brd->m_device ) {
|
553 |
|
|
err_msg(err_trace, "%s(): Error create device for board: %s\n", __FUNCTION__, brd->m_name);
|
554 |
|
|
error = -EINVAL;
|
555 |
|
|
goto do_delete_cdev;
|
556 |
|
|
}
|
557 |
|
|
|
558 |
|
|
dbg_msg(dbg_trace, "%s(): Create device file for board: %s\n", __FUNCTION__, brd->m_name);
|
559 |
|
|
|
560 |
|
|
brd->m_BoardIndex = boards_count;
|
561 |
|
|
|
562 |
|
|
InitializeBoard(brd);
|
563 |
|
|
|
564 |
|
|
for(i = 0; i < MAX_NUMBER_OF_DMACHANNELS; i++) {
|
565 |
|
|
|
566 |
|
|
if(brd->m_DmaChanMask & (1 << i)) {
|
567 |
|
|
|
568 |
|
|
brd->m_DmaChannel[i] = CDmaChannelCreate( i, brd,
|
569 |
|
|
&brd->m_pci->dev,
|
570 |
|
|
brd->m_MaxDmaSize[i],
|
571 |
|
|
brd->m_BlockFifoId[i], 1 );
|
572 |
|
|
}
|
573 |
|
|
}
|
574 |
|
|
|
575 |
|
|
pex_register_proc(brd->m_name, pex_proc_info, brd);
|
576 |
|
|
|
577 |
|
|
list_add_tail(&brd->m_list, &device_list);
|
578 |
|
|
|
579 |
|
|
boards_count++;
|
580 |
|
|
|
581 |
|
|
dbg_msg(dbg_trace, "%s(): Board %s - setup complete\n", __FUNCTION__, brd->m_name);
|
582 |
|
|
|
583 |
|
|
mutex_unlock(&pex_mutex);
|
584 |
|
|
|
585 |
|
|
return error;
|
586 |
|
|
|
587 |
|
|
do_delete_cdev:
|
588 |
|
|
cdev_del(&brd->m_cdev);
|
589 |
|
|
|
590 |
|
|
do_free_irq:
|
591 |
|
|
free_irq(brd->m_Interrupt, brd);
|
592 |
|
|
|
593 |
|
|
do_unmap_bar1:
|
594 |
|
|
iounmap(brd->m_BAR1.virtual_address);
|
595 |
|
|
|
596 |
|
|
do_unmap_bar0:
|
597 |
|
|
iounmap(brd->m_BAR0.virtual_address);
|
598 |
|
|
|
599 |
|
|
|
600 |
|
|
do_disable_device:
|
601 |
|
|
pci_disable_device(dev);
|
602 |
|
|
|
603 |
|
|
do_free_memory:
|
604 |
|
|
kfree(brd);
|
605 |
|
|
|
606 |
|
|
do_out:
|
607 |
|
|
mutex_unlock(&pex_mutex);
|
608 |
|
|
|
609 |
|
|
return error;
|
610 |
|
|
}
|
611 |
|
|
|
612 |
|
|
//-----------------------------------------------------------------------------
|
613 |
|
|
|
614 |
|
|
static void __devexit pex_device_remove(struct pci_dev *dev)
|
615 |
|
|
{
|
616 |
|
|
struct list_head *pos, *n;
|
617 |
|
|
struct pex_device *brd = NULL;
|
618 |
6 |
v.karak |
int i = 0;
|
619 |
2 |
dsmv |
|
620 |
|
|
dbg_msg(dbg_trace, "%s(): device_id = %x, vendor_id = %x\n", __FUNCTION__, dev->device, dev->vendor);
|
621 |
|
|
|
622 |
|
|
mutex_lock(&pex_mutex);
|
623 |
|
|
|
624 |
|
|
list_for_each_safe(pos, n, &device_list) {
|
625 |
|
|
|
626 |
|
|
brd = list_entry(pos, struct pex_device, m_list);
|
627 |
|
|
|
628 |
|
|
if(brd->m_pci == dev) {
|
629 |
|
|
|
630 |
|
|
free_irq(brd->m_Interrupt, brd);
|
631 |
|
|
dbg_msg(dbg_trace, "%s(): free_irq() - complete\n", __FUNCTION__);
|
632 |
|
|
pex_remove_proc(brd->m_name);
|
633 |
|
|
dbg_msg(dbg_trace, "%s(): pex_remove_proc() - complete\n", __FUNCTION__);
|
634 |
6 |
v.karak |
for(i = 0; i < MAX_NUMBER_OF_DMACHANNELS; i++) {
|
635 |
|
|
if(brd->m_DmaChannel[i]) {
|
636 |
|
|
CDmaChannelDelete(brd->m_DmaChannel[i]);
|
637 |
|
|
dbg_msg(dbg_trace, "%s(): free DMA channel %d - complete\n", __FUNCTION__, i);
|
638 |
|
|
}
|
639 |
|
|
}
|
640 |
2 |
dsmv |
free_memory(brd);
|
641 |
|
|
dbg_msg(dbg_trace, "%s(): free_memory() - complete\n", __FUNCTION__);
|
642 |
|
|
device_destroy(pex_class, brd->m_devno);
|
643 |
|
|
dbg_msg(dbg_trace, "%s(): device_destroy() - complete\n", __FUNCTION__);
|
644 |
|
|
cdev_del(&brd->m_cdev);
|
645 |
|
|
dbg_msg(dbg_trace, "%s(): cdev_del() - complete\n", __FUNCTION__);
|
646 |
|
|
iounmap(brd->m_BAR1.virtual_address);
|
647 |
|
|
dbg_msg(dbg_trace, "%s(): iounmap() - complete\n", __FUNCTION__);
|
648 |
|
|
iounmap(brd->m_BAR0.virtual_address);
|
649 |
|
|
dbg_msg(dbg_trace, "%s(): iounmap() - complete\n", __FUNCTION__);
|
650 |
|
|
pci_disable_device(dev);
|
651 |
|
|
dbg_msg(dbg_trace, "%s(): pci_disable_device() - complete\n", __FUNCTION__);
|
652 |
|
|
list_del(pos);
|
653 |
|
|
dbg_msg(dbg_trace, "%s(): list_del() - complete\n", __FUNCTION__);
|
654 |
|
|
kfree(brd);
|
655 |
|
|
dbg_msg(dbg_trace, "%s(): kfree() - complete\n", __FUNCTION__);
|
656 |
|
|
}
|
657 |
|
|
}
|
658 |
|
|
|
659 |
|
|
mutex_unlock(&pex_mutex);
|
660 |
|
|
}
|
661 |
|
|
|
662 |
|
|
//-----------------------------------------------------------------------------
|
663 |
|
|
|
664 |
|
|
static struct pci_driver pex_pci_driver = {
|
665 |
|
|
|
666 |
|
|
.name = PEX_DRIVER_NAME,
|
667 |
|
|
.id_table = pex_device_id,
|
668 |
|
|
.probe = pex_device_probe,
|
669 |
|
|
.remove = pex_device_remove,
|
670 |
|
|
};
|
671 |
|
|
|
672 |
|
|
//-----------------------------------------------------------------------------
|
673 |
|
|
|
674 |
|
|
static int __init pex_module_init(void)
|
675 |
|
|
{
|
676 |
|
|
int error = 0;
|
677 |
|
|
|
678 |
|
|
dbg_msg(dbg_trace, "%s()\n", __FUNCTION__);
|
679 |
|
|
|
680 |
|
|
mutex_init(&pex_mutex);
|
681 |
|
|
|
682 |
|
|
error = alloc_chrdev_region(&devno, 0, MAX_PEXDEVICE_SUPPORT, PEX_DRIVER_NAME);
|
683 |
|
|
if(error < 0) {
|
684 |
|
|
err_msg(err_trace, "%s(): Erorr allocate char device regions\n", __FUNCTION__);
|
685 |
|
|
goto do_out;
|
686 |
|
|
}
|
687 |
|
|
|
688 |
|
|
dbg_msg(dbg_trace, "%s(): Allocate %d device numbers. Major number = %d\n", __FUNCTION__, MAX_PEXDEVICE_SUPPORT, MAJOR(devno));
|
689 |
|
|
|
690 |
|
|
pex_class = class_create(THIS_MODULE, PEX_DRIVER_NAME);
|
691 |
|
|
if(!pex_class) {
|
692 |
|
|
err_msg(err_trace, "%s(): Erorr allocate char device regions\n", __FUNCTION__);
|
693 |
|
|
error = -EINVAL;
|
694 |
|
|
goto do_free_chrdev;
|
695 |
|
|
}
|
696 |
|
|
|
697 |
|
|
error = pci_register_driver(&pex_pci_driver);
|
698 |
|
|
if(error < 0) {
|
699 |
|
|
err_msg(err_trace, "%s(): Erorr register pci driver\n", __FUNCTION__);
|
700 |
|
|
error = -EINVAL;
|
701 |
|
|
goto do_delete_class;
|
702 |
|
|
}
|
703 |
|
|
|
704 |
|
|
return 0;
|
705 |
|
|
|
706 |
|
|
do_delete_class:
|
707 |
|
|
class_destroy(pex_class);
|
708 |
|
|
|
709 |
|
|
do_free_chrdev:
|
710 |
|
|
unregister_chrdev_region(devno, MAX_PEXDEVICE_SUPPORT);
|
711 |
|
|
|
712 |
|
|
do_out:
|
713 |
|
|
return error;
|
714 |
|
|
}
|
715 |
|
|
|
716 |
|
|
//-----------------------------------------------------------------------------
|
717 |
|
|
|
718 |
|
|
static void __exit pex_module_cleanup(void)
|
719 |
|
|
{
|
720 |
|
|
dbg_msg(dbg_trace, "%s()\n", __FUNCTION__);
|
721 |
|
|
|
722 |
|
|
pci_unregister_driver(&pex_pci_driver);
|
723 |
|
|
|
724 |
|
|
if(pex_class)
|
725 |
|
|
class_destroy(pex_class);
|
726 |
|
|
|
727 |
|
|
unregister_chrdev_region(devno, MAX_PEXDEVICE_SUPPORT);
|
728 |
|
|
}
|
729 |
|
|
|
730 |
|
|
//-----------------------------------------------------------------------------
|
731 |
|
|
|
732 |
|
|
module_init(pex_module_init);
|
733 |
|
|
module_exit(pex_module_cleanup);
|
734 |
|
|
|
735 |
|
|
//-----------------------------------------------------------------------------
|