OpenCores
URL https://opencores.org/ocsvn/pcie_ds_dma/pcie_ds_dma/trunk

Subversion Repositories pcie_ds_dma

[/] [pcie_ds_dma/] [trunk/] [soft/] [linux/] [driver/] [pexdrv/] [pexmodule.c] - Blame information for rev 36

Go to most recent revision | Details | Compare with Previous | View Log

Line No. Rev Author Line
1 2 dsmv
 
2
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/ioport.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/poll.h>
#include <linux/err.h>        /* IS_ERR()/PTR_ERR() for class_create()/device_create() checks */
#include <linux/ioport.h>     /* duplicate include kept from original (guarded, harmless) */
#include <linux/pci.h>        /* duplicate include kept from original (guarded, harmless) */
#include <linux/interrupt.h>  /* duplicate include kept from original (guarded, harmless) */

#include <asm/io.h>
#include <asm/uaccess.h>

#include "pexmodule.h"
#include "hardware.h"
#include "pexioctl.h"
#include "ioctlrw.h"
#include "ambpexregs.h"
#include "pexproc.h"
27
 
28
//-----------------------------------------------------------------------------
29
 
30
MODULE_AUTHOR("Vladimir Karakozov. karakozov@gmail.com");
31
MODULE_LICENSE("GPL");
32
 
33
//-----------------------------------------------------------------------------
34
 
35
static dev_t devno = MKDEV(0, 0);        // base char device number, filled by alloc_chrdev_region()
static struct class *pex_class = NULL;   // sysfs class used to create /dev/pexdrvN nodes
static LIST_HEAD(device_list);           // all probed boards, linked via struct pex_device.m_list
static int boards_count = 0;             // number of probed boards; also used as the next minor index
static struct mutex pex_mutex;           // serializes probe/remove and guards the globals above
int dbg_trace = 1;                       // enables dbg_msg() trace output (non-static: shared with other units)
int err_trace = 1;                       // enables err_msg() error output (non-static: shared with other units)
42
 
43
//-----------------------------------------------------------------------------
44
 
45
static int free_memory(struct pex_device *brd)
46
{
47
    struct list_head *pos, *n;
48
    struct mem_t *m = NULL;
49
    int unlocked = 0;
50
 
51
    spin_lock(&brd->m_MemListLock);
52
 
53
    list_for_each_safe(pos, n, &brd->m_MemList) {
54
 
55
        m = list_entry(pos, struct mem_t, list);
56
 
57
        unlocked = unlock_pages(m->cpu_addr, m->size);
58
 
59
        dma_free_coherent(&brd->m_pci->dev, m->size, m->cpu_addr, m->dma_handle);
60
 
61
        dbg_msg(dbg_trace, "%s(): %d: PA = 0x%zx, VA = %p, SZ = 0x%zx, PAGES = %d\n",
62
                __FUNCTION__, atomic_read(&brd->m_MemListCount), (size_t)m->dma_handle, m->cpu_addr, m->size, unlocked );
63
 
64
        list_del(pos);
65
 
66
        atomic_dec(&brd->m_MemListCount);
67
 
68
        kfree(m);
69
    }
70
 
71
    spin_unlock(&brd->m_MemListLock);
72
 
73
    return 0;
74
}
75
 
76
//-----------------------------------------------------------------------------
77
 
78
static struct pex_device *file_to_device( struct file *file )
79
{
80
    return (struct pex_device*)file->private_data;
81
}
82
 
83
//-----------------------------------------------------------------------------
84
 
85
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37)
/*
 * Find the board whose index matches the minor number of the inode.
 * Only needed for the legacy .ioctl entry point on pre-2.6.37 kernels.
 * Returns NULL when no board on the list matches.
 */
static struct pex_device *inode_to_device( struct list_head *head, struct inode *inode )
{
    struct pex_device *board;
    unsigned int minor = MINOR(inode->i_rdev);

    list_for_each_entry(board, head, m_list) {
        if(board->m_BoardIndex == minor)
            return board;
    }

    return NULL;
}
#endif
101
 
102
//-----------------------------------------------------------------------------
103
 
104
static int pex_device_fasync(int fd, struct file *file, int mode)
105
{
106
    struct pex_device *pDevice = file->private_data;
107
    if(!pDevice)
108
        return -ENODEV;
109
 
110
    return 0;
111
}
112
 
113
//-----------------------------------------------------------------------------
114
 
115
/*
 * poll()/select() hook.
 *
 * BUGFIX: the original returned -ENODEV when the device was missing, but a
 * poll method must return an *event mask*, never a negative errno — the VFS
 * would have interpreted the negative value as a mask with nearly every bit
 * set. POLLERR is the correct way to report a defunct device here.
 *
 * No wait queues are armed, so a valid device currently reports no events.
 */
static unsigned int pex_device_poll(struct file *filp, poll_table *wait)
{
    unsigned int mask = 0;

    struct pex_device *pDevice = file_to_device(filp);
    if(!pDevice)
        return POLLERR;

    return mask;
}
125
 
126
//-----------------------------------------------------------------------------
127
 
128
static int pex_device_open( struct inode *inode, struct file *file )
129
{
130
    struct pex_device *pDevice = container_of(inode->i_cdev, struct pex_device, m_cdev);
131
    if(!pDevice) {
132
        err_msg(err_trace, "%s(): Open device failed\n", __FUNCTION__);
133
        return -ENODEV;
134
    }
135
 
136
    file->private_data = (void*)pDevice;
137
 
138
    dbg_msg(dbg_trace, "%s(): Open device %s\n", __FUNCTION__, pDevice->m_name);
139
 
140
    return 0;
141
}
142
 
143
//-----------------------------------------------------------------------------
144
 
145
static int pex_device_close( struct inode *inode, struct file *file )
146
{
147
    struct pex_device *pDevice = container_of(inode->i_cdev, struct pex_device, m_cdev);
148
    if(!pDevice) {
149
        err_msg(err_trace, "%s(): Close device failed\n", __FUNCTION__);
150
        return -ENODEV;
151
    }
152
 
153
    file->private_data = NULL;
154
 
155
    dbg_msg(dbg_trace, "%s(): Close device %s\n", __FUNCTION__, pDevice->m_name);
156
 
157
    return 0;
158
}
159
 
160
//-----------------------------------------------------------------------------
161
 
162
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)
static long pex_device_ioctl( struct file *file, unsigned int cmd, unsigned long arg )
#else
static int pex_device_ioctl( struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg )
#endif
{
    // Dispatch an ioctl to the matching ioctl_* handler (ioctlrw.c).
    // All commands on one board are serialized by m_BoardMutex.
    int error = 0;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)
    struct pex_device *pDevice = file_to_device(file);
#else
    // Legacy path: look the board up by the inode's minor number.
    struct pex_device *pDevice = inode_to_device(&device_list, inode);
#endif
    if(!pDevice) {
        err_msg(err_trace, "%s(): ioctl device failed\n", __FUNCTION__);
        return -ENODEV;
    }

    mutex_lock(&pDevice->m_BoardMutex);

    // Each handler presumably copies its argument struct from/to user
    // space via 'arg' — TODO confirm against ioctlrw.c.
    switch(cmd) {
    case IOCTL_PEX_BOARD_INFO:
        error = ioctl_board_info(pDevice, arg);
        break;
    case IOCTL_PEX_MEM_ALLOC:
        error = ioctl_memory_alloc(pDevice, arg);
        break;
    case IOCTL_PEX_MEM_FREE:
        error = ioctl_memory_free(pDevice, arg);
        break;
    case IOCTL_PEX_STUB_ALLOC:
        error = ioctl_stub_alloc(pDevice, arg);
        break;
    case IOCTL_PEX_STUB_FREE:
        error = ioctl_stub_free(pDevice, arg);
        break;
    case IOCTL_AMB_SET_MEMIO:
        error = ioctl_set_mem(pDevice, arg);
        break;
    case IOCTL_AMB_FREE_MEMIO:
        error = ioctl_free_mem(pDevice, arg);
        break;
    case IOCTL_AMB_START_MEMIO:
        error = ioctl_start_mem(pDevice, arg);
        break;
    case IOCTL_AMB_STOP_MEMIO:
        error = ioctl_stop_mem(pDevice, arg);
        break;
    case IOCTL_AMB_STATE_MEMIO:
        error = ioctl_state_mem(pDevice, arg);
        break;
    case IOCTL_AMB_WAIT_DMA_BUFFER:
        error = ioctl_wait_dma_buffer(pDevice, arg);
        break;
    case IOCTL_AMB_WAIT_DMA_BLOCK:
        error = ioctl_wait_dma_block(pDevice, arg);
        break;
    case IOCTL_AMB_SET_SRC_MEM:
        error = ioctl_set_src_mem(pDevice, arg);
        break;
    case IOCTL_AMB_SET_DIR_MEM:
        error = ioctl_set_dir_mem(pDevice, arg);
        break;
    case IOCTL_AMB_SET_DRQ_MEM:
        error = ioctl_set_drq_mem(pDevice, arg);
        break;
    case IOCTL_AMB_RESET_FIFO:
        error = ioctl_reset_fifo(pDevice, arg);
        break;
    case IOCTL_AMB_DONE:
        error = ioctl_done(pDevice, arg);
        break;
    case IOCTL_AMB_ADJUST:
        error = ioctl_adjust(pDevice, arg);
        break;

    default:
        dbg_msg(dbg_trace, "%s(): Unknown command\n", __FUNCTION__);
        error = -EINVAL;
        break;
    }

    mutex_unlock(&pDevice->m_BoardMutex);

    return error;
}
247
 
248
//-----------------------------------------------------------------------------
249
 
250
// A private (copy-on-write) mapping of device memory would hand the process
// a stale snapshot, so mmap only accepts VMAs that may be shared.
// Returns non-zero (the VM_MAYSHARE bit) when the mapping is acceptable.
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
    return vma->vm_flags & VM_MAYSHARE;
}
254
 
255
//-----------------------------------------------------------------------------
256
 
257
static int pex_device_mmap(struct file *file, struct vm_area_struct *vma)
258
{
259
    size_t size = vma->vm_end - vma->vm_start;
260
 
261
    if (!private_mapping_ok(vma))
262
        return -ENOSYS;
263
 
264
    vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
265
 
266
    if (remap_pfn_range(vma,
267
                        vma->vm_start,
268
                        vma->vm_pgoff,
269
                        size,
270
                        vma->vm_page_prot)) {
271
        err_msg(err_trace, "%s(): error in remap_page_range.\n", __FUNCTION__ );
272
        return -EAGAIN;
273
    }
274
    return 0;
275
}
276
 
277
//-----------------------------------------------------------------------------
278
 
279
/* Async read hook: not implemented. (NOTE: the log text says "ioctl" —
 * copy/paste from the ioctl handler; string kept verbatim.) */
static ssize_t pex_device_aio_read(struct kiocb *iocb, const struct iovec *iov, unsigned long count, loff_t off)
{
    struct pex_device *brd = file_to_device(iocb->ki_filp);

    if(!brd) {
        err_msg(err_trace, "%s(): ioctl device failed\n", __FUNCTION__);
        return -ENODEV;
    }

    return -ENOSYS;
}
288
 
289
//-----------------------------------------------------------------------------
290
 
291
/* Async write hook: not implemented. (NOTE: the log text says "ioctl" —
 * copy/paste from the ioctl handler; string kept verbatim.) */
static ssize_t pex_device_aio_write(struct kiocb *iocb, const struct iovec *iov, unsigned long count, loff_t off)
{
    struct pex_device *brd = file_to_device(iocb->ki_filp);

    if(!brd) {
        err_msg(err_trace, "%s(): ioctl device failed\n", __FUNCTION__);
        return -ENODEV;
    }

    return -ENOSYS;
}
300
 
301
//-----------------------------------------------------------------------------
302
 
303
/*
 * Shared interrupt handler. Walks the enabled DMA channels round-robin
 * (starting at m_primChan) looking for a FIFO that raised IntRql, advances
 * that channel's transfer, acknowledges the FIFO flag and returns
 * IRQ_HANDLED. Returns IRQ_NONE when the interrupt was not ours.
 */
static irqreturn_t pex_device_isr( int irq, void *pContext )
{
    FIFO_STATUS FifoStatus;  //

    struct pex_device* pDevice = (struct pex_device*)pContext;            // our device

    // Neither interrupt source enabled -> cannot be our interrupt.
    if(!pDevice->m_DmaIrqEnbl && !pDevice->m_FlgIrqEnbl)
        return IRQ_NONE;

    if(pDevice->m_FlgIrqEnbl)
    {  // interrupt from the status flags
        // Status-flag handling is disabled: the block below is a commented-out
        // port of the Windows (WDM) handler and is kept for reference only.
        /*
            u32 status = ReadOperationWordReg(pDevice, PEMAINadr_BRD_STATUS);
            err_msg(err_trace, "%s(): BRD_STATUS = 0x%X.\n", __FUNCTION__, status);
            if(status & 0x4000)
            {
                    for(int i = 0; i < NUM_TETR_IRQ; i++)
                            if(pDevice->m_TetrIrq[i] != 0)
                            {
                                    u32 status = ReadAmbMainReg(pDevice, pDevice->m_TetrIrq[i].Address);
                                    KdPrint(("CWambpex::WambpexIsr: TetrIrq = %d, Address = 0x%X, IrqInv = 0x%X, IrqMask = 0x%X, Status = 0x%X.\n",
                                                            i, pDevice->m_TetrIrq[i].Address, pDevice->m_TetrIrq[i].IrqInv, pDevice->m_TetrIrq[i].IrqMask, status));
                                    status ^= pDevice->m_TetrIrq[i].IrqInv;
                                    status &= pDevice->m_TetrIrq[i].IrqMask;
                                    KdPrint(("CWambpex::WambpexIsr: TetrIrq = %d, Address = 0x%X, IrqInv = 0x%X, IrqMask = 0x%X, Status = 0x%X.\n",
                                                            i, pDevice->m_TetrIrq[i].Address, pDevice->m_TetrIrq[i].IrqInv, pDevice->m_TetrIrq[i].IrqMask, status));
                                    if(status)
                                    {
                                            KeInsertQueueDpc(&pDevice->m_TetrIrq[i].Dpc, NULL, NULL);
                                            KdPrint(("CWambpex::WambpexIsr - Tetrad IRQ address = %d\n", pDevice->m_TetrIrq[i].Address));
                                            // clear the status bit that raised the interrupt
                                            //pDevice->WriteAmbMainReg(pDevice->m_TetrIrq[i].Address + 0x200);
                                            ULONG CmdAddress = pDevice->m_TetrIrq[i].Address + TRDadr_CMD_ADR * REG_SIZE;
                                            pDevice->WriteAmbMainReg(CmdAddress, 0);
                                            ULONG DataAddress = pDevice->m_TetrIrq[i].Address + TRDadr_CMD_DATA * REG_SIZE;
                                            ULONG Mode0Value = pDevice->ReadAmbMainReg(DataAddress);
                                            Mode0Value &= 0xFFFB;
                                            //pDevice->WriteAmbMainReg(CmdAddress, 0);
                                            pDevice->WriteAmbMainReg(DataAddress, Mode0Value);
                                            break;
                                    }
                            }
                return IRQ_HANDLED;
            }
            else // not our interrupt at all !!!
                    return IRQ_NONE;    // we did not interrupt
            */
    }

    if(pDevice->m_DmaIrqEnbl)
    {   // interrupt from the DMA channels
        u32 i=0;
        u32 FifoAddr = 0;
        u32 iChan = pDevice->m_primChan;
        // -1 wraps to 0xFFFFFFFF in u32; the `!= -1` test below applies the
        // same conversion, so the sentinel comparison is well-defined.
        u32 NumberOfChannel = -1;

        // Round-robin scan of the enabled channels, starting after the one
        // serviced last time so no channel can starve the others.
        for(i = 0; i < MAX_NUMBER_OF_DMACHANNELS; i++)
        {
            if(pDevice->m_DmaChanMask & (1 << iChan))
            {
                FifoAddr = pDevice->m_FifoAddr[iChan];
                FifoStatus.AsWhole = ReadOperationWordReg(pDevice, PEFIFOadr_FIFO_STATUS + FifoAddr);
                if(FifoStatus.ByBits.IntRql)
                {
                    //err_msg(err_trace, "%s(): - Channel = %d, Fifo Status = 0x%X\n", __FUNCTION__, iChan, FifoStatus.AsWhole);
                    NumberOfChannel = iChan;
                    pDevice->m_primChan = ((pDevice->m_primChan+1) >= MAX_NUMBER_OF_DMACHANNELS) ? 0 : pDevice->m_primChan+1;
                    break;
                }
            }
            iChan = ((iChan+1) >= MAX_NUMBER_OF_DMACHANNELS) ? 0 : iChan+1;
        }

        if(NumberOfChannel != -1)
        {
            // NOTE(review): 'flag' is assigned but unused because the pause
            // branch below was disabled (rev 30 changed `if(!flag)` to
            // `if( 0 )`); the call is kept for its side effect.
            u32 flag = 0;

            //err_msg(err_trace, "%s(%d)\n", __FUNCTION__, atomic_read(&pDevice->m_TotalIRQ));

            flag = NextDmaTransfer(pDevice->m_DmaChannel[NumberOfChannel]);
            //if(!flag)
            if( 0 )
            {
                DMA_CTRL_EXT CtrlExt;
                CtrlExt.AsWhole = 0;
                CtrlExt.ByBits.Pause = 1;
                CtrlExt.ByBits.Start = 1;
                WriteOperationWordReg(pDevice, PEFIFOadr_DMA_CTRL + FifoAddr, CtrlExt.AsWhole);
                //err_msg(err_trace, "%s(): - Pause (%d) - m_CurBlockNum = %d, m_DoneBlock = %d\n", __FUNCTION__, atomic_read(&pDevice->m_TotalIRQ),
                //        pDevice->m_DmaChannel[NumberOfChannel]->m_CurBlockNum,
                //        pDevice->m_DmaChannel[NumberOfChannel]->m_DoneBlock);
            }

            // Acknowledge the FIFO interrupt: pulse the flag-clear register.
            //err_msg(err_trace, "%s(): - Flag Clear\n", __FUNCTION__);
            WriteOperationWordReg(pDevice, PEFIFOadr_FLAG_CLR + FifoAddr, 0x10);
            WriteOperationWordReg(pDevice, PEFIFOadr_FLAG_CLR + FifoAddr, 0x00);
            //err_msg(err_trace, "%s(): - Complete\n", __FUNCTION__);

            atomic_inc(&pDevice->m_TotalIRQ);

            return IRQ_HANDLED;
        }
    }
    return IRQ_NONE;    // we did not interrupt
}
408
 
409
//-----------------------------------------------------------------------------
410
 
411
struct file_operations pex_fops = {
412
 
413
    .owner = THIS_MODULE,
414
    .read = NULL,
415
    .write = NULL,
416
 
417
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)
418
    .unlocked_ioctl = pex_device_ioctl,
419
    .compat_ioctl = pex_device_ioctl,
420
#else
421
    .ioctl = pex_device_ioctl,
422
#endif
423
 
424
    .mmap = pex_device_mmap,
425
    .open = pex_device_open,
426
    .release = pex_device_close,
427
    .fasync = pex_device_fasync,
428
    .poll = pex_device_poll,
429
    .aio_read =  pex_device_aio_read,
430
    .aio_write = pex_device_aio_write,
431
};
432
 
433
//-----------------------------------------------------------------------------
434
//-----------------------------------------------------------------------------
435
//-----------------------------------------------------------------------------
436
//-----------------------------------------------------------------------------
437
 
438
static const struct pci_device_id pex_device_id[] = {
439
{
440
        .vendor =       INSYS_VENDOR_ID,
441
        .device =       AMBPEX5_DEVID,
442
        .subvendor =    PCI_ANY_ID,
443
        .subdevice =    PCI_ANY_ID,
444
},
445 30 dsmv
 
446 2 dsmv
{ },
447
};
448
 
449
MODULE_DEVICE_TABLE(pci, pex_device_id);
450
 
451
//-----------------------------------------------------------------------------
452
 
453 34 v.karak
static int pex_device_probe(struct pci_dev *dev, const struct pci_device_id *id)
454 2 dsmv
{
455
    int error = 0;
456
    int i = 0;
457
    struct pex_device *brd = NULL;
458
 
459
    mutex_lock(&pex_mutex);
460
 
461
    brd = kzalloc(sizeof(struct pex_device), GFP_KERNEL);
462
    if(!brd) {
463
        error = -ENOMEM;
464
        goto do_out;
465
    }
466
 
467
    INIT_LIST_HEAD(&brd->m_list);
468
    mutex_init(&brd->m_BoardMutex);
469
    sema_init(&brd->m_BoardSem, 1);
470
    spin_lock_init(&brd->m_BoardLock);
471
    atomic_set(&brd->m_TotalIRQ, 0);
472
    init_waitqueue_head(&brd->m_WaitQueue);
473
    init_timer(&brd->m_TimeoutTimer);
474
    spin_lock_init(&brd->m_MemListLock);
475
    atomic_set(&brd->m_MemListCount, 0);
476
    INIT_LIST_HEAD(&brd->m_MemList);
477
    brd->m_pci = dev;
478
    brd->m_Interrupt = -1;
479
    brd->m_DmaIrqEnbl = 0;
480
    brd->m_FlgIrqEnbl = 0;
481
    brd->m_class = pex_class;
482
 
483
    set_device_name(brd, dev->device, boards_count);
484
 
485
    dbg_msg(dbg_trace, "%s(): device_id = %x, vendor_id = %x, board name %s\n", __FUNCTION__, dev->device, dev->vendor, brd->m_name);
486
 
487
    error = pci_enable_device(dev);
488
    if(error) {
489
        err_msg(err_trace, "%s(): error enabling pci device\n", __FUNCTION__);
490
        goto do_free_memory;
491
    }
492
 
493
    if (pci_set_dma_mask(dev, DMA_BIT_MASK(64)) || pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(64))) {
494
        printk("%s(): error set pci dma mask\n", __FUNCTION__);
495
        goto do_disable_device;
496
    }
497
 
498
    pci_set_master(dev);
499
 
500
    brd->m_BAR0.physical_address = pci_resource_start(dev, 0);
501
    brd->m_BAR0.size = pci_resource_len(dev, 0);
502
    brd->m_BAR0.virtual_address = ioremap_nocache(brd->m_BAR0.physical_address, brd->m_BAR0.size);
503
    if(!brd->m_BAR0.virtual_address) {
504
        error = -ENOMEM;
505
        err_msg(err_trace, "%s(): error map device memory at bar%d\n", __FUNCTION__, 0);
506
        goto do_disable_device;
507
    }
508
 
509
    dbg_msg(dbg_trace, "%s(): map bar0 %zx -> %p\n", __FUNCTION__, brd->m_BAR0.physical_address, brd->m_BAR0.virtual_address);
510
 
511
    brd->m_BAR1.physical_address = pci_resource_start(dev, 1);
512
    brd->m_BAR1.size = pci_resource_len(dev, 1);
513
    brd->m_BAR1.virtual_address = ioremap_nocache(brd->m_BAR1.physical_address, brd->m_BAR1.size);
514
    if(!brd->m_BAR1.virtual_address) {
515
        error = -ENOMEM;
516
        err_msg(err_trace, "%s(): error map device memory at bar%d\n", __FUNCTION__, 0);
517
        goto do_unmap_bar0;
518
    }
519
 
520
    dbg_msg(dbg_trace, "%s(): map bar1 %zx -> %p\n", __FUNCTION__, brd->m_BAR1.physical_address, brd->m_BAR1.virtual_address);
521
 
522
    error = request_irq(dev->irq, pex_device_isr, IRQF_SHARED, brd->m_name, brd);
523
    if( error < 0) {
524
        error = -EBUSY;
525
        err_msg( err_trace, "%s(): error in request_irq()\n", __FUNCTION__ );
526
        goto do_unmap_bar1;
527
    }
528
 
529
    brd->m_Interrupt = dev->irq;
530
 
531
    cdev_init(&brd->m_cdev, &pex_fops);
532
    brd->m_cdev.owner = THIS_MODULE;
533
    brd->m_cdev.ops = &pex_fops;
534
    brd->m_devno = MKDEV(MAJOR(devno), boards_count);
535
 
536
    error = cdev_add(&brd->m_cdev, brd->m_devno, 1);
537
    if(error) {
538
        err_msg(err_trace, "%s(): Error add char device %d\n", __FUNCTION__, boards_count);
539
        error = -EINVAL;
540
        goto do_free_irq;
541
    }
542
 
543
    dbg_msg(dbg_trace, "%s(): Add cdev %d\n", __FUNCTION__, boards_count);
544
 
545
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)
546 6 v.karak
    brd->m_device = device_create(pex_class, NULL, brd->m_devno, "%s%d", "pexdrv", boards_count);
547 2 dsmv
#else
548 6 v.karak
    brd->m_device = device_create(pex_class, NULL, brd->m_devno, NULL, "%s%d", "pexdrv", boards_count);
549 2 dsmv
#endif
550
    if(!brd->m_device ) {
551
        err_msg(err_trace, "%s(): Error create device for board: %s\n", __FUNCTION__, brd->m_name);
552
        error = -EINVAL;
553
        goto do_delete_cdev;
554
    }
555
 
556
    dbg_msg(dbg_trace, "%s(): Create device file for board: %s\n", __FUNCTION__, brd->m_name);
557
 
558
    brd->m_BoardIndex = boards_count;
559
 
560
    InitializeBoard(brd);
561
 
562
    for(i = 0; i < MAX_NUMBER_OF_DMACHANNELS; i++) {
563
 
564
        if(brd->m_DmaChanMask & (1 << i)) {
565
 
566
            brd->m_DmaChannel[i] = CDmaChannelCreate( i,  brd,
567
                                                      &brd->m_pci->dev,
568
                                                      brd->m_MaxDmaSize[i],
569
                                                      brd->m_BlockFifoId[i], 1 );
570
        }
571
    }
572
 
573
    pex_register_proc(brd->m_name, pex_proc_info, brd);
574
 
575
    list_add_tail(&brd->m_list, &device_list);
576
 
577
    boards_count++;
578
 
579
    dbg_msg(dbg_trace, "%s(): Board %s - setup complete\n", __FUNCTION__, brd->m_name);
580
 
581
    mutex_unlock(&pex_mutex);
582
 
583
    return error;
584
 
585
do_delete_cdev:
586
    cdev_del(&brd->m_cdev);
587
 
588
do_free_irq:
589
    free_irq(brd->m_Interrupt, brd);
590
 
591
do_unmap_bar1:
592
    iounmap(brd->m_BAR1.virtual_address);
593
 
594
do_unmap_bar0:
595
    iounmap(brd->m_BAR0.virtual_address);
596
 
597
 
598
do_disable_device:
599
    pci_disable_device(dev);
600
 
601
do_free_memory:
602
    kfree(brd);
603
 
604
do_out:
605
    mutex_unlock(&pex_mutex);
606
 
607
    return error;
608
}
609
 
610
//-----------------------------------------------------------------------------
611
 
612 34 v.karak
/*
 * PCI remove: find the board(s) bound to this pci_dev on the global list
 * and tear everything down in reverse order of probe. The IRQ is freed
 * first so the ISR can no longer run while the rest is dismantled.
 */
static void pex_device_remove(struct pci_dev *dev)
{
    struct list_head *pos, *n;
    struct pex_device *brd = NULL;
    int i = 0;

    dbg_msg(dbg_trace, "%s(): device_id = %x, vendor_id = %x\n", __FUNCTION__, dev->device, dev->vendor);

    mutex_lock(&pex_mutex);

    list_for_each_safe(pos, n, &device_list) {

        brd = list_entry(pos, struct pex_device, m_list);

        if(brd->m_pci == dev) {

            // 1. Detach the interrupt handler before touching anything else.
            free_irq(brd->m_Interrupt, brd);
            dbg_msg(dbg_trace, "%s(): free_irq() - complete\n", __FUNCTION__);
            // 2. Remove the /proc entry.
            pex_remove_proc(brd->m_name);
            dbg_msg(dbg_trace, "%s(): pex_remove_proc() - complete\n", __FUNCTION__);
            // 3. Destroy the DMA channel objects created at probe time.
            for(i = 0; i < MAX_NUMBER_OF_DMACHANNELS; i++) {
                if(brd->m_DmaChannel[i]) {
                     CDmaChannelDelete(brd->m_DmaChannel[i]);
                     dbg_msg(dbg_trace, "%s(): free DMA channel %d - complete\n", __FUNCTION__, i);
                }
            }
            // 4. Release user-allocated DMA buffers still on the list.
            free_memory(brd);
            dbg_msg(dbg_trace, "%s(): free_memory() - complete\n", __FUNCTION__);
            // 5. Remove the /dev node and the char device.
            device_destroy(pex_class, brd->m_devno);
            dbg_msg(dbg_trace, "%s(): device_destroy() - complete\n", __FUNCTION__);
            cdev_del(&brd->m_cdev);
            dbg_msg(dbg_trace, "%s(): cdev_del() - complete\n", __FUNCTION__);
            // 6. Unmap both BARs and disable the PCI device.
            iounmap(brd->m_BAR1.virtual_address);
            dbg_msg(dbg_trace, "%s(): iounmap() - complete\n", __FUNCTION__);
            iounmap(brd->m_BAR0.virtual_address);
            dbg_msg(dbg_trace, "%s(): iounmap() - complete\n", __FUNCTION__);
            pci_disable_device(dev);
            dbg_msg(dbg_trace, "%s(): pci_disable_device() - complete\n", __FUNCTION__);
            // 7. Unlink and free the board object itself.
            list_del(pos);
            dbg_msg(dbg_trace, "%s(): list_del() - complete\n", __FUNCTION__);
            kfree(brd);
            dbg_msg(dbg_trace, "%s(): kfree() - complete\n", __FUNCTION__);
        }
    }

    mutex_unlock(&pex_mutex);
}
659
 
660
//-----------------------------------------------------------------------------
661
 
662
static struct pci_driver pex_pci_driver = {
663
 
664
    .name = PEX_DRIVER_NAME,
665
    .id_table = pex_device_id,
666
    .probe = pex_device_probe,
667
    .remove = pex_device_remove,
668
};
669
 
670
//-----------------------------------------------------------------------------
671
 
672
/*
 * Module entry point: reserve a char device region, create the sysfs
 * class and register the PCI driver (which triggers probe for each board).
 *
 * BUGFIX: class_create() reports failure via ERR_PTR(), not NULL, so the
 * old '!pex_class' test could never fire and a failure would have been
 * dereferenced later. IS_ERR()/PTR_ERR() are used now, and the copy-pasted
 * error messages were corrected to describe the step that actually failed.
 *
 * Returns 0 on success or a negative errno; partial setup is undone via
 * the goto-cleanup chain.
 */
static int __init pex_module_init(void)
{
    int error = 0;

    dbg_msg(dbg_trace, "%s()\n", __FUNCTION__);

    mutex_init(&pex_mutex);

    error = alloc_chrdev_region(&devno, 0, MAX_PEXDEVICE_SUPPORT, PEX_DRIVER_NAME);
    if(error < 0) {
        err_msg(err_trace, "%s(): Error allocating char device region\n", __FUNCTION__);
        goto do_out;
    }

    dbg_msg(dbg_trace, "%s(): Allocate %d device numbers. Major number = %d\n", __FUNCTION__, MAX_PEXDEVICE_SUPPORT, MAJOR(devno));

    pex_class = class_create(THIS_MODULE, PEX_DRIVER_NAME);
    if(IS_ERR(pex_class)) {
        err_msg(err_trace, "%s(): Error creating device class\n", __FUNCTION__);
        error = PTR_ERR(pex_class);
        pex_class = NULL;   /* keep cleanup paths safe */
        goto do_free_chrdev;
    }

    error = pci_register_driver(&pex_pci_driver);
    if(error < 0) {
        err_msg(err_trace, "%s(): Error registering pci driver\n", __FUNCTION__);
        goto do_delete_class;
    }

    return 0;

do_delete_class:
    class_destroy(pex_class);

do_free_chrdev:
    unregister_chrdev_region(devno, MAX_PEXDEVICE_SUPPORT);

do_out:
    return error;
}
713
 
714
//-----------------------------------------------------------------------------
715
 
716
/*
 * Module exit: unregister the PCI driver first (runs pex_device_remove()
 * for every bound board), then destroy the class and release the char
 * device region — strict reverse of pex_module_init().
 */
static void __exit pex_module_cleanup(void)
{
    dbg_msg(dbg_trace, "%s()\n", __FUNCTION__);

    pci_unregister_driver(&pex_pci_driver);

    if(pex_class)
        class_destroy(pex_class);

    unregister_chrdev_region(devno, MAX_PEXDEVICE_SUPPORT);
}
727
 
728
//-----------------------------------------------------------------------------
729
 
730
// Register the module's load/unload handlers with the kernel.
module_init(pex_module_init);
module_exit(pex_module_cleanup);
732
 
733
//-----------------------------------------------------------------------------

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.