OpenCores pcie_ds_dma (Subversion repository)
URL: https://opencores.org/ocsvn/pcie_ds_dma/pcie_ds_dma/trunk
File: pcie_ds_dma/trunk/soft/linux/driver/pexdrv/pexmodule.c (rev 54)
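Linux kernel module for the AMBPEX5 PCI Express DMA board: it registers a PCI driver, exposes one character device per board (/dev/pexdrv0, /dev/pexdrv1, ...) with ioctl, mmap and poll entry points, and services the board's DMA interrupts.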

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/ioport.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/interrupt.h>
#include <asm/io.h>

#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/poll.h>
#include <linux/delay.h>

#include "pexmodule.h"
#include "hardware.h"
#include "pexioctl.h"
#include "ioctlrw.h"
#include "ambpexregs.h"
#include "pexproc.h"
#include "memory.h"

//-----------------------------------------------------------------------------

MODULE_AUTHOR("Vladimir Karakozov. karakozov@gmail.com");
MODULE_LICENSE("GPL");

//-----------------------------------------------------------------------------

static dev_t devno = MKDEV(0, 0);
static struct class *pex_class = NULL;
static LIST_HEAD(device_list);
static int boards_count = 0;
static struct mutex pex_mutex;
int dbg_trace = 1;
int err_trace = 1;

//-----------------------------------------------------------------------------
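/*
 * free_memory(): release every coherent DMA buffer still tracked on
 * brd->m_MemList. Each entry is unlocked (unlock_pages), freed with
 * dma_free_coherent(), removed from the list and its descriptor kfree()d.
 * Called from pex_device_remove() during device teardown.
 */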

static int free_memory(struct pex_device *brd)
{
    struct list_head *pos, *n;
    struct mem_t *m = NULL;
    int unlocked = 0;

    spin_lock(&brd->m_MemListLock);

    list_for_each_safe(pos, n, &brd->m_MemList) {

        m = list_entry(pos, struct mem_t, list);

        unlocked = unlock_pages(m->cpu_addr, m->size);

        dma_free_coherent(&brd->m_pci->dev, m->size, m->cpu_addr, m->dma_handle);

        dbg_msg(dbg_trace, "%s(): %d: PA = 0x%zx, VA = %p, SZ = 0x%zx, PAGES = %d\n",
                __FUNCTION__, atomic_read(&brd->m_MemListCount), (size_t)m->dma_handle, m->cpu_addr, m->size, unlocked );

        list_del(pos);

        atomic_dec(&brd->m_MemListCount);

        kfree(m);
    }

    spin_unlock(&brd->m_MemListLock);

    return 0;
}

//-----------------------------------------------------------------------------
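/*
 * Helpers that map a struct file / struct inode back to the owning
 * struct pex_device. On kernels before 2.6.37 the old ioctl prototype
 * only provides the inode, so the board is looked up in device_list
 * by minor number instead of through file->private_data.
 */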

static struct pex_device *file_to_device( struct file *file )
{
    return (struct pex_device*)file->private_data;
}

//-----------------------------------------------------------------------------

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37)
static struct pex_device *inode_to_device( struct list_head *head, struct inode *inode )
{
    struct list_head *p;
    struct pex_device *entry;
    unsigned int minor = MINOR(inode->i_rdev);

    list_for_each(p, head) {
        entry = list_entry(p, struct pex_device, m_list);
        if(entry->m_BoardIndex == minor)
            return entry;
    }

    return NULL;
}
#endif

//-----------------------------------------------------------------------------
/*
static ssize_t pex_device_read(struct file *file, char __user *data, size_t count, loff_t *ppos)
{
    int error = 0;
    u32 AdmNumber = 0;
    u32 TetrNumber = 0;
    u32 Address = 0;
    struct CDmaChannel *dma = NULL;

    struct pex_device *pDevice = file_to_device(file);
    if(!pDevice) {
        err_msg(err_trace, "%s(): No such device\n", __FUNCTION__);
        return -ENODEV;
    }

    dma = pDevice->m_DmaChannel[0];

    printk("%s(): data = %p\n", __FUNCTION__, data);
    printk("%s(): size = 0x%zx\n", __FUNCTION__, count);
    printk("%s(): ppos = %p\n", __FUNCTION__, ppos);

    error = lock_user_memory( &dma->m_BufDscrVA, data, count);
    if(error < 0) {
        err_msg(err_trace, "%s(): Error in lock_user_memory()\n", __FUNCTION__);
        return -EINVAL;
    }

    dma->m_BlockCount = dma->m_BufDscrVA.PageCount;
    dma->m_ScatterGatherTableEntryCnt = dma->m_BufDscrVA.PageCount;
    dma->m_BlockSize = PAGE_SIZE;

    AdmNumber = 0;
    TetrNumber = 0;
    Address = AdmNumber*ADM_SIZE + TetrNumber*TETRAD_SIZE + TRDadr_DATA*REG_SIZE;

    SetDmaDirection(dma, 1);
    Adjust(dma, 0);
    SetDmaLocalAddress(dma, Address);
    SetAdmTetr(dma, AdmNumber, TetrNumber);
    SetDmaMode(pDevice, 0, AdmNumber, TetrNumber);

    RequestStub(dma,NULL);
    SetScatterGatherListExtUser(dma);
    StartDmaTransfer(dma, 0);
    HwStartDmaTransfer(pDevice, 0);

    error = WaitEvent(&dma->m_BufferEndEvent, 1000);
    if(error < 0) {
        err_msg(err_trace, "%s(): Timeout on read operation\n", __FUNCTION__);
    }

    error = unlock_user_memory( &dma->m_BufDscrVA );
    if(error < 0) {
        err_msg(err_trace, "%s(): Error in unlock_user_memory()\n", __FUNCTION__);
        return -EINVAL;
    }

    ReleaseStub( dma );
    ReleaseSGList( dma );

    return 0;
}
*/
//-----------------------------------------------------------------------------

static int pex_device_fasync(int fd, struct file *file, int mode)
{
    struct pex_device *pDevice = file->private_data;
    if(!pDevice)
        return -ENODEV;

    return 0;
}

//-----------------------------------------------------------------------------

static unsigned int pex_device_poll(struct file *filp, poll_table *wait)
{
    unsigned int mask = 0;

    struct pex_device *pDevice = file_to_device(filp);
    if(!pDevice)
        return POLLERR;     /* poll must return an event mask, not an errno */

    return mask;
}

//-----------------------------------------------------------------------------
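/*
 * open/release: the board is recovered with container_of() on the inode's
 * cdev and cached in file->private_data for the other file operations
 * (ioctl, poll, mmap).
 */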

static int pex_device_open( struct inode *inode, struct file *file )
{
    struct pex_device *pDevice = container_of(inode->i_cdev, struct pex_device, m_cdev);
    if(!pDevice) {
        err_msg(err_trace, "%s(): Open device failed\n", __FUNCTION__);
        return -ENODEV;
    }

    file->private_data = (void*)pDevice;

    dbg_msg(dbg_trace, "%s(): Open device %s\n", __FUNCTION__, pDevice->m_name);

    return 0;
}

//-----------------------------------------------------------------------------

static int pex_device_close( struct inode *inode, struct file *file )
{
    struct pex_device *pDevice = container_of(inode->i_cdev, struct pex_device, m_cdev);
    if(!pDevice) {
        err_msg(err_trace, "%s(): Close device failed\n", __FUNCTION__);
        return -ENODEV;
    }

    file->private_data = NULL;

    dbg_msg(dbg_trace, "%s(): Close device %s\n", __FUNCTION__, pDevice->m_name);

    return 0;
}

//-----------------------------------------------------------------------------

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)
static long pex_device_ioctl( struct file *file, unsigned int cmd, unsigned long arg )
#else
static int pex_device_ioctl( struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg )
#endif
{
    int error = 0;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)
    struct pex_device *pDevice = file_to_device(file);
#else
    struct pex_device *pDevice = inode_to_device(&device_list, inode);
#endif
    if(!pDevice) {
        err_msg(err_trace, "%s(): ioctl device failed\n", __FUNCTION__);
        return -ENODEV;
    }

    mutex_lock(&pDevice->m_BoardMutex);

    switch(cmd) {
    case IOCTL_PEX_BOARD_INFO:
        error = ioctl_board_info(pDevice, arg);
        break;
    case IOCTL_PEX_MEM_ALLOC:
        error = ioctl_memory_alloc(pDevice, arg);
        break;
    case IOCTL_PEX_MEM_FREE:
        error = ioctl_memory_free(pDevice, arg);
        break;
    case IOCTL_PEX_STUB_ALLOC:
        error = ioctl_stub_alloc(pDevice, arg);
        break;
    case IOCTL_PEX_STUB_FREE:
        error = ioctl_stub_free(pDevice, arg);
        break;
    case IOCTL_AMB_SET_MEMIO:
        error = ioctl_set_mem(pDevice, arg);
        break;
    case IOCTL_AMB_FREE_MEMIO:
        error = ioctl_free_mem(pDevice, arg);
        break;
    case IOCTL_AMB_START_MEMIO:
        error = ioctl_start_mem(pDevice, arg);
        break;
    case IOCTL_AMB_STOP_MEMIO:
        error = ioctl_stop_mem(pDevice, arg);
        break;
    case IOCTL_AMB_STATE_MEMIO:
        error = ioctl_state_mem(pDevice, arg);
        break;
    case IOCTL_AMB_WAIT_DMA_BUFFER:
        error = ioctl_wait_dma_buffer(pDevice, arg);
        break;
    case IOCTL_AMB_WAIT_DMA_BLOCK:
        error = ioctl_wait_dma_block(pDevice, arg);
        break;
    case IOCTL_AMB_SET_SRC_MEM:
        error = ioctl_set_src_mem(pDevice, arg);
        break;
    case IOCTL_AMB_SET_DIR_MEM:
        error = ioctl_set_dir_mem(pDevice, arg);
        break;
    case IOCTL_AMB_SET_DRQ_MEM:
        error = ioctl_set_drq_mem(pDevice, arg);
        break;
    case IOCTL_AMB_RESET_FIFO:
        error = ioctl_reset_fifo(pDevice, arg);
        break;
    case IOCTL_AMB_DONE:
        error = ioctl_done(pDevice, arg);
        break;
    case IOCTL_AMB_ADJUST:
        error = ioctl_adjust(pDevice, arg);
        break;

    default:
        dbg_msg(dbg_trace, "%s(): Unknown command\n", __FUNCTION__);
        error = -EINVAL;
        break;
    }

    mutex_unlock(&pDevice->m_BoardMutex);

    return error;
}

//-----------------------------------------------------------------------------
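/*
 * Userspace sketch (illustrative only, not part of the driver): the ioctl
 * dispatcher above is reached through the character device created in
 * pex_device_probe(). The per-command argument layouts are defined in
 * pexioctl.h / ioctlrw.c and are not shown here, so the struct used below
 * is a placeholder assumption, not the real definition.
 *
 *     int fd = open("/dev/pexdrv0", O_RDWR);
 *     if (fd < 0)
 *         return -1;
 *     struct pex_board_info info;                  // hypothetical layout
 *     if (ioctl(fd, IOCTL_PEX_BOARD_INFO, &info) < 0)
 *         perror("IOCTL_PEX_BOARD_INFO");
 *     close(fd);
 */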

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
    return vma->vm_flags & VM_MAYSHARE;
}

//-----------------------------------------------------------------------------

static int pex_device_mmap(struct file *file, struct vm_area_struct *vma)
{
    size_t size = vma->vm_end - vma->vm_start;

    if (!private_mapping_ok(vma))
        return -ENOSYS;

    vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

    if (remap_pfn_range(vma,
                        vma->vm_start,
                        vma->vm_pgoff,
                        size,
                        vma->vm_page_prot)) {
        err_msg(err_trace, "%s(): error in remap_pfn_range.\n", __FUNCTION__ );
        return -EAGAIN;
    }
    return 0;
}

//-----------------------------------------------------------------------------
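/*
 * mmap sketch (illustrative only): pex_device_mmap() passes vma->vm_pgoff
 * straight to remap_pfn_range(), so the offset handed to mmap() from
 * userspace is expected to be the page-aligned physical/bus address of a
 * buffer, for example one obtained through IOCTL_PEX_MEM_ALLOC (assumption:
 * that ioctl returns such an address).
 *
 *     void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *                    MAP_SHARED, fd, (off_t)phys_addr);
 *     if (p == MAP_FAILED)
 *         perror("mmap");
 */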

void tasklet_isr( unsigned long Context )
{
    struct CDmaChannel *DmaChannel = (struct CDmaChannel *)Context;
    unsigned long flags = 0;

    spin_lock_irqsave(&DmaChannel->m_DmaLock, flags);

    printk("%s(): [DMA%d] m_CurBlockNum = %d, m_BlockCount = %d\n",
           __FUNCTION__, DmaChannel->m_NumberOfChannel, DmaChannel->m_CurBlockNum, DmaChannel->m_BlockCount );
    DmaChannel->m_State = *DmaChannel->m_pStub;

    SetEvent( &DmaChannel->m_BlockEndEvent );

    if(DmaChannel->m_CurBlockNum >= DmaChannel->m_BlockCount)
    {
        HwCompleteDmaTransfer(DmaChannel->m_Board,DmaChannel->m_NumberOfChannel);
        SetEvent( &DmaChannel->m_BufferEndEvent );
        wake_up_interruptible(&DmaChannel->m_DmaWq);
        printk("%s(): Wake up!\n", __FUNCTION__);
    }

    spin_unlock_irqrestore(&DmaChannel->m_DmaLock, flags);
}

//-----------------------------------------------------------------------------
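/*
 * pex_device_isr(): top-half interrupt handler (registered with IRQF_SHARED).
 * It returns IRQ_NONE unless DMA or flag interrupts are enabled for this
 * board, then scans the enabled DMA channels round-robin starting at
 * m_primChan looking for a FIFO with IntRql set. For the matching channel it
 * advances the transfer with NextDmaTransfer() (pausing the DMA controller
 * when no block is left), clears the FIFO flag and returns IRQ_HANDLED.
 * Block/buffer completion is signalled from tasklet_isr() above, which is
 * registered per channel via CDmaChannelCreate().
 */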

static irqreturn_t pex_device_isr( int irq, void *pContext )
{
    FIFO_STATUS FifoStatus;

    struct pex_device* pDevice = (struct pex_device*)pContext;            // our device

    if(!pDevice->m_DmaIrqEnbl && !pDevice->m_FlgIrqEnbl)
        return IRQ_NONE;

    if(pDevice->m_FlgIrqEnbl)
    {  // interrupt from the status flags
        /*
            u32 status = ReadOperationWordReg(pDevice, PEMAINadr_BRD_STATUS);
            err_msg(err_trace, "%s(): BRD_STATUS = 0x%X.\n", __FUNCTION__, status);
            if(status & 0x4000)
            {
                    for(int i = 0; i < NUM_TETR_IRQ; i++)
                            if(pDevice->m_TetrIrq[i] != 0)
                            {
                                    u32 status = ReadAmbMainReg(pDevice, pDevice->m_TetrIrq[i].Address);
                                    KdPrint(("CWambpex::WambpexIsr: TetrIrq = %d, Address = 0x%X, IrqInv = 0x%X, IrqMask = 0x%X, Status = 0x%X.\n",
                                                            i, pDevice->m_TetrIrq[i].Address, pDevice->m_TetrIrq[i].IrqInv, pDevice->m_TetrIrq[i].IrqMask, status));
                                    status ^= pDevice->m_TetrIrq[i].IrqInv;
                                    status &= pDevice->m_TetrIrq[i].IrqMask;
                                    KdPrint(("CWambpex::WambpexIsr: TetrIrq = %d, Address = 0x%X, IrqInv = 0x%X, IrqMask = 0x%X, Status = 0x%X.\n",
                                                            i, pDevice->m_TetrIrq[i].Address, pDevice->m_TetrIrq[i].IrqInv, pDevice->m_TetrIrq[i].IrqMask, status));
                                    if(status)
                                    {
                                            KeInsertQueueDpc(&pDevice->m_TetrIrq[i].Dpc, NULL, NULL);
                                            KdPrint(("CWambpex::WambpexIsr - Tetrad IRQ address = %d\n", pDevice->m_TetrIrq[i].Address));
                                            // clear the status bit that raised the interrupt
                                            //pDevice->WriteAmbMainReg(pDevice->m_TetrIrq[i].Address + 0x200);
                                            ULONG CmdAddress = pDevice->m_TetrIrq[i].Address + TRDadr_CMD_ADR * REG_SIZE;
                                            pDevice->WriteAmbMainReg(CmdAddress, 0);
                                            ULONG DataAddress = pDevice->m_TetrIrq[i].Address + TRDadr_CMD_DATA * REG_SIZE;
                                            ULONG Mode0Value = pDevice->ReadAmbMainReg(DataAddress);
                                            Mode0Value &= 0xFFFB;
                                            //pDevice->WriteAmbMainReg(CmdAddress, 0);
                                            pDevice->WriteAmbMainReg(DataAddress, Mode0Value);
                                            break;
                                    }
                            }
                return IRQ_HANDLED;
            }
            else // not our interrupt at all
                    return IRQ_NONE;    // we did not interrupt
            */
    }

    if(pDevice->m_DmaIrqEnbl)
    {   // interrupt from the DMA channels
        u32 i=0;
        u32 FifoAddr = 0;
        u32 iChan = pDevice->m_primChan;
        u32 NumberOfChannel = -1;

        for(i = 0; i < MAX_NUMBER_OF_DMACHANNELS; i++)
        {
            if(pDevice->m_DmaChanMask & (1 << iChan))
            {
                FifoAddr = pDevice->m_FifoAddr[iChan];
                FifoStatus.AsWhole = ReadOperationWordReg(pDevice, PEFIFOadr_FIFO_STATUS + FifoAddr);
                if(FifoStatus.ByBits.IntRql)
                {
                    err_msg(err_trace, "%s(): - Channel = %d, Fifo Status = 0x%X\n", __FUNCTION__, iChan, FifoStatus.AsWhole);
                    NumberOfChannel = iChan;
                    pDevice->m_primChan = ((pDevice->m_primChan+1) >= MAX_NUMBER_OF_DMACHANNELS) ? 0 : pDevice->m_primChan+1;
                    break;
                }
            }
            iChan = ((iChan+1) >= MAX_NUMBER_OF_DMACHANNELS) ? 0 : iChan+1;
        }

        if(NumberOfChannel != -1)
        {
            u32 flag = 0;

            err_msg(err_trace, "%s(%d)\n", __FUNCTION__, atomic_read(&pDevice->m_TotalIRQ));

            flag = NextDmaTransfer(pDevice->m_DmaChannel[NumberOfChannel]);
            if(!flag)
            {
                DMA_CTRL_EXT CtrlExt;
                CtrlExt.AsWhole = 0;
                CtrlExt.ByBits.Pause = 1;
                CtrlExt.ByBits.Start = 1;
                WriteOperationWordReg(pDevice, PEFIFOadr_DMA_CTRL + FifoAddr, CtrlExt.AsWhole);
                //err_msg(err_trace, "%s(): - Pause (%d) - m_CurBlockNum = %d, m_DoneBlock = %d\n", __FUNCTION__, atomic_read(&pDevice->m_TotalIRQ),
                //        pDevice->m_DmaChannel[NumberOfChannel]->m_CurBlockNum,
                //        pDevice->m_DmaChannel[NumberOfChannel]->m_DoneBlock);
            }

            //err_msg(err_trace, "%s(): - Flag Clear\n", __FUNCTION__);
            WriteOperationWordReg(pDevice, PEFIFOadr_FLAG_CLR + FifoAddr, 0x10);
            WriteOperationWordReg(pDevice, PEFIFOadr_FLAG_CLR + FifoAddr, 0x00);
            //err_msg(err_trace, "%s(): - Complete\n", __FUNCTION__);

            atomic_inc(&pDevice->m_TotalIRQ);

            return IRQ_HANDLED;
        }
    }
    return IRQ_NONE;    // we did not interrupt
}

//-----------------------------------------------------------------------------

struct file_operations pex_fops = {

    .owner = THIS_MODULE,
    .read = NULL,
    .write = NULL,

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)
    .unlocked_ioctl = pex_device_ioctl,
    .compat_ioctl = pex_device_ioctl,
#else
    .ioctl = pex_device_ioctl,
#endif

    .mmap = pex_device_mmap,
    .open = pex_device_open,
    .release = pex_device_close,
    .fasync = pex_device_fasync,
    .poll = pex_device_poll,
};

//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------

static const struct pci_device_id pex_device_id[] = {
{
        .vendor =       INSYS_VENDOR_ID,
        .device =       AMBPEX5_DEVID,
        .subvendor =    PCI_ANY_ID,
        .subdevice =    PCI_ANY_ID,
},

{ },
};

MODULE_DEVICE_TABLE(pci, pex_device_id);

//-----------------------------------------------------------------------------
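/*
 * pex_device_probe(): per-board initialisation. Sequence: allocate and
 * initialise the pex_device bookkeeping, enable the PCI device and bus
 * mastering, set the DMA masks, ioremap BAR0/BAR1, request the shared IRQ,
 * register the char device and create the /dev/pexdrvN node, initialise the
 * hardware (InitializeBoard) and one CDmaChannel per bit set in
 * m_DmaChanMask, then publish the board on device_list and in /proc
 * (pex_register_proc). Error paths unwind in reverse order.
 */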

static int pex_device_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
    int error = 0;
    int i = 0;
    struct pex_device *brd = NULL;

    mutex_lock(&pex_mutex);

    brd = kzalloc(sizeof(struct pex_device), GFP_KERNEL);
    if(!brd) {
        error = -ENOMEM;
        goto do_out;
    }

    INIT_LIST_HEAD(&brd->m_list);
    mutex_init(&brd->m_BoardMutex);
    sema_init(&brd->m_BoardSem, 1);
    spin_lock_init(&brd->m_BoardLock);
    atomic_set(&brd->m_TotalIRQ, 0);
    init_waitqueue_head(&brd->m_WaitQueue);
    init_timer(&brd->m_TimeoutTimer);
    spin_lock_init(&brd->m_MemListLock);
    atomic_set(&brd->m_MemListCount, 0);
    INIT_LIST_HEAD(&brd->m_MemList);
    brd->m_pci = dev;
    brd->m_Interrupt = -1;
    brd->m_DmaIrqEnbl = 0;
    brd->m_FlgIrqEnbl = 0;
    brd->m_class = pex_class;

    set_device_name(brd, dev->device, boards_count);

    dbg_msg(dbg_trace, "%s(): device_id = %x, vendor_id = %x, board name %s\n", __FUNCTION__, dev->device, dev->vendor, brd->m_name);

    error = pci_enable_device(dev);
    if(error) {
        err_msg(err_trace, "%s(): error enabling pci device\n", __FUNCTION__);
        goto do_free_memory;
    }

    if (pci_set_dma_mask(dev, DMA_BIT_MASK(64)) || pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(64))) {
        printk("%s(): error setting pci dma mask\n", __FUNCTION__);
        error = -ENODEV;
        goto do_disable_device;
    }

    pci_set_master(dev);

    brd->m_BAR0.physical_address = pci_resource_start(dev, 0);
    brd->m_BAR0.size = pci_resource_len(dev, 0);
    brd->m_BAR0.virtual_address = ioremap_nocache(brd->m_BAR0.physical_address, brd->m_BAR0.size);
    if(!brd->m_BAR0.virtual_address) {
        error = -ENOMEM;
        err_msg(err_trace, "%s(): error map device memory at bar%d\n", __FUNCTION__, 0);
        goto do_disable_device;
    }

    dbg_msg(dbg_trace, "%s(): map bar0 %zx -> %p\n", __FUNCTION__, brd->m_BAR0.physical_address, brd->m_BAR0.virtual_address);

    brd->m_BAR1.physical_address = pci_resource_start(dev, 1);
    brd->m_BAR1.size = pci_resource_len(dev, 1);
    brd->m_BAR1.virtual_address = ioremap_nocache(brd->m_BAR1.physical_address, brd->m_BAR1.size);
    if(!brd->m_BAR1.virtual_address) {
        error = -ENOMEM;
        err_msg(err_trace, "%s(): error map device memory at bar%d\n", __FUNCTION__, 1);
        goto do_unmap_bar0;
    }

    dbg_msg(dbg_trace, "%s(): map bar1 %zx -> %p\n", __FUNCTION__, brd->m_BAR1.physical_address, brd->m_BAR1.virtual_address);

    error = request_irq(dev->irq, pex_device_isr, IRQF_SHARED, brd->m_name, brd);
    if( error < 0) {
        error = -EBUSY;
        err_msg( err_trace, "%s(): error in request_irq()\n", __FUNCTION__ );
        goto do_unmap_bar1;
    }

    brd->m_Interrupt = dev->irq;

    cdev_init(&brd->m_cdev, &pex_fops);
    brd->m_cdev.owner = THIS_MODULE;
    brd->m_cdev.ops = &pex_fops;
    brd->m_devno = MKDEV(MAJOR(devno), boards_count);

    error = cdev_add(&brd->m_cdev, brd->m_devno, 1);
    if(error) {
        err_msg(err_trace, "%s(): Error adding char device %d\n", __FUNCTION__, boards_count);
        error = -EINVAL;
        goto do_free_irq;
    }

    dbg_msg(dbg_trace, "%s(): Add cdev %d\n", __FUNCTION__, boards_count);

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)
    brd->m_device = device_create(pex_class, NULL, brd->m_devno, "%s%d", "pexdrv", boards_count);
#else
    brd->m_device = device_create(pex_class, NULL, brd->m_devno, NULL, "%s%d", "pexdrv", boards_count);
#endif
    if(IS_ERR(brd->m_device)) {
        err_msg(err_trace, "%s(): Error creating device for board: %s\n", __FUNCTION__, brd->m_name);
        error = PTR_ERR(brd->m_device);
        goto do_delete_cdev;
    }

    dbg_msg(dbg_trace, "%s(): Create device file for board: %s\n", __FUNCTION__, brd->m_name);

    brd->m_BoardIndex = boards_count;

    if(pci_set_dma_mask(brd->m_pci, DMA_BIT_MASK(32))) {
        dbg_msg(dbg_trace, "%s(): Error in pci_set_dma_mask() - no suitable DMA available\n", __FUNCTION__);
    }

    if (dma_set_mask_and_coherent(&brd->m_pci->dev, DMA_BIT_MASK(32))) {
        dbg_msg(dbg_trace, "%s(): Error in dma_set_mask_and_coherent() - no suitable DMA available\n", __FUNCTION__);
    }

    InitializeBoard(brd);

    for(i = 0; i < MAX_NUMBER_OF_DMACHANNELS; i++) {

        if(brd->m_DmaChanMask & (1 << i)) {

            brd->m_DmaChannel[i] = CDmaChannelCreate( i,  tasklet_isr, brd,
                                                      &brd->m_pci->dev,
                                                      brd->m_MaxDmaSize[i],
                                                      brd->m_BlockFifoId[i], 1 );
        }
    }

    pex_register_proc(brd->m_name, 0, brd);

    list_add_tail(&brd->m_list, &device_list);

    boards_count++;

    dbg_msg(dbg_trace, "%s(): Board %s - setup complete\n", __FUNCTION__, brd->m_name);

    mutex_unlock(&pex_mutex);

    return error;

do_delete_cdev:
    cdev_del(&brd->m_cdev);

do_free_irq:
    free_irq(brd->m_Interrupt, brd);

do_unmap_bar1:
    iounmap(brd->m_BAR1.virtual_address);

do_unmap_bar0:
    iounmap(brd->m_BAR0.virtual_address);

do_disable_device:
    pci_disable_device(dev);

do_free_memory:
    kfree(brd);

do_out:
    mutex_unlock(&pex_mutex);

    return error;
}

//-----------------------------------------------------------------------------
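/*
 * pex_device_remove(): teardown mirrors probe in reverse: free the IRQ,
 * remove the /proc entry, delete the DMA channel objects, release any
 * leftover user buffers (free_memory), destroy the device node and cdev,
 * unmap both BARs, disable the PCI device and drop the board from
 * device_list.
 */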

static void pex_device_remove(struct pci_dev *dev)
{
    struct list_head *pos, *n;
    struct pex_device *brd = NULL;
    int i = 0;

    dbg_msg(dbg_trace, "%s(): device_id = %x, vendor_id = %x\n", __FUNCTION__, dev->device, dev->vendor);

    mutex_lock(&pex_mutex);

    list_for_each_safe(pos, n, &device_list) {

        brd = list_entry(pos, struct pex_device, m_list);

        if(brd->m_pci == dev) {

            free_irq(brd->m_Interrupt, brd);
            dbg_msg(dbg_trace, "%s(): free_irq() - complete\n", __FUNCTION__);
            pex_remove_proc(brd->m_name);
            dbg_msg(dbg_trace, "%s(): pex_remove_proc() - complete\n", __FUNCTION__);
            for(i = 0; i < MAX_NUMBER_OF_DMACHANNELS; i++) {
                if(brd->m_DmaChannel[i]) {
                     CDmaChannelDelete(brd->m_DmaChannel[i]);
                     dbg_msg(dbg_trace, "%s(): free DMA channel %d - complete\n", __FUNCTION__, i);
                }
            }
            free_memory(brd);
            dbg_msg(dbg_trace, "%s(): free_memory() - complete\n", __FUNCTION__);
            device_destroy(pex_class, brd->m_devno);
            dbg_msg(dbg_trace, "%s(): device_destroy() - complete\n", __FUNCTION__);
            cdev_del(&brd->m_cdev);
            dbg_msg(dbg_trace, "%s(): cdev_del() - complete\n", __FUNCTION__);
            iounmap(brd->m_BAR1.virtual_address);
            dbg_msg(dbg_trace, "%s(): iounmap() - complete\n", __FUNCTION__);
            iounmap(brd->m_BAR0.virtual_address);
            dbg_msg(dbg_trace, "%s(): iounmap() - complete\n", __FUNCTION__);
            pci_disable_device(dev);
            dbg_msg(dbg_trace, "%s(): pci_disable_device() - complete\n", __FUNCTION__);
            list_del(pos);
            dbg_msg(dbg_trace, "%s(): list_del() - complete\n", __FUNCTION__);
            kfree(brd);
            dbg_msg(dbg_trace, "%s(): kfree() - complete\n", __FUNCTION__);
        }
    }

    mutex_unlock(&pex_mutex);
}

//-----------------------------------------------------------------------------

static struct pci_driver pex_pci_driver = {

    .name = PEX_DRIVER_NAME,
    .id_table = pex_device_id,
    .probe = pex_device_probe,
    .remove = pex_device_remove,
};

//-----------------------------------------------------------------------------
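/*
 * Module init/exit: pex_module_init() reserves MAX_PEXDEVICE_SUPPORT char
 * device minors, creates the driver's device class (named after
 * PEX_DRIVER_NAME) and registers the PCI driver; pex_module_cleanup()
 * undoes this in reverse order.
 */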

static int __init pex_module_init(void)
{
    int error = 0;

    dbg_msg(dbg_trace, "%s()\n", __FUNCTION__);

    mutex_init(&pex_mutex);

    error = alloc_chrdev_region(&devno, 0, MAX_PEXDEVICE_SUPPORT, PEX_DRIVER_NAME);
    if(error < 0) {
        err_msg(err_trace, "%s(): Error allocating char device region\n", __FUNCTION__);
        goto do_out;
    }

    dbg_msg(dbg_trace, "%s(): Allocate %d device numbers. Major number = %d\n", __FUNCTION__, MAX_PEXDEVICE_SUPPORT, MAJOR(devno));

    pex_class = class_create(THIS_MODULE, PEX_DRIVER_NAME);
    if(IS_ERR(pex_class)) {
        err_msg(err_trace, "%s(): Error creating device class\n", __FUNCTION__);
        error = PTR_ERR(pex_class);
        goto do_free_chrdev;
    }

    error = pci_register_driver(&pex_pci_driver);
    if(error < 0) {
        err_msg(err_trace, "%s(): Error registering pci driver\n", __FUNCTION__);
        error = -EINVAL;
        goto do_delete_class;
    }

    return 0;

do_delete_class:
    class_destroy(pex_class);

do_free_chrdev:
    unregister_chrdev_region(devno, MAX_PEXDEVICE_SUPPORT);

do_out:
    return error;
}

//-----------------------------------------------------------------------------

static void __exit pex_module_cleanup(void)
{
    dbg_msg(dbg_trace, "%s()\n", __FUNCTION__);

    pci_unregister_driver(&pex_pci_driver);

    if(pex_class)
        class_destroy(pex_class);

    unregister_chrdev_region(devno, MAX_PEXDEVICE_SUPPORT);
}

//-----------------------------------------------------------------------------

module_init(pex_module_init);
module_exit(pex_module_cleanup);

//-----------------------------------------------------------------------------
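/*
 * Loading sketch (assumption: the accompanying Makefile builds this file
 * into a module whose name matches PEX_DRIVER_NAME): after insmod, one
 * /dev/pexdrvN node is created per detected AMBPEX5 board by
 * pex_device_probe() above.
 */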
