URL
https://opencores.org/ocsvn/pcie_ds_dma/pcie_ds_dma/trunk
Subversion Repositories pcie_ds_dma
Compare Revisions
This comparison shows the changes necessary to convert path /pcie_ds_dma/trunk/soft/linux/driver/pexdrv from Rev 6 to Rev 7.
Rev 6 → Rev 7
/pexmodule.h
20,42 → 20,12
#ifndef _STREAMLL_H_ |
#include "streamll.h" |
#endif |
#ifndef _MEMORY_H_ |
#include "memory.h" |
#endif |
|
//----------------------------------------------------------------------------- |
|
struct address_t { |
|
size_t physical_address; |
void *virtual_address; |
size_t size; |
|
}; |
|
//----------------------------------------------------------------------------- |
|
struct mem_t { |
|
struct list_head list; |
dma_addr_t dma_handle; |
void *cpu_addr; |
size_t size; |
|
}; |
|
//----------------------------------------------------------------------------- |
|
struct dma_channel { |
int m_Number; |
int m_Use; |
struct pci_dev *m_pci; |
spinlock_t m_MemListLock; |
atomic_t m_MemListCount; |
struct list_head m_MemList; |
struct mem_t m_MemStub; |
}; |
|
//----------------------------------------------------------------------------- |
|
#define PEX_DRIVER_NAME "pex_driver" |
#define MAX_NUMBER_OF_DMACHANNELS 4 |
#define NUMBER_OF_PLDS 4 |
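
A note on the guarded-include idiom visible at the top of this hunk: testing a header's own guard macro before including it is an old compile-time micro-optimization that skips reopening an already-included file. It only works when the tested macro matches the guard the header actually defines, as in this small sketch (using the _MEMORY_H_ guard that the new memory.h later in this diff defines):

/* memory.h defines its own guard (see the new file below): */
#ifndef _MEMORY_H_
#define _MEMORY_H_
/* ... declarations ... */
#endif /* _MEMORY_H_ */

/* A consumer such as pexmodule.h can then test the same macro itself,
 * so the preprocessor never reopens memory.h once it has been seen: */
#ifndef _MEMORY_H_
#include "memory.h"
#endif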
/memory.c
0,0 → 1,190
|
#include <linux/kernel.h> |
#define __NO_VERSION__ |
#include <linux/module.h> |
#include <linux/types.h> |
#include <linux/ioport.h> |
#include <linux/pci.h> |
#include <linux/pagemap.h> |
#include <linux/interrupt.h> |
#include <linux/proc_fs.h> |
#include <asm/io.h> |
|
#include "memory.h" |
#include "pexmodule.h" |
|
//-------------------------------------------------------------------- |
|
// Mark every page of the buffer reserved so the VM core leaves it alone |
// (used before handing coherent DMA buffers to user space). Note that |
// 'size' is assumed to be a multiple of PAGE_SIZE; a trailing partial |
// page would not be reserved by this loop. |
int lock_pages( void *va, u32 size ) |
{ |
struct page *start_page_addr = virt_to_page(va); |
int i = 0; |
|
for (i=0; i < (size >> PAGE_CACHE_SHIFT); i++) { |
SetPageReserved(start_page_addr+i); |
//dbg_msg(dbg_trace, "%s(): page_addr[%d] = 0x%x\n", __FUNCTION__, i, (int)(start_page_addr+i)); |
} |
|
return i; |
} |
|
//-------------------------------------------------------------------- |
|
// Clear the reserved bit set by lock_pages() on every page of the buffer. |
int unlock_pages( void *va, u32 size ) |
{ |
struct page *start_page_addr = virt_to_page(va); |
int i = 0; |
|
for (i=0; i < (size >> PAGE_CACHE_SHIFT); i++) { |
ClearPageReserved(start_page_addr+i); |
//dbg_msg(dbg_trace, "%s(): page_addr[%d] = 0x%x\n", __FUNCTION__, i, (int)(start_page_addr+i)); |
} |
|
return i; |
} |
|
//-------------------------------------------------------------------- |
|
//-------------------------------------------------------------------- |
/* |
static int copy_memory_descriptors(unsigned long arg, struct memory_descriptor *md, struct memory_block **mb) |
{ |
struct memory_block *mblocks = NULL; |
int error = 0; |
//int i = 0; |
|
if(copy_from_user((void*)md, (void*)arg, sizeof(struct memory_descriptor))) { |
err_msg(err_trace, "%s(): Error copy memory descriptor from user space\n", __FUNCTION__); |
error = -EINVAL; |
goto do_exit; |
} |
|
dbg_msg(dbg_trace, "%s(): md.total_blocks = %zd\n", __FUNCTION__, md->total_blocks ); |
dbg_msg(dbg_trace, "%s(): md.blocks = %p\n", __FUNCTION__, md->blocks ); |
|
mblocks = kzalloc(md->total_blocks*sizeof(struct memory_block), GFP_KERNEL); |
if(!mblocks) { |
err_msg(err_trace, "%s(): Error allocate memory for memory descriptors\n", __FUNCTION__); |
error = -ENOMEM; |
goto do_exit; |
} |
|
if(copy_from_user((void*)mblocks, (void*)md->blocks, md->total_blocks*sizeof(struct memory_block))) { |
err_msg(err_trace, "%s(): Error copy memory blocks from user space\n", __FUNCTION__); |
error = -EINVAL; |
goto do_free_mem; |
} |
|
//for(i=0; i<md->total_blocks; i++) { |
// dbg_msg(dbg_trace, "%s(): mb[%d].size = 0x%x\n", __FUNCTION__, i, mblocks[i].size ); |
//} |
|
*mb = mblocks; |
|
return 0; |
|
do_free_mem: |
kfree(mblocks); // free the allocated block array; 'mb' is the caller's output pointer |
|
do_exit: |
return error; |
} |
*/ |
//----------------------------------------------------------------------------- |
|
int lock_user_pages(unsigned long addr, int size) |
{ |
// Not implemented yet. A later revision could pin the user pages with |
// get_user_pages(current, current->mm, addr, nr_pages, write, force, |
// pages, vmas) and build a scatter list from the result. |
return -ENOSYS; |
} |
|
//----------------------------------------------------------------------------- |
|
void* allocate_memory_block(struct pex_device *brd, size_t block_size, dma_addr_t *dma_addr) |
{ |
struct mem_t *m = NULL; |
void *cpu_addr = NULL; |
dma_addr_t dma_handle = {0}; |
int locked = 0; |
 |
// Allocate before taking the spinlock: kzalloc() and dma_alloc_coherent() |
// with GFP_KERNEL may sleep, which is not allowed under a held spinlock. |
m = (struct mem_t*)kzalloc(sizeof(struct mem_t), GFP_KERNEL); |
if(!m) { |
err_msg(err_trace, "%s(): Error allocating mem_t descriptor\n", __FUNCTION__); |
goto do_exit; |
} |
 |
cpu_addr = dma_alloc_coherent(&brd->m_pci->dev, block_size, &dma_handle, GFP_KERNEL); |
if(!cpu_addr) { |
err_msg(err_trace, "%s(): Error allocating coherent DMA memory block\n", __FUNCTION__); |
goto do_free_mem; |
} |
 |
*dma_addr = dma_handle; |
m->dma_handle = dma_handle; |
m->cpu_addr = cpu_addr; |
m->size = block_size; |
 |
locked = lock_pages(m->cpu_addr, m->size); |
 |
// The lock now only guards the list manipulation itself. |
spin_lock(&brd->m_MemListLock); |
list_add_tail(&m->list, &brd->m_MemList); |
atomic_inc(&brd->m_MemListCount); |
spin_unlock(&brd->m_MemListLock); |
 |
dbg_msg(dbg_trace, "%s(): %d: PA = 0x%zx, VA = %p, SZ = 0x%zx, PAGES = %d\n", |
__FUNCTION__, atomic_read(&brd->m_MemListCount), (size_t)m->dma_handle, m->cpu_addr, m->size, locked ); |
 |
return cpu_addr; |
 |
do_free_mem: |
kfree(m); |
 |
do_exit: |
return NULL; |
} |
|
//-------------------------------------------------------------------- |
|
// Find the block whose bus address matches mb.phys, unreserve its pages, |
// release the coherent buffer, and drop it from the board's list. |
int free_memory_block(struct pex_device *brd, struct memory_block mb) |
{ |
struct list_head *pos, *n; |
struct mem_t *m = NULL; |
int unlocked = 0; |
|
spin_lock(&brd->m_MemListLock); |
|
list_for_each_safe(pos, n, &brd->m_MemList) { |
|
m = list_entry(pos, struct mem_t, list); |
|
if(m->dma_handle != mb.phys) |
continue; |
|
unlocked = unlock_pages(m->cpu_addr, m->size); |
|
dma_free_coherent(&brd->m_pci->dev, m->size, m->cpu_addr, m->dma_handle); |
|
dbg_msg(dbg_trace, "%s(): %d: PA = 0x%zx, VA = %p, SZ = 0x%zx, PAGES = %d\n", |
__FUNCTION__, atomic_read(&brd->m_MemListCount), (size_t)m->dma_handle, m->cpu_addr, m->size, unlocked ); |
|
list_del(pos); |
|
atomic_dec(&brd->m_MemListCount); |
|
kfree(m); |
} |
|
spin_unlock(&brd->m_MemListLock); |
|
return 0; |
} |
|
//-------------------------------------------------------------------- |
|
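
Taken together, allocate_memory_block() and free_memory_block() form the pairing the ioctl layer builds on (ioctl_memory_alloc() in ioctlrw.c below starts from a zeroed struct memory_block). A minimal, hypothetical caller might look like the following sketch; it assumes struct memory_block carries a size field next to the phys field that free_memory_block() matches on, and it abbreviates error reporting:

#include "memory.h"
#include "pexmodule.h"

/* Hypothetical helper: allocate one coherent block for user space and
 * record its bus address so it can be handed back for freeing later.
 * 'mb->size' is an assumed field; 'mb->phys' is the field that
 * free_memory_block() above matches on. */
static int example_alloc_block(struct pex_device *brd, struct memory_block *mb)
{
    dma_addr_t dma = 0;
    void *va = allocate_memory_block(brd, mb->size, &dma);
    if (!va)
        return -ENOMEM;

    mb->phys = dma; /* user space passes this back to free the block */
    return 0;
}

/* Release path: free_memory_block() walks the board list and matches
 * the entry by the bus address stored in mb.phys. */
static int example_free_block(struct pex_device *brd, struct memory_block mb)
{
    return free_memory_block(brd, mb);
}

Keeping the bus address as the lookup key means user space never sees kernel virtual addresses; the driver recovers cpu_addr from its own list.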
/memory.h
0,0 → 1,50
|
#ifndef _MEMORY_H_ |
#define _MEMORY_H_ |
|
//----------------------------------------------------------------------------- |
|
struct address_t { |
|
size_t physical_address; |
void *virtual_address; |
size_t size; |
|
}; |
|
//----------------------------------------------------------------------------- |
|
struct mem_t { |
|
struct list_head list; |
dma_addr_t dma_handle; |
void *cpu_addr; |
size_t size; |
|
}; |
|
//----------------------------------------------------------------------------- |
|
struct dma_channel { |
int m_Number; |
int m_Use; |
struct pci_dev *m_pci; |
spinlock_t m_MemListLock; |
atomic_t m_MemListCount; |
struct list_head m_MemList; |
struct mem_t m_MemStub; |
}; |
|
//----------------------------------------------------------------------------- |
struct pex_device; |
struct memory_block; |
//----------------------------------------------------------------------------- |
|
int lock_pages( void *va, u32 size ); |
int unlock_pages( void *va, u32 size ); |
void* allocate_memory_block(struct pex_device *brd, size_t block_size, dma_addr_t *dma_addr); |
int free_memory_block(struct pex_device *brd, struct memory_block mb); |
|
//-------------------------------------------------------------------- |
|
#endif //_MEMORY_H_ |
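
Note that memory.h deliberately includes nothing itself, so it compiles only after the kernel headers defining size_t, u32, dma_addr_t, spinlock_t, struct list_head, and struct pci_dev are in scope. A consumer therefore needs an include order like this sketch, mirroring what memory.c does above:

#include <linux/types.h>    /* u32, size_t, dma_addr_t */
#include <linux/list.h>     /* struct list_head */
#include <linux/spinlock.h> /* spinlock_t */
#include <linux/pci.h>      /* struct pci_dev */

#include "memory.h"         /* safe now: every referenced type is defined */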
/insert
54,10 → 54,9
/sbin/rmmod $device |
status $? 0 |
else |
echo Loading module: ${module} |
echo -n " Loading pexdrv module : " |
fi |
|
echo -n " Loading pexdrv module : " |
insmod ./${module} |
|
until [ -e /dev/pexdrv0 ] |
/ioctlrw.c
54,138 → 54,7
} |
|
//-------------------------------------------------------------------- |
/* |
static int copy_memory_descriptors(unsigned long arg, struct memory_descriptor *md, struct memory_block **mb) |
{ |
struct memory_block *mblocks = NULL; |
int error = 0; |
//int i = 0; |
|
if(copy_from_user((void*)md, (void*)arg, sizeof(struct memory_descriptor))) { |
err_msg(err_trace, "%s(): Error copy memory descriptor from user space\n", __FUNCTION__); |
error = -EINVAL; |
goto do_exit; |
} |
|
dbg_msg(dbg_trace, "%s(): md.total_blocks = %zd\n", __FUNCTION__, md->total_blocks ); |
dbg_msg(dbg_trace, "%s(): md.blocks = %p\n", __FUNCTION__, md->blocks ); |
|
mblocks = kzalloc(md->total_blocks*sizeof(struct memory_block), GFP_KERNEL); |
if(!mblocks) { |
err_msg(err_trace, "%s(): Error allocate memory for memory descriptors\n", __FUNCTION__); |
error = -ENOMEM; |
goto do_exit; |
} |
|
if(copy_from_user((void*)mblocks, (void*)md->blocks, md->total_blocks*sizeof(struct memory_block))) { |
err_msg(err_trace, "%s(): Error copy memory blocks from user space\n", __FUNCTION__); |
error = -EINVAL; |
goto do_free_mem; |
} |
|
//for(i=0; i<md->total_blocks; i++) { |
// dbg_msg(dbg_trace, "%s(): mb[%d].size = 0x%x\n", __FUNCTION__, i, mblocks[i].size ); |
//} |
|
*mb = mblocks; |
|
return 0; |
|
do_free_mem: |
kfree(mb); |
|
do_exit: |
return error; |
} |
*/ |
//----------------------------------------------------------------------------- |
|
static void* allocate_memory_block(struct pex_device *brd, size_t block_size, dma_addr_t *dma_addr) |
{ |
struct mem_t *m = NULL; |
void *cpu_addr = NULL; |
dma_addr_t dma_handle = {0}; |
int locked = 0; |
|
spin_lock(&brd->m_MemListLock); |
|
m = (struct mem_t*)kzalloc(sizeof(struct mem_t), GFP_KERNEL); |
if(!m) { |
err_msg(err_trace, "%s(): Error allocate memory for mem_t descriptor\n", __FUNCTION__); |
goto do_exit; |
} |
|
cpu_addr = dma_alloc_coherent(&brd->m_pci->dev, block_size, &dma_handle, GFP_KERNEL); |
if(!cpu_addr) { |
err_msg(err_trace, "%s(): Error allocate physical memory block.\n", __FUNCTION__); |
goto do_free_mem; |
} |
|
*dma_addr = dma_handle; |
m->dma_handle = dma_handle; |
m->cpu_addr = cpu_addr; |
m->size = block_size; |
|
locked = lock_pages(m->cpu_addr, m->size); |
|
list_add_tail(&m->list, &brd->m_MemList); |
|
atomic_inc(&brd->m_MemListCount); |
|
dbg_msg(dbg_trace, "%s(): %d: PA = 0x%zx, VA = %p, SZ = 0x%zx, PAGES = %d\n", |
__FUNCTION__, atomic_read(&brd->m_MemListCount), (size_t)m->dma_handle, m->cpu_addr, m->size, locked ); |
|
spin_unlock(&brd->m_MemListLock); |
|
return cpu_addr; |
|
do_free_mem: |
kfree(m); |
|
do_exit: |
spin_unlock(&brd->m_MemListLock); |
|
return NULL; |
} |
|
//-------------------------------------------------------------------- |
|
static int free_memory_block(struct pex_device *brd, struct memory_block mb) |
{ |
struct list_head *pos, *n; |
struct mem_t *m = NULL; |
int unlocked = 0; |
|
spin_lock(&brd->m_MemListLock); |
|
list_for_each_safe(pos, n, &brd->m_MemList) { |
|
m = list_entry(pos, struct mem_t, list); |
|
if(m->dma_handle != mb.phys) |
continue; |
|
unlocked = unlock_pages(m->cpu_addr, m->size); |
|
dma_free_coherent(&brd->m_pci->dev, m->size, m->cpu_addr, m->dma_handle); |
|
dbg_msg(dbg_trace, "%s(): %d: PA = 0x%zx, VA = %p, SZ = 0x%zx, PAGES = %d\n", |
__FUNCTION__, atomic_read(&brd->m_MemListCount), (size_t)m->dma_handle, m->cpu_addr, m->size, unlocked ); |
|
list_del(pos); |
|
atomic_dec(&brd->m_MemListCount); |
|
kfree(m); |
} |
|
spin_unlock(&brd->m_MemListLock); |
|
return 0; |
} |
|
//-------------------------------------------------------------------- |
|
int ioctl_memory_alloc(struct pex_device *brd, unsigned long arg) |
{ |
struct memory_block mb = {0}; |
/Makefile
5,7 → 5,7
|
ifneq ($(KERNELRELEASE),) |
|
pexdrv-objs := event.o dmachan.o ioctlrw.o pexmodule.o pexproc.o hardware.o |
pexdrv-objs := memory.o event.o dmachan.o ioctlrw.o pexmodule.o pexproc.o hardware.o |
obj-m := pexdrv.o |
|
else |
32,4 → 32,4
endif |
|
install: |
./insert |
./insert |
/hardware.c
14,6 → 14,7
#include "pexmodule.h" |
#include "hardware.h" |
#include "ambpexregs.h" |
#include "memory.h" |
|
//-------------------------------------------------------------------- |
|
40,36 → 41,6
|
//-------------------------------------------------------------------- |
|
int lock_pages( void *va, u32 size ) |
{ |
struct page *start_page_addr = virt_to_page(va); |
int i = 0; |
|
for (i=0; i < (size >> PAGE_CACHE_SHIFT); i++) { |
SetPageReserved(start_page_addr+i); |
//dbg_msg(dbg_trace, "%s(): page_addr[%d] = 0x%x\n", __FUNCTION__, i, (int)(start_page_addr+i)); |
} |
|
return i; |
} |
|
//-------------------------------------------------------------------- |
|
int unlock_pages( void *va, u32 size ) |
{ |
struct page *start_page_addr = virt_to_page(va); |
int i = 0; |
|
for (i=0; i < (size >> PAGE_CACHE_SHIFT); i++) { |
ClearPageReserved(start_page_addr+i); |
//dbg_msg(dbg_trace, "%s(): page_addr[%d] = 0x%x\n", __FUNCTION__, i, (int)(start_page_addr+i)); |
} |
|
return i; |
} |
|
//-------------------------------------------------------------------- |
|
void read_memory32(u32 *src, u32 *dst, u32 cnt) |
{ |
int i=0; |