URL
https://opencores.org/ocsvn/pcie_ds_dma/pcie_ds_dma/trunk
Subversion Repositories pcie_ds_dma
[/] [pcie_ds_dma/] [trunk/] [soft/] [linux/] [driver/] [pexdrv/] [memory.c] - Rev 55
Go to most recent revision | Compare with Previous | Blame | View Log
#include <linux/kernel.h> #include <linux/version.h> #include <linux/module.h> #include <linux/types.h> #include <linux/ioport.h> #include <linux/pci.h> #include <linux/pagemap.h> #include <linux/interrupt.h> #include <linux/proc_fs.h> #include <linux/sched.h> #include <asm/io.h> #include "pexmodule.h" #include "memory.h" //-------------------------------------------------------------------- int lock_pages( void *va, u32 size ) { struct page *start_page_addr = virt_to_page(va); int i = 0; for (i=0; i < (size >> PAGE_SHIFT); i++) { SetPageReserved(start_page_addr+i); //dbg_msg(dbg_trace, "%s(): page_addr[%d] = 0x%x\n", __FUNCTION__, i, (int)(start_page_addr+i)); } return i; } //-------------------------------------------------------------------- int unlock_pages( void *va, u32 size ) { struct page *start_page_addr = virt_to_page(va); int i = 0; for (i=0; i < (size >> PAGE_SHIFT); i++) { ClearPageReserved(start_page_addr+i); //dbg_msg(dbg_trace, "%s(): page_addr[%d] = 0x%x\n", __FUNCTION__, i, (int)(start_page_addr+i)); } return i; } //-------------------------------------------------------------------- int check_address( void *pMemUserAddr ) { size_t addr = (size_t)pMemUserAddr; size_t mask = (size_t)~PAGE_MASK; printk("%s()\n", __FUNCTION__); // адрес пользовательского буфера должен быть выровнен на страницу if(addr & mask) { printk("%s(): %p - Error! Address must be aling at PAGE_SIZE border\n", __FUNCTION__, pMemUserAddr ); return 1; } return 0; } //-------------------------------------------------------------------- int check_size( size_t userSize ) { printk("%s()\n", __FUNCTION__); // размер пользовательского буфера должен быть кратен размеру страницы if((userSize % PAGE_SIZE) != 0) { printk("%s(): Invalid user memory block size - 0x%lX.\n", __FUNCTION__, userSize); return 1; } return 0; } //-------------------------------------------------------------------- int lock_user_memory( SHARED_MEMORY_DESCRIPTION *MemDscr, void* userSpaceAddress, size_t userSpaceSize ) { int i = 0; int requested_page_count = 0; int allocated_page_count = 0; printk("%s()\n", __FUNCTION__); if(!MemDscr) { printk("%s(): Invalid memory descriptor.\n", __FUNCTION__); return -EINVAL; } requested_page_count = (userSpaceSize >> PAGE_SHIFT); MemDscr->LockedPages = (struct page**)kmalloc(requested_page_count*sizeof(struct page*), GFP_KERNEL); if(!MemDscr->LockedPages) { printk("%s(): Cant allocate memory for locked pages pointers.\n", __FUNCTION__); return -ENOMEM; } memset(MemDscr->LockedPages,0,requested_page_count*sizeof(struct page*)); down_read(¤t->mm->mmap_sem); #if LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) allocated_page_count = get_user_pages(current, current->mm, (size_t)userSpaceAddress, requested_page_count, 1, 0, MemDscr->LockedPages, 0); #endif #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,5,0)) && (LINUX_VERSION_CODE < KERNEL_VERSION(4,9,0)) allocated_page_count = get_user_pages((size_t)userSpaceAddress, requested_page_count, 1, 0, MemDscr->LockedPages, 0); #endif #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,9,0)) allocated_page_count = get_user_pages((size_t)userSpaceAddress, requested_page_count, 1, MemDscr->LockedPages, 0); #endif up_read(¤t->mm->mmap_sem); // если все ok то result содержит число страниц в массиве struct page *pages if(MemDscr->PageCount <= 0) { printk("%s(): Error to lock memory pages.\n", __FUNCTION__); kfree(MemDscr->LockedPages); MemDscr->LockedPages = NULL; MemDscr->PageCount = 0; return -ENOMEM; } printk("%s(): MemDscr->PageCount = %ld\n", __FUNCTION__, MemDscr->PageCount); printk("%s(): MemDscr->LockedPages = %p\n", __FUNCTION__, MemDscr->LockedPages); for(i=0; i<MemDscr->PageCount; i++) { printk("%s(): LockedPages[%d] = %p\n", __FUNCTION__, i, MemDscr->LockedPages[i]); printk("%s(): PhysicalAddress = %p\n", __FUNCTION__, (void*)page_to_phys(MemDscr->LockedPages[i])); if(!PageReserved(MemDscr->LockedPages[i])) { SetPageReserved(MemDscr->LockedPages[i]); } } printk("%s(): Lock %ld memory pages\n", __FUNCTION__, MemDscr->PageCount); return 0; } //-------------------------------------------------------------------- int unlock_user_memory( SHARED_MEMORY_DESCRIPTION *MemDscr ) { int i = 0; printk("%s()\n", __FUNCTION__); if(!MemDscr) { printk("%s(): Invalid parameter MemDscr = %p\n", __FUNCTION__, MemDscr); return -EINVAL; } printk("%s(): MemDscr = %p\n", __FUNCTION__, MemDscr); if(MemDscr->LockedPages) printk("%s(): MemDscr->LockedPages = %p\n", __FUNCTION__, MemDscr->LockedPages); for(i=0; i<MemDscr->PageCount; i++) { if(MemDscr->LockedPages[i]) { ClearPageReserved(MemDscr->LockedPages[i]); //page_cache_release(MemDscr->LockedPages[i]); printk("%s(): Unlock page %p\n", __FUNCTION__, MemDscr->LockedPages[i]); } } if(MemDscr->LockedPages) kfree(MemDscr->LockedPages); return 0; } //-------------------------------------------------------------------- void* allocate_memory_block(struct pex_device *brd, size_t block_size, dma_addr_t *dma_addr) { struct mem_t *m = NULL; void *cpu_addr = NULL; dma_addr_t dma_handle = {0}; int locked = 0; spin_lock(&brd->m_MemListLock); m = (struct mem_t*)kzalloc(sizeof(struct mem_t), GFP_KERNEL); if(!m) { err_msg(err_trace, "%s(): Error allocate memory for mem_t descriptor\n", __FUNCTION__); goto do_exit; } cpu_addr = dma_alloc_coherent(&brd->m_pci->dev, block_size, &dma_handle, GFP_KERNEL); if(!cpu_addr) { err_msg(err_trace, "%s(): Error allocate physical memory block.\n", __FUNCTION__); goto do_free_mem; } *dma_addr = dma_handle; m->dma_handle = dma_handle; m->cpu_addr = cpu_addr; m->size = block_size; locked = lock_pages(m->cpu_addr, m->size); list_add_tail(&m->list, &brd->m_MemList); atomic_inc(&brd->m_MemListCount); dbg_msg(dbg_trace, "%s(): %d: PA = 0x%zx, VA = %p, SZ = 0x%zx, PAGES = %d\n", __FUNCTION__, atomic_read(&brd->m_MemListCount), (size_t)m->dma_handle, m->cpu_addr, m->size, locked ); spin_unlock(&brd->m_MemListLock); return cpu_addr; do_free_mem: kfree(m); do_exit: spin_unlock(&brd->m_MemListLock); return NULL; } //-------------------------------------------------------------------- int free_memory_block(struct pex_device *brd, struct memory_block mb) { struct list_head *pos, *n; struct mem_t *m = NULL; int unlocked = 0; spin_lock(&brd->m_MemListLock); list_for_each_safe(pos, n, &brd->m_MemList) { m = list_entry(pos, struct mem_t, list); if(m->dma_handle != mb.phys) continue; unlocked = unlock_pages(m->cpu_addr, m->size); dma_free_coherent(&brd->m_pci->dev, m->size, m->cpu_addr, m->dma_handle); dbg_msg(dbg_trace, "%s(): %d: PA = 0x%zx, VA = %p, SZ = 0x%zx, PAGES = %d\n", __FUNCTION__, atomic_read(&brd->m_MemListCount), (size_t)m->dma_handle, m->cpu_addr, m->size, unlocked ); list_del(pos); atomic_dec(&brd->m_MemListCount); kfree(m); } spin_unlock(&brd->m_MemListLock); return 0; } //--------------------------------------------------------------------
Go to most recent revision | Compare with Previous | Blame | View Log