OpenCores
URL https://opencores.org/ocsvn/pcie_ds_dma/pcie_ds_dma/trunk

Subversion Repositories pcie_ds_dma

pcie_ds_dma/trunk/soft/linux/driver/pexdrv/memory.c - Blame information for rev 36

#include <linux/kernel.h>
#define __NO_VERSION__
#include <linux/module.h>
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/pagemap.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <asm/io.h>
 
#include "memory.h"
#include "pexmodule.h"

//--------------------------------------------------------------------

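// Mark every page of the buffer at kernel virtual address 'va' as reserved
// (SetPageReserved) and return the number of pages processed. Note that
// PAGE_CACHE_SHIFT equals PAGE_SHIFT and was removed in Linux 4.6, so a
// newer kernel would use PAGE_SHIFT here.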
int lock_pages( void *va, u32 size )
{
    struct page *start_page_addr = virt_to_page(va);
    int i = 0;

    for (i=0; i < (size >> PAGE_CACHE_SHIFT); i++) {
        SetPageReserved(start_page_addr+i);
        //dbg_msg(dbg_trace, "%s(): page_addr[%d] = 0x%x\n", __FUNCTION__, i, (int)(start_page_addr+i));
    }

    return i;
}
 
//--------------------------------------------------------------------

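// Counterpart of lock_pages(): clear the reserved flag (ClearPageReserved)
// on every page of the buffer and return the number of pages processed.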
int unlock_pages( void *va, u32 size )
{
    struct page *start_page_addr = virt_to_page(va);
    int i = 0;

    for (i=0; i < (size >> PAGE_CACHE_SHIFT); i++) {
        ClearPageReserved(start_page_addr+i);
        //dbg_msg(dbg_trace, "%s(): page_addr[%d] = 0x%x\n", __FUNCTION__, i, (int)(start_page_addr+i));
    }

    return i;
}
 
//--------------------------------------------------------------------

//--------------------------------------------------------------------
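// The block below is disabled code kept for reference: it copies a
// memory_descriptor and its array of memory_block entries from user space.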
/*
static int copy_memory_descriptors(unsigned long arg, struct memory_descriptor *md, struct memory_block **mb)
{
    struct memory_block *mblocks = NULL;
    int error = 0;
    //int i = 0;

    if(copy_from_user((void*)md, (void*)arg, sizeof(struct memory_descriptor))) {
        err_msg(err_trace, "%s(): Error copy memory descriptor from user space\n", __FUNCTION__);
        error = -EINVAL;
        goto do_exit;
    }

    dbg_msg(dbg_trace, "%s(): md.total_blocks = %zd\n", __FUNCTION__, md->total_blocks );
    dbg_msg(dbg_trace, "%s(): md.blocks = %p\n", __FUNCTION__, md->blocks );

    mblocks = kzalloc(md->total_blocks*sizeof(struct memory_block), GFP_KERNEL);
    if(!mblocks) {
        err_msg(err_trace, "%s(): Error allocate memory for memory descriptors\n", __FUNCTION__);
        error = -ENOMEM;
        goto do_exit;
    }

    if(copy_from_user((void*)mblocks, (void*)md->blocks, md->total_blocks*sizeof(struct memory_block))) {
        err_msg(err_trace, "%s(): Error copy memory blocks from user space\n", __FUNCTION__);
        error = -EINVAL;
        goto do_free_mem;
    }

    //for(i=0; i<md->total_blocks; i++) {
    //    dbg_msg(dbg_trace, "%s(): mb[%d].size = 0x%x\n", __FUNCTION__, i, mblocks[i].size );
    //}

    *mb = mblocks;

    return 0;

do_free_mem:
    kfree(mblocks);

do_exit:
    return error;
}
*/
//-----------------------------------------------------------------------------

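// Stub: pinning of user-space pages is not implemented in this driver and the
// function always returns -1. The commented call below shows the old
// get_user_pages() signature that still took the task and mm as arguments.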
int lock_user_pages(unsigned long addr, int size)
{
    //int res = 0;
    //res = get_user_pages(current, current->mm, unsigned long start, int nr_pages, int write, int force,
    //                     struct page **pages, struct vm_area_struct **vmas);
    return -1;
}
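// A minimal sketch of how the pages could be pinned on a newer kernel (not part
// of the original driver; assumes get_user_pages_fast(start, nr_pages, gup_flags,
// pages) is available and that the caller later releases the pages with put_page()):
//
//     int nr_pages = size >> PAGE_SHIFT;
//     struct page **pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
//     int pinned = pages ? get_user_pages_fast(addr, nr_pages, FOLL_WRITE, pages) : -ENOMEM;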
 
//-----------------------------------------------------------------------------

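// Allocate one coherent DMA buffer of block_size bytes with dma_alloc_coherent(),
// reserve its pages, record it in the board's m_MemList (protected by
// m_MemListLock) and return the kernel virtual address; the bus address is
// returned through *dma_addr. Note: kzalloc()/dma_alloc_coherent() are called
// with GFP_KERNEL while m_MemListLock is held, and such allocations may sleep;
// a stricter version would allocate before taking the spinlock.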
void* allocate_memory_block(struct pex_device *brd, size_t block_size, dma_addr_t *dma_addr)
{
    struct mem_t *m = NULL;
    void *cpu_addr = NULL;
    dma_addr_t dma_handle = {0};
    int locked = 0;

    spin_lock(&brd->m_MemListLock);

    m = (struct mem_t*)kzalloc(sizeof(struct mem_t), GFP_KERNEL);
    if(!m) {
        err_msg(err_trace, "%s(): Error allocate memory for mem_t descriptor\n", __FUNCTION__);
        goto do_exit;
    }

    cpu_addr = dma_alloc_coherent(&brd->m_pci->dev, block_size, &dma_handle, GFP_KERNEL);
    if(!cpu_addr) {
        err_msg(err_trace, "%s(): Error allocate physical memory block.\n", __FUNCTION__);
        goto do_free_mem;
    }

    *dma_addr = dma_handle;
    m->dma_handle = dma_handle;
    m->cpu_addr = cpu_addr;
    m->size = block_size;

    locked = lock_pages(m->cpu_addr, m->size);

    list_add_tail(&m->list, &brd->m_MemList);

    atomic_inc(&brd->m_MemListCount);

    dbg_msg(dbg_trace, "%s(): %d: PA = 0x%zx, VA = %p, SZ = 0x%zx, PAGES = %d\n",
            __FUNCTION__, atomic_read(&brd->m_MemListCount), (size_t)m->dma_handle, m->cpu_addr, m->size, locked );

    spin_unlock(&brd->m_MemListLock);

    return cpu_addr;

do_free_mem:
    kfree(m);

do_exit:
    spin_unlock(&brd->m_MemListLock);

    return NULL;
}
 
//--------------------------------------------------------------------

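// Walk the board's m_MemList and free every block whose bus address matches
// mb.phys: clear the page reservations, release the coherent buffer with
// dma_free_coherent(), unlink the descriptor from the list and drop the count.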
int free_memory_block(struct pex_device *brd, struct memory_block mb)
{
    struct list_head *pos, *n;
    struct mem_t *m = NULL;
    int unlocked = 0;

    spin_lock(&brd->m_MemListLock);

    list_for_each_safe(pos, n, &brd->m_MemList) {

        m = list_entry(pos, struct mem_t, list);

        if(m->dma_handle != mb.phys)
            continue;

        unlocked = unlock_pages(m->cpu_addr, m->size);

        dma_free_coherent(&brd->m_pci->dev, m->size, m->cpu_addr, m->dma_handle);

        dbg_msg(dbg_trace, "%s(): %d: PA = 0x%zx, VA = %p, SZ = 0x%zx, PAGES = %d\n",
                __FUNCTION__, atomic_read(&brd->m_MemListCount), (size_t)m->dma_handle, m->cpu_addr, m->size, unlocked );

        list_del(pos);

        atomic_dec(&brd->m_MemListCount);

        kfree(m);
    }

    spin_unlock(&brd->m_MemListLock);

    return 0;
}
 
//--------------------------------------------------------------------
