OpenCores Subversion repository: pcie_ds_dma
https://opencores.org/ocsvn/pcie_ds_dma/pcie_ds_dma/trunk
pcie_ds_dma/trunk/soft/linux/driver/pexdrv/memory.c (rev 55)
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/pagemap.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>

#include <asm/io.h>

#include "pexmodule.h"
#include "memory.h"

//--------------------------------------------------------------------

// Mark every page of a kernel-virtual, physically contiguous buffer as
// reserved; returns the number of pages processed.
int lock_pages( void *va, u32 size )
{
    struct page *start_page_addr = virt_to_page(va);
    int i = 0;

    for (i=0; i < (size >> PAGE_SHIFT); i++) {
        SetPageReserved(start_page_addr+i);
        //dbg_msg(dbg_trace, "%s(): page_addr[%d] = 0x%x\n", __FUNCTION__, i, (int)(start_page_addr+i));
    }

    return i;
}

//--------------------------------------------------------------------

// Clear the reserved flag set by lock_pages(); returns the number of pages
// processed.
int unlock_pages( void *va, u32 size )
{
    struct page *start_page_addr = virt_to_page(va);
    int i = 0;

    for (i=0; i < (size >> PAGE_SHIFT); i++) {
        ClearPageReserved(start_page_addr+i);
        //dbg_msg(dbg_trace, "%s(): page_addr[%d] = 0x%x\n", __FUNCTION__, i, (int)(start_page_addr+i));
    }

    return i;
}

//--------------------------------------------------------------------

// Check that a user-space buffer address is page-aligned.
int check_address( void *pMemUserAddr )
{
        size_t addr = (size_t)pMemUserAddr;
        size_t mask = (size_t)~PAGE_MASK;

        printk("%s()\n", __FUNCTION__);

        // the user buffer address must be page-aligned
        if(addr & mask) {
            printk("%s(): %p - Error! Address must be aligned on a PAGE_SIZE boundary\n", __FUNCTION__, pMemUserAddr );
            return 1;
        }

        return 0;
}

//--------------------------------------------------------------------

// Check that a user-space buffer size is a whole number of pages.
int check_size( size_t userSize )
{
        printk("%s()\n", __FUNCTION__);

        // the user buffer size must be a multiple of PAGE_SIZE
        if((userSize % PAGE_SIZE) != 0) {
            printk("%s(): Invalid user memory block size - 0x%zX.\n", __FUNCTION__, userSize);
            return 1;
        }

        return 0;
}

//--------------------------------------------------------------------

// Pin a page-aligned user-space buffer in memory: allocate a page-pointer
// array, pin the pages with get_user_pages() (whose signature depends on the
// kernel version) and mark each pinned page reserved.
int lock_user_memory( SHARED_MEMORY_DESCRIPTION *MemDscr, void* userSpaceAddress, size_t userSpaceSize )
{
        int i = 0;
        int requested_page_count = 0;
        int allocated_page_count = 0;

        printk("%s()\n", __FUNCTION__);

        if(!MemDscr) {
            printk("%s(): Invalid memory descriptor.\n", __FUNCTION__);
            return -EINVAL;
        }

        requested_page_count = (userSpaceSize >> PAGE_SHIFT);

        MemDscr->LockedPages = (struct page**)kmalloc(requested_page_count*sizeof(struct page*), GFP_KERNEL);
        if(!MemDscr->LockedPages) {
            printk("%s(): Can't allocate memory for locked pages pointers.\n", __FUNCTION__);
            return -ENOMEM;
        }

        memset(MemDscr->LockedPages,0,requested_page_count*sizeof(struct page*));

        down_read(&current->mm->mmap_sem);
#if LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0)
        allocated_page_count = get_user_pages(current,
                                          current->mm,
                                          (size_t)userSpaceAddress,
                                          requested_page_count,
                                          1,
                                          0,
                                          MemDscr->LockedPages,
                                          0);
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,5,0)) && (LINUX_VERSION_CODE < KERNEL_VERSION(4,9,0))
        allocated_page_count = get_user_pages((size_t)userSpaceAddress,
                                          requested_page_count,
                                          1,
                                          0,
                                          MemDscr->LockedPages,
                                          0);
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,9,0))
        allocated_page_count = get_user_pages((size_t)userSpaceAddress,
                                          requested_page_count,
                                          1,
                                          MemDscr->LockedPages,
                                          0);
#endif

        up_read(&current->mm->mmap_sem);

        // on success, get_user_pages() returns the number of pages placed in
        // the LockedPages array; record it in the descriptor
        MemDscr->PageCount = allocated_page_count;

        if(MemDscr->PageCount <= 0) {
            printk("%s(): Failed to lock memory pages.\n", __FUNCTION__);
            kfree(MemDscr->LockedPages);
            MemDscr->LockedPages = NULL;
            MemDscr->PageCount = 0;
            return -ENOMEM;
        }

        printk("%s(): MemDscr->PageCount = %ld\n", __FUNCTION__, MemDscr->PageCount);
        printk("%s(): MemDscr->LockedPages = %p\n", __FUNCTION__, MemDscr->LockedPages);

        for(i=0; i<MemDscr->PageCount; i++) {

            printk("%s(): LockedPages[%d] = %p\n", __FUNCTION__, i, MemDscr->LockedPages[i]);
            printk("%s(): PhysicalAddress = %p\n", __FUNCTION__, (void*)page_to_phys(MemDscr->LockedPages[i]));

            if(!PageReserved(MemDscr->LockedPages[i])) {
                SetPageReserved(MemDscr->LockedPages[i]);
            }
        }

        printk("%s(): Lock %ld memory pages\n", __FUNCTION__, MemDscr->PageCount);

        return 0;
}

//--------------------------------------------------------------------

// Undo lock_user_memory(): clear the reserved flag on each pinned page and
// free the page-pointer array.
int unlock_user_memory( SHARED_MEMORY_DESCRIPTION *MemDscr )
{
        int i = 0;

        printk("%s()\n", __FUNCTION__);

        if(!MemDscr) {
            printk("%s(): Invalid parameter MemDscr = %p\n", __FUNCTION__, MemDscr);
            return -EINVAL;
        }

        printk("%s(): MemDscr = %p\n", __FUNCTION__, MemDscr);

        if(MemDscr->LockedPages)
            printk("%s(): MemDscr->LockedPages = %p\n", __FUNCTION__, MemDscr->LockedPages);

        for(i=0; i<MemDscr->PageCount; i++) {
            if(MemDscr->LockedPages[i]) {
                ClearPageReserved(MemDscr->LockedPages[i]);
                //page_cache_release(MemDscr->LockedPages[i]);
                printk("%s(): Unlock page %p\n", __FUNCTION__, MemDscr->LockedPages[i]);
            }
        }

        if(MemDscr->LockedPages)
            kfree(MemDscr->LockedPages);

        return 0;
}

//--------------------------------------------------------------------
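
// Example (editor's sketch, not part of the original driver): one possible
// calling sequence for the helpers above. The function name is hypothetical;
// only check_address(), check_size(), lock_user_memory() and
// unlock_user_memory() come from this file.
static int example_pin_user_buffer( SHARED_MEMORY_DESCRIPTION *dscr,
                                    void *user_addr, size_t user_size )
{
    int err = 0;

    // reject buffers that are not page-aligned or not a whole number of pages
    if(check_address(user_addr) || check_size(user_size))
        return -EINVAL;

    // pin the user pages; on success dscr->LockedPages / dscr->PageCount
    // describe the pinned pages
    err = lock_user_memory(dscr, user_addr, user_size);
    if(err)
        return err;

    // ... program the DMA engine with page_to_phys(dscr->LockedPages[i]) ...

    // drop the reserved flag and free the page-pointer array when done
    return unlock_user_memory(dscr);
}

//--------------------------------------------------------------------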

// Allocate a DMA-coherent memory block for the board, mark its pages
// reserved and record it in the board's memory list.
void* allocate_memory_block(struct pex_device *brd, size_t block_size, dma_addr_t *dma_addr)
{
    struct mem_t *m = NULL;
    void *cpu_addr = NULL;
    dma_addr_t dma_handle = {0};
    int locked = 0;

    spin_lock(&brd->m_MemListLock);

    m = (struct mem_t*)kzalloc(sizeof(struct mem_t), GFP_KERNEL);
    if(!m) {
        err_msg(err_trace, "%s(): Failed to allocate memory for mem_t descriptor\n", __FUNCTION__);
        goto do_exit;
    }

    cpu_addr = dma_alloc_coherent(&brd->m_pci->dev, block_size, &dma_handle, GFP_KERNEL);
    if(!cpu_addr) {
        err_msg(err_trace, "%s(): Failed to allocate physical memory block.\n", __FUNCTION__);
        goto do_free_mem;
    }

    *dma_addr = dma_handle;
    m->dma_handle = dma_handle;
    m->cpu_addr = cpu_addr;
    m->size = block_size;

    locked = lock_pages(m->cpu_addr, m->size);

    list_add_tail(&m->list, &brd->m_MemList);

    atomic_inc(&brd->m_MemListCount);

    dbg_msg(dbg_trace, "%s(): %d: PA = 0x%zx, VA = %p, SZ = 0x%zx, PAGES = %d\n",
            __FUNCTION__, atomic_read(&brd->m_MemListCount), (size_t)m->dma_handle, m->cpu_addr, m->size, locked );

    spin_unlock(&brd->m_MemListLock);

    return cpu_addr;

do_free_mem:
    kfree(m);

do_exit:
    spin_unlock(&brd->m_MemListLock);

    return NULL;
}

//--------------------------------------------------------------------

// Find a block by its bus address in the board's memory list, release the
// DMA-coherent memory and remove the list entry.
int free_memory_block(struct pex_device *brd, struct memory_block mb)
{
    struct list_head *pos, *n;
    struct mem_t *m = NULL;
    int unlocked = 0;

    spin_lock(&brd->m_MemListLock);

    list_for_each_safe(pos, n, &brd->m_MemList) {

        m = list_entry(pos, struct mem_t, list);

        if(m->dma_handle != mb.phys)
            continue;

        unlocked = unlock_pages(m->cpu_addr, m->size);

        dma_free_coherent(&brd->m_pci->dev, m->size, m->cpu_addr, m->dma_handle);

        dbg_msg(dbg_trace, "%s(): %d: PA = 0x%zx, VA = %p, SZ = 0x%zx, PAGES = %d\n",
                __FUNCTION__, atomic_read(&brd->m_MemListCount), (size_t)m->dma_handle, m->cpu_addr, m->size, unlocked );

        list_del(pos);

        atomic_dec(&brd->m_MemListCount);

        kfree(m);
    }

    spin_unlock(&brd->m_MemListLock);

    return 0;
}

//--------------------------------------------------------------------
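
// Example (editor's sketch, not part of the original driver): one possible way
// to pair allocate_memory_block() with free_memory_block(). The function name
// is hypothetical; struct memory_block's 'phys' field is taken from the lookup
// above, everything else comes from this file.
static int example_alloc_and_free(struct pex_device *brd)
{
    struct memory_block mb;
    dma_addr_t dma = 0;
    void *va = NULL;

    memset(&mb, 0, sizeof(mb));

    // allocate one page of DMA-coherent memory; the bus address is returned
    // through 'dma' and the block is recorded in the board's memory list
    va = allocate_memory_block(brd, PAGE_SIZE, &dma);
    if(!va)
        return -ENOMEM;

    // ... hand 'dma' to the DMA engine, access the data through 'va' ...

    // release the block; it is found in the list by its bus address
    mb.phys = dma;
    return free_memory_block(brd, mb);
}

//--------------------------------------------------------------------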
