OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

[/] [or1k/] [trunk/] [linux/] [linux-2.4/] [drivers/] [char/] [drm/] [drm_vm.h] - Blame information for rev 1275

Go to most recent revision | Details | Compare with Previous | View Log

Line No. Rev Author Line
1 1275 phoenix
/* drm_vm.h -- Memory mapping for DRM -*- linux-c -*-
2
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
3
 *
4
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6
 * All Rights Reserved.
7
 *
8
 * Permission is hereby granted, free of charge, to any person obtaining a
9
 * copy of this software and associated documentation files (the "Software"),
10
 * to deal in the Software without restriction, including without limitation
11
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12
 * and/or sell copies of the Software, and to permit persons to whom the
13
 * Software is furnished to do so, subject to the following conditions:
14
 *
15
 * The above copyright notice and this permission notice (including the next
16
 * paragraph) shall be included in all copies or substantial portions of the
17
 * Software.
18
 *
19
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
22
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25
 * OTHER DEALINGS IN THE SOFTWARE.
26
 *
27
 * Authors:
28
 *    Rickard E. (Rik) Faith <faith@valinux.com>
29
 *    Gareth Hughes <gareth@valinux.com>
30
 */
31
 
32
#include "drmP.h"
33
 
34
/* VM operations for register/framebuffer mappings (and AGP on Alpha);
 * installed by DRM(mmap).  Uses GCC's old "label:" designated-initializer
 * syntax, as is conventional in the 2.4 kernel. */
struct vm_operations_struct   DRM(vm_ops) = {
        nopage:  DRM(vm_nopage),
        open:    DRM(vm_open),
        close:   DRM(vm_close),
};
39
 
40
/* VM operations for _DRM_SHM maps backed by vmalloc()ed memory; uses the
 * special close routine that can tear down the map on last unmap. */
struct vm_operations_struct   DRM(vm_shm_ops) = {
        nopage:  DRM(vm_shm_nopage),
        open:    DRM(vm_open),
        close:   DRM(vm_shm_close),
};
45
 
46
/* VM operations for the DMA buffer region mapped via DRM(mmap_dma). */
struct vm_operations_struct   DRM(vm_dma_ops) = {
        nopage:  DRM(vm_dma_nopage),
        open:    DRM(vm_open),
        close:   DRM(vm_close),
};
51
 
52
/* VM operations for _DRM_SCATTER_GATHER maps (pages from dev->sg). */
struct vm_operations_struct   DRM(vm_sg_ops) = {
        nopage:  DRM(vm_sg_nopage),
        open:    DRM(vm_open),
        close:   DRM(vm_close),
};
57
 
58
/* nopage fault handler for AGP and register/framebuffer mappings.
 *
 * Only does real work when the chipset cannot access the AGP aperture
 * directly (dev->agp->cant_use_aperture set): it then finds the AGP map
 * backing this VMA in dev->maplist, locates the bound AGP memory block
 * covering the faulting address, and returns the real physical page with
 * an extra reference taken.  In every other case -- including kernels
 * built without AGP support -- it returns NOPAGE_SIGBUS, which also
 * disallows mremap() expansion of the mapping.  write_access is unused.
 */
struct page *DRM(vm_nopage)(struct vm_area_struct *vma,
                            unsigned long address,
                            int write_access)
{
#if __REALLY_HAVE_AGP
        drm_file_t *priv  = vma->vm_file->private_data;
        drm_device_t *dev = priv->dev;
        drm_map_t *map    = NULL;
        drm_map_list_t  *r_list;
        struct list_head *list;

        /*
         * Find the right map
         */

        if(!dev->agp || !dev->agp->cant_use_aperture) goto vm_nopage_error;

        list_for_each(list, &dev->maplist->head) {
                r_list = (drm_map_list_t *)list;
                map = r_list->map;
                if (!map) continue;
                if (map->offset == VM_OFFSET(vma)) break;
        }

        if (map && map->type == _DRM_AGP) {
                unsigned long offset = address - vma->vm_start;
                unsigned long baddr = VM_OFFSET(vma) + offset;
                struct drm_agp_mem *agpmem;
                struct page *page;

#if __alpha__
                /*
                 * Adjust to a bus-relative address
                 */
                baddr -= dev->hose->mem_space->start;
#endif

                /*
                 * It's AGP memory - find the real physical page to map
                 */
                for(agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next) {
                        if (agpmem->bound <= baddr &&
                            agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
                                break;
                }

                if (!agpmem) goto vm_nopage_error;

                /*
                 * Get the page, inc the use count, and return it
                 */
                offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
                /* NOTE(review): masking the GATT entry in place with
                 * dev->agp->page_mask presumably strips non-address bits
                 * before translation -- confirm against the AGP backend. */
                agpmem->memory->memory[offset] &= dev->agp->page_mask;
                page = virt_to_page(__va(agpmem->memory->memory[offset]));
                get_page(page);

                DRM_DEBUG("baddr = 0x%lx page = 0x%p, offset = 0x%lx\n",
                          baddr, __va(agpmem->memory->memory[offset]), offset);

                return page;
        }
vm_nopage_error:
#endif /* __REALLY_HAVE_AGP */

        return NOPAGE_SIGBUS;           /* Disallow mremap */
}
124
 
125
struct page *DRM(vm_shm_nopage)(struct vm_area_struct *vma,
126
                                unsigned long address,
127
                                int write_access)
128
{
129
        drm_map_t        *map    = (drm_map_t *)vma->vm_private_data;
130
        unsigned long    offset;
131
        unsigned long    i;
132
        struct page      *page;
133
 
134
        if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
135
        if (!map)                  return NOPAGE_OOM;  /* Nothing allocated */
136
 
137
        offset   = address - vma->vm_start;
138
        i = (unsigned long)map->handle + offset;
139
        page = vmalloc_to_page((void *)i);
140
        if (!page)
141
                return NOPAGE_OOM;
142
        get_page(page);
143
 
144
        DRM_DEBUG("shm_nopage 0x%lx\n", address);
145
        return page;
146
}
147
 
148
/* Special close routine which deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
151
 
152
void DRM(vm_shm_close)(struct vm_area_struct *vma)
153
{
154
        drm_file_t      *priv   = vma->vm_file->private_data;
155
        drm_device_t    *dev    = priv->dev;
156
        drm_vma_entry_t *pt, *prev, *next;
157
        drm_map_t *map;
158
        drm_map_list_t *r_list;
159
        struct list_head *list;
160
        int found_maps = 0;
161
 
162
        DRM_DEBUG("0x%08lx,0x%08lx\n",
163
                  vma->vm_start, vma->vm_end - vma->vm_start);
164
        atomic_dec(&dev->vma_count);
165
 
166
        map = vma->vm_private_data;
167
 
168
        down(&dev->struct_sem);
169
        for (pt = dev->vmalist, prev = NULL; pt; pt = next) {
170
                next = pt->next;
171
                if (pt->vma->vm_private_data == map) found_maps++;
172
                if (pt->vma == vma) {
173
                        if (prev) {
174
                                prev->next = pt->next;
175
                        } else {
176
                                dev->vmalist = pt->next;
177
                        }
178
                        DRM(free)(pt, sizeof(*pt), DRM_MEM_VMAS);
179
                } else {
180
                        prev = pt;
181
                }
182
        }
183
        /* We were the only map that was found */
184
        if(found_maps == 1 &&
185
           map->flags & _DRM_REMOVABLE) {
186
                /* Check to see if we are in the maplist, if we are not, then
187
                 * we delete this mappings information.
188
                 */
189
                found_maps = 0;
190
                list = &dev->maplist->head;
191
                list_for_each(list, &dev->maplist->head) {
192
                        r_list = (drm_map_list_t *) list;
193
                        if (r_list->map == map) found_maps++;
194
                }
195
 
196
                if(!found_maps) {
197
                        switch (map->type) {
198
                        case _DRM_REGISTERS:
199
                        case _DRM_FRAME_BUFFER:
200
#if __REALLY_HAVE_MTRR
201
                                if (map->mtrr >= 0) {
202
                                        int retcode;
203
                                        retcode = mtrr_del(map->mtrr,
204
                                                           map->offset,
205
                                                           map->size);
206
                                        DRM_DEBUG("mtrr_del = %d\n", retcode);
207
                                }
208
#endif
209
                                DRM(ioremapfree)(map->handle, map->size, dev);
210
                                break;
211
                        case _DRM_SHM:
212
                                vfree(map->handle);
213
                                break;
214
                        case _DRM_AGP:
215
                        case _DRM_SCATTER_GATHER:
216
                                break;
217
                        }
218
                        DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
219
                }
220
        }
221
        up(&dev->struct_sem);
222
}
223
 
224
struct page *DRM(vm_dma_nopage)(struct vm_area_struct *vma,
225
                                unsigned long address,
226
                                int write_access)
227
{
228
        drm_file_t       *priv   = vma->vm_file->private_data;
229
        drm_device_t     *dev    = priv->dev;
230
        drm_device_dma_t *dma    = dev->dma;
231
        unsigned long    offset;
232
        unsigned long    page_nr;
233
        struct page      *page;
234
 
235
        if (!dma)                  return NOPAGE_SIGBUS; /* Error */
236
        if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
237
        if (!dma->pagelist)        return NOPAGE_OOM ; /* Nothing allocated */
238
 
239
        offset   = address - vma->vm_start; /* vm_[pg]off[set] should be 0 */
240
        page_nr  = offset >> PAGE_SHIFT;
241
        page = virt_to_page((dma->pagelist[page_nr] +
242
                             (offset & (~PAGE_MASK))));
243
 
244
        get_page(page);
245
 
246
        DRM_DEBUG("dma_nopage 0x%lx (page %lu)\n", address, page_nr);
247
        return page;
248
}
249
 
250
struct page *DRM(vm_sg_nopage)(struct vm_area_struct *vma,
251
                               unsigned long address,
252
                               int write_access)
253
{
254
        drm_map_t        *map    = (drm_map_t *)vma->vm_private_data;
255
        drm_file_t *priv = vma->vm_file->private_data;
256
        drm_device_t *dev = priv->dev;
257
        drm_sg_mem_t *entry = dev->sg;
258
        unsigned long offset;
259
        unsigned long map_offset;
260
        unsigned long page_offset;
261
        struct page *page;
262
 
263
        if (!entry)                return NOPAGE_SIGBUS; /* Error */
264
        if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
265
        if (!entry->pagelist)      return NOPAGE_OOM ;  /* Nothing allocated */
266
 
267
 
268
        offset = address - vma->vm_start;
269
        map_offset = map->offset - dev->sg->handle;
270
        page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
271
        page = entry->pagelist[page_offset];
272
        get_page(page);
273
 
274
        return page;
275
}
276
 
277
void DRM(vm_open)(struct vm_area_struct *vma)
278
{
279
        drm_file_t      *priv   = vma->vm_file->private_data;
280
        drm_device_t    *dev    = priv->dev;
281
        drm_vma_entry_t *vma_entry;
282
 
283
        DRM_DEBUG("0x%08lx,0x%08lx\n",
284
                  vma->vm_start, vma->vm_end - vma->vm_start);
285
        atomic_inc(&dev->vma_count);
286
 
287
        vma_entry = DRM(alloc)(sizeof(*vma_entry), DRM_MEM_VMAS);
288
        if (vma_entry) {
289
                down(&dev->struct_sem);
290
                vma_entry->vma  = vma;
291
                vma_entry->next = dev->vmalist;
292
                vma_entry->pid  = current->pid;
293
                dev->vmalist    = vma_entry;
294
                up(&dev->struct_sem);
295
        }
296
}
297
 
298
void DRM(vm_close)(struct vm_area_struct *vma)
299
{
300
        drm_file_t      *priv   = vma->vm_file->private_data;
301
        drm_device_t    *dev    = priv->dev;
302
        drm_vma_entry_t *pt, *prev;
303
 
304
        DRM_DEBUG("0x%08lx,0x%08lx\n",
305
                  vma->vm_start, vma->vm_end - vma->vm_start);
306
        atomic_dec(&dev->vma_count);
307
 
308
        down(&dev->struct_sem);
309
        for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
310
                if (pt->vma == vma) {
311
                        if (prev) {
312
                                prev->next = pt->next;
313
                        } else {
314
                                dev->vmalist = pt->next;
315
                        }
316
                        DRM(free)(pt, sizeof(*pt), DRM_MEM_VMAS);
317
                        break;
318
                }
319
        }
320
        up(&dev->struct_sem);
321
}
322
 
323
/* mmap() path for the DMA buffer region (VM_OFFSET == 0).
 *
 * The requested length must cover exactly dma->page_count pages; on
 * success the VMA is wired to DRM(vm_dma_ops), marked VM_RESERVED (no
 * swapping), and registered via DRM(vm_open).  Returns 0 or -EINVAL.
 * The big kernel lock is held only across the dev->dma lookup and the
 * length validation.
 */
int DRM(mmap_dma)(struct file *filp, struct vm_area_struct *vma)
{
        drm_file_t       *priv   = filp->private_data;
        drm_device_t     *dev;
        drm_device_dma_t *dma;
        unsigned long    length  = vma->vm_end - vma->vm_start;

        lock_kernel();
        dev      = priv->dev;
        dma      = dev->dma;
        DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, VM_OFFSET(vma));

                                /* Length must match exact page count */
        if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
                unlock_kernel();
                return -EINVAL;
        }
        unlock_kernel();

        vma->vm_ops   = &DRM(vm_dma_ops);
        vma->vm_flags |= VM_RESERVED; /* Don't swap */
        vma->vm_file  =  filp;  /* Needed for drm_vm_open() */
        DRM(vm_open)(vma);
        return 0;
}
349
 
350
/* Driver-overridable hooks used inside DRM(mmap).  Both expand in that
 * function's scope and rely on its local `map' and `dev' variables. */

/* Offset identifying a map within the mmap() offset space. */
#ifndef DRIVER_GET_MAP_OFS
#define DRIVER_GET_MAP_OFS()    (map->offset)
#endif

/* Extra offset added when remapping register/framebuffer ranges.
 * On Alpha this shifts the bus address into dense PCI memory space;
 * elsewhere it is zero. */
#ifndef DRIVER_GET_REG_OFS
#ifdef __alpha__
#define DRIVER_GET_REG_OFS()    (dev->hose->dense_mem_base -    \
                                 dev->hose->mem_space->start)
#else
#define DRIVER_GET_REG_OFS()    0
#endif
#endif
362
 
363
int DRM(mmap)(struct file *filp, struct vm_area_struct *vma)
364
{
365
        drm_file_t      *priv   = filp->private_data;
366
        drm_device_t    *dev    = priv->dev;
367
        drm_map_t       *map    = NULL;
368
        drm_map_list_t  *r_list;
369
        unsigned long   offset  = 0;
370
        struct list_head *list;
371
 
372
        DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
373
                  vma->vm_start, vma->vm_end, VM_OFFSET(vma));
374
 
375
        if ( !priv->authenticated ) return -EACCES;
376
 
377
        if (!VM_OFFSET(vma)) return DRM(mmap_dma)(filp, vma);
378
 
379
                                /* A sequential search of a linked list is
380
                                   fine here because: 1) there will only be
381
                                   about 5-10 entries in the list and, 2) a
382
                                   DRI client only has to do this mapping
383
                                   once, so it doesn't have to be optimized
384
                                   for performance, even if the list was a
385
                                   bit longer. */
386
        list_for_each(list, &dev->maplist->head) {
387
                unsigned long off;
388
 
389
                r_list = (drm_map_list_t *)list;
390
                map = r_list->map;
391
                if (!map) continue;
392
                off = DRIVER_GET_MAP_OFS();
393
                if (off == VM_OFFSET(vma)) break;
394
        }
395
 
396
        if (!map || ((map->flags&_DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
397
                return -EPERM;
398
 
399
                                /* Check for valid size. */
400
        if (map->size != vma->vm_end - vma->vm_start) return -EINVAL;
401
 
402
        if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
403
                vma->vm_flags &= VM_MAYWRITE;
404
#if defined(__i386__) || defined(__x86_64__)
405
                pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
406
#else
407
                                /* Ye gads this is ugly.  With more thought
408
                                   we could move this up higher and use
409
                                   `protection_map' instead.  */
410
                vma->vm_page_prot = __pgprot(pte_val(pte_wrprotect(
411
                        __pte(pgprot_val(vma->vm_page_prot)))));
412
#endif
413
        }
414
 
415
        switch (map->type) {
416
        case _DRM_AGP:
417
#if defined(__alpha__)
418
                /*
419
                 * On Alpha we can't talk to bus dma address from the
420
                 * CPU, so for memory of type DRM_AGP, we'll deal with
421
                 * sorting out the real physical pages and mappings
422
                 * in nopage()
423
                 */
424
                vma->vm_ops = &DRM(vm_ops);
425
                break;
426
#endif
427
                /* fall through to _DRM_FRAME_BUFFER... */
428
        case _DRM_FRAME_BUFFER:
429
        case _DRM_REGISTERS:
430
                if (VM_OFFSET(vma) >= __pa(high_memory)) {
431
#if defined(__i386__) || defined(__x86_64__)
432
                        if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) {
433
                                pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
434
                                pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
435
                        }
436
#elif defined(__ia64__)
437
                        if (map->type != _DRM_AGP)
438
                                vma->vm_page_prot =
439
                                        pgprot_writecombine(vma->vm_page_prot);
440
#elif defined(__powerpc__)
441
                        pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE | _PAGE_GUARDED;
442
#endif
443
                        vma->vm_flags |= VM_IO; /* not in core dump */
444
                }
445
                offset = DRIVER_GET_REG_OFS();
446
#ifdef __sparc__
447
                if (io_remap_page_range(DRM_RPR_ARG(vma) vma->vm_start,
448
                                        VM_OFFSET(vma) + offset,
449
                                        vma->vm_end - vma->vm_start,
450
                                        vma->vm_page_prot, 0))
451
#else
452
                if (remap_page_range(DRM_RPR_ARG(vma) vma->vm_start,
453
                                     VM_OFFSET(vma) + offset,
454
                                     vma->vm_end - vma->vm_start,
455
                                     vma->vm_page_prot))
456
#endif
457
                                return -EAGAIN;
458
                DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
459
                          " offset = 0x%lx\n",
460
                          map->type,
461
                          vma->vm_start, vma->vm_end, VM_OFFSET(vma) + offset);
462
                vma->vm_ops = &DRM(vm_ops);
463
                break;
464
        case _DRM_SHM:
465
                vma->vm_ops = &DRM(vm_shm_ops);
466
                vma->vm_private_data = (void *)map;
467
                                /* Don't let this area swap.  Change when
468
                                   DRM_KERNEL advisory is supported. */
469
                break;
470
        case _DRM_SCATTER_GATHER:
471
                vma->vm_ops = &DRM(vm_sg_ops);
472
                vma->vm_private_data = (void *)map;
473
                break;
474
        default:
475
                return -EINVAL; /* This should never happen. */
476
        }
477
        vma->vm_flags |= VM_RESERVED; /* Don't swap */
478
 
479
        vma->vm_file  =  filp;  /* Needed for drm_vm_open() */
480
        DRM(vm_open)(vma);
481
        return 0;
482
}

powered by: WebSVN 2.1.0

© copyright 1999-2025 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.