OpenCores
Subversion repository or1k
https://opencores.org/ocsvn/or1k/or1k/trunk

or1k/trunk/linux/linux-2.4/drivers/char/drm/radeon_mem.c (rev 1765)

/* radeon_mem.c -- Simple agp/fb memory manager for radeon -*- linux-c -*-
 *
 * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
 *
 * The Weather Channel (TM) funded Tungsten Graphics to develop the
 * initial release of the Radeon 8500 driver under the XFree86 license.
 * This notice must be preserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Whitwell <keith@tungstengraphics.com>
 */

#include "radeon.h"
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon_drv.h"
#include "drm_os_linux.h"

/* Very simple allocator for agp memory, working on a static range
 * already mapped into each client's address space.
 */
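
/* The fields used throughout this file imply a doubly-linked block
 * descriptor roughly like the sketch below.  The real definition lives
 * in the driver headers included above (radeon_drv.h in mainline trees),
 * so this is an inferred layout, not a copy of it:
 *
 *      struct mem_block {
 *              struct mem_block *next, *prev;  // circular list, rooted at heap
 *              int start;                      // offset within the aperture
 *              int size;                       // length in bytes
 *              int pid;                        // 0 = free, -1 = heap sentinel,
 *                                              // otherwise the owning pid
 *      };
 */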

static struct mem_block *split_block(struct mem_block *p, int start, int size,
                                     int pid )
{
        /* Maybe cut off the start of an existing block */
        if (start > p->start) {
                struct mem_block *newblock = kmalloc(sizeof(*newblock), GFP_KERNEL);
                if (!newblock)
                        goto out;
                newblock->start = start;
                newblock->size = p->size - (start - p->start);
                newblock->pid = 0;
                newblock->next = p->next;
                newblock->prev = p;
                p->next->prev = newblock;
                p->next = newblock;
                p->size -= newblock->size;
                p = newblock;
        }

        /* Maybe cut off the end of an existing block */
        if (size < p->size) {
                struct mem_block *newblock = kmalloc(sizeof(*newblock), GFP_KERNEL);
                if (!newblock)
                        goto out;
                newblock->start = start + size;
                newblock->size = p->size - size;
                newblock->pid = 0;
                newblock->next = p->next;
                newblock->prev = p;
                p->next->prev = newblock;
                p->next = newblock;
                p->size = size;
        }

 out:
        /* Our block is in the middle */
        p->pid = pid;
        return p;
}
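
/* Worked example: splitting the single free block [0x0, 0x10000) for a
 * 0x1000-byte allocation at start 0x4000 yields three blocks:
 *
 *      [0x0000, 0x4000)  pid 0   (front remainder, kept by p)
 *      [0x4000, 0x5000)  pid n   (the block returned to the caller)
 *      [0x5000, 0x10000) pid 0   (tail remainder)
 *
 * Note the failure mode: if either kmalloc() fails, the goto hands the
 * caller the whole unsplit block, so the allocation still succeeds but
 * may be larger (and start earlier) than requested.
 */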

static struct mem_block *alloc_block( struct mem_block *heap, int size,
                                      int align2, int pid )
{
        struct mem_block *p;
        int mask = (1 << align2)-1;

        for (p = heap->next ; p != heap ; p = p->next) {
                int start = (p->start + mask) & ~mask;
                if (p->pid == 0 && start + size <= p->start + p->size)
                        return split_block( p, start, size, pid );
        }

        return NULL;
}
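
/* align2 is the log2 of the required alignment: align2 == 12 gives
 * mask == 0xfff, so a free block starting at 0x1234 is rounded up to
 * (0x1234 + 0xfff) & ~0xfff == 0x2000 before the fit test.  This is a
 * first-fit scan over the circular list; any space skipped by the
 * rounding stays in the front remainder that split_block() leaves free.
 */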

static struct mem_block *find_block( struct mem_block *heap, int start )
{
        struct mem_block *p;

        for (p = heap->next ; p != heap ; p = p->next)
                if (p->start == start)
                        return p;

        return NULL;
}


static void free_block( struct mem_block *p )
{
        p->pid = 0;

        /* Assumes a single contiguous range.  Needs a special pid in
         * 'heap' to stop it being subsumed.
         */
        if (p->next->pid == 0) {
                struct mem_block *q = p->next;
                p->size += q->size;
                p->next = q->next;
                p->next->prev = p;
                kfree(q);
        }

        if (p->prev->pid == 0) {
                struct mem_block *q = p->prev;
                q->size += p->size;
                q->next = p->next;
                q->next->prev = q;
                kfree(p);
        }
}
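
/* Freeing marks the block pid 0 and coalesces it with free neighbours:
 * freeing [0x4000, 0x5000) between free [0x0, 0x4000) and free
 * [0x5000, 0x10000) collapses all three back into [0x0, 0x10000).  The
 * heap sentinel's pid of -1 (see init_heap below) is what stops the
 * merge from swallowing the list head.
 */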

static void print_heap( struct mem_block *heap )
{
        struct mem_block *p;

        for (p = heap->next ; p != heap ; p = p->next)
                DRM_DEBUG("0x%x..0x%x (0x%x) -- owner %d\n",
                          p->start, p->start + p->size,
                          p->size, p->pid);
}

/* Initialize.  How to check for an uninitialized heap?
 */
static int init_heap(struct mem_block **heap, int start, int size)
{
        struct mem_block *blocks = kmalloc(sizeof(*blocks), GFP_KERNEL);

        if (!blocks)
                return -ENOMEM;

        *heap = kmalloc(sizeof(**heap), GFP_KERNEL);
        if (!*heap) {
                kfree( blocks );
                return -ENOMEM;
        }

        blocks->start = start;
        blocks->size = size;
        blocks->pid = 0;
        blocks->next = blocks->prev = *heap;

        memset( *heap, 0, sizeof(**heap) );
        (*heap)->pid = -1;
        (*heap)->next = (*heap)->prev = blocks;
        return 0;
}
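
/* The resulting list is a two-node ring: a sentinel that owns nothing
 * (pid -1) and one free block covering the whole range:
 *
 *      sentinel (pid -1) <-> [start, start+size) (pid 0) <-> sentinel ...
 *
 * Every later split and merge keeps the ring circular, which is why the
 * traversal loops above can use "p != heap" as their only stop condition.
 */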


/* Free all blocks associated with the releasing pid.
 */
void radeon_mem_release( struct mem_block *heap )
{
        int pid = current->pid;
        struct mem_block *p;

        if (!heap || !heap->next)
                return;

        for (p = heap->next ; p != heap ; p = p->next) {
                if (p->pid == pid)
                        p->pid = 0;
        }

        /* Assumes a single contiguous range.  Needs a special pid in
         * 'heap' to stop it being subsumed.
         */
        for (p = heap->next ; p != heap ; p = p->next) {
                while (p->pid == 0 && p->next->pid == 0) {
                        struct mem_block *q = p->next;
                        p->size += q->size;
                        p->next = q->next;
                        p->next->prev = p;
                        kfree(q);
                }
        }
}
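
/* Two passes: the first releases every block the exiting pid still owns,
 * the second coalesces runs of free blocks.  The inner while loop merges
 * forward only, which is sufficient because the outer loop visits each
 * surviving block in address order, so every free run collapses into its
 * first element.
 */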

/* Shutdown.
 */
void radeon_mem_takedown( struct mem_block **heap )
{
        struct mem_block *p;

        if (!*heap)
                return;

        for (p = (*heap)->next ; p != *heap ; ) {
                struct mem_block *q = p;
                p = p->next;
                kfree(q);
        }

        kfree( *heap );
        *heap = 0;
}



/* IOCTL HANDLERS */

static struct mem_block **get_heap( drm_radeon_private_t *dev_priv,
                                   int region )
{
        switch( region ) {
        case RADEON_MEM_REGION_AGP:
                return &dev_priv->agp_heap;
        case RADEON_MEM_REGION_FB:
                return &dev_priv->fb_heap;
        default:
                return 0;
        }
}

int radeon_mem_alloc(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data )
{
        drm_file_t      *priv   = filp->private_data;
        drm_device_t    *dev    = priv->dev;
        drm_radeon_private_t *dev_priv = dev->dev_private;
        drm_radeon_mem_alloc_t alloc;
        struct mem_block *block, **heap;

        if ( !dev_priv ) {
                DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
                return -EINVAL;
        }

        DRM_COPY_FROM_USER_IOCTL( alloc, (drm_radeon_mem_alloc_t *)data,
                                  sizeof(alloc) );

        heap = get_heap( dev_priv, alloc.region );
        if (!heap || !*heap)
                return -EFAULT;

        /* Make things easier on ourselves: all allocations at least
         * 4k aligned.
         */
        if (alloc.alignment < 12)
                alloc.alignment = 12;

        block = alloc_block( *heap, alloc.size, alloc.alignment,
                             current->pid );

        if (!block)
                return -ENOMEM;

        if ( copy_to_user( alloc.region_offset, &block->start,
                               sizeof(int) ) ) {
                DRM_ERROR( "copy_to_user\n" );
                return -EFAULT;
        }

        return 0;
}
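
/* A minimal user-side sketch of driving this handler; it assumes the
 * DRM_IOCTL_RADEON_ALLOC request number and the drm_radeon_mem_alloc_t
 * layout from radeon_drm.h, plus an already-open DRM file descriptor.
 * Illustrative only, not part of this driver, hence compiled out:
 */
#if 0
#include <sys/ioctl.h>
#include "radeon_drm.h"

static int example_alloc(int drm_fd)
{
        drm_radeon_mem_alloc_t alloc;
        int offset = 0;

        alloc.region = RADEON_MEM_REGION_AGP;
        alloc.alignment = 12;          /* log2, i.e. 4k-aligned */
        alloc.size = 0x1000;           /* one page */
        alloc.region_offset = &offset; /* kernel writes block->start here */

        if (ioctl(drm_fd, DRM_IOCTL_RADEON_ALLOC, &alloc) < 0)
                return -1;
        return offset;                 /* offset within the AGP aperture */
}
#endif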



int radeon_mem_free(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data)
{
        drm_file_t      *priv   = filp->private_data;
        drm_device_t    *dev    = priv->dev;
        drm_radeon_private_t *dev_priv = dev->dev_private;
        drm_radeon_mem_free_t memfree;
        struct mem_block *block, **heap;

        if ( !dev_priv ) {
                DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
                return -EINVAL;
        }

        DRM_COPY_FROM_USER_IOCTL( memfree, (drm_radeon_mem_free_t *)data,
                                  sizeof(memfree) );

        heap = get_heap( dev_priv, memfree.region );
        if (!heap || !*heap)
                return -EFAULT;

        block = find_block( *heap, memfree.region_offset );
        if (!block)
                return -EFAULT;

        if (block->pid != current->pid)
                return -EPERM;

        free_block( block );
        return 0;
}
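
/* Ownership is tracked purely by pid, so only the allocating process can
 * free a block here (-EPERM otherwise); blocks whose owner exits without
 * freeing are reclaimed by radeon_mem_release() above.
 */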

int radeon_mem_init_heap(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data)
{
        drm_file_t      *priv   = filp->private_data;
        drm_device_t    *dev    = priv->dev;
        drm_radeon_private_t *dev_priv = dev->dev_private;
        drm_radeon_mem_init_heap_t initheap;
        struct mem_block **heap;

        if ( !dev_priv ) {
                DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
                return -EINVAL;
        }

        DRM_COPY_FROM_USER_IOCTL( initheap, (drm_radeon_mem_init_heap_t *)data,
                                  sizeof(initheap) );

        heap = get_heap( dev_priv, initheap.region );
        if (!heap)
                return -EFAULT;

        if (*heap) {
                DRM_ERROR("heap already initialized?");
                return -EFAULT;
        }

        return init_heap( heap, initheap.start, initheap.size );
}
