/* context.c -- IOCTLs for contexts and DMA queues -*- linux-c -*-
 * Created: Tue Feb  2 08:37:54 1999 by faith@precisioninsight.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *
 */

#define __NO_VERSION__
#include "drmP.h"

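/* Reset a queue to a known-empty state: clear its counters, initialize
 * its wait queues, take the context's flags, and size its waitlist to
 * the device's DMA buffer count.  A queue that still looks in-use is
 * logged but not treated as fatal. */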
static int drm_init_queue(drm_device_t *dev, drm_queue_t *q, drm_ctx_t *ctx)
{
	DRM_DEBUG("\n");

	if (atomic_read(&q->use_count) != 1
	    || atomic_read(&q->finalization)
	    || atomic_read(&q->block_count)) {
		DRM_ERROR("New queue is already in use: u%d f%d b%d\n",
			  atomic_read(&q->use_count),
			  atomic_read(&q->finalization),
			  atomic_read(&q->block_count));
	}

	atomic_set(&q->finalization,  0);
	atomic_set(&q->block_count,   0);
	atomic_set(&q->block_read,    0);
	atomic_set(&q->block_write,   0);
	atomic_set(&q->total_queued,  0);
	atomic_set(&q->total_flushed, 0);
	atomic_set(&q->total_locks,   0);

	init_waitqueue_head(&q->write_queue);
	init_waitqueue_head(&q->read_queue);
	init_waitqueue_head(&q->flush_queue);

	q->flags = ctx->flags;

	drm_waitlist_create(&q->waitlist, dev->dma->buf_count);

	return 0;
}

/* drm_alloc_queue:
 *
 * PRE:  1) dev->queuelist[0..dev->queue_count] is allocated and will not
 *          disappear (so all deallocation must be done after IOCTLs are off)
 *       2) dev->queue_count < dev->queue_slots
 *       3) dev->queuelist[i].use_count == 0 and
 *          dev->queuelist[i].finalization == 0 if i not in use
 *
 * POST: 1) dev->queuelist[i].use_count == 1
 *       2) dev->queue_count < dev->queue_slots */
static int drm_alloc_queue(drm_device_t *dev)
{
	int	    i;
	drm_queue_t *queue;
	int	    oldslots;
	int	    newslots;

				/* Check for a free queue */
	for (i = 0; i < dev->queue_count; i++) {
		atomic_inc(&dev->queuelist[i]->use_count);
		if (atomic_read(&dev->queuelist[i]->use_count) == 1
		    && !atomic_read(&dev->queuelist[i]->finalization)) {
			DRM_DEBUG("%d (free)\n", i);
			return i;
		}
		atomic_dec(&dev->queuelist[i]->use_count);
	}

				/* Allocate a new queue */
	queue = drm_alloc(sizeof(*queue), DRM_MEM_QUEUES);
	if (queue == NULL)
		return -ENOMEM;

	memset(queue, 0, sizeof(*queue));
	down(&dev->struct_sem);
	atomic_set(&queue->use_count, 1);

	++dev->queue_count;
	if (dev->queue_count >= dev->queue_slots) {
		int	    newcount = dev->queue_slots
				       ? dev->queue_slots * 2 : 2;
		drm_queue_t **newlist;

		oldslots = dev->queue_slots * sizeof(*dev->queuelist);
		newslots = newcount * sizeof(*dev->queuelist);

				/* Grow the list, but only commit the new
				   pointer and size once the allocation has
				   succeeded, so a failure leaves the old
				   list intact. */
		newlist = drm_realloc(dev->queuelist,
				      oldslots,
				      newslots,
				      DRM_MEM_QUEUES);
		if (!newlist) {
			--dev->queue_count;
			up(&dev->struct_sem);
			drm_free(queue, sizeof(*queue), DRM_MEM_QUEUES);
			DRM_DEBUG("out of memory\n");
			return -ENOMEM;
		}
		dev->queuelist	 = newlist;
		dev->queue_slots = newcount;
	}
	dev->queuelist[dev->queue_count-1] = queue;

	up(&dev->struct_sem);
	DRM_DEBUG("%d (new)\n", dev->queue_count - 1);
	return dev->queue_count - 1;
}

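/* Report how many contexts are reserved for the kernel and, if the
 * caller supplied enough room, hand back their handles. */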
int drm_resctx(struct inode *inode, struct file *filp, unsigned int cmd,
	       unsigned long arg)
{
	drm_ctx_res_t	res;
	drm_ctx_t	ctx;
	int		i;

	DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
	if (copy_from_user(&res, (drm_ctx_res_t *)arg, sizeof(res)))
		return -EFAULT;
	if (res.count >= DRM_RESERVED_CONTEXTS) {
		memset(&ctx, 0, sizeof(ctx));
		for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
			ctx.handle = i;
				/* Copy the whole context record so the
				   flags field is defined as well. */
			if (copy_to_user(&res.contexts[i],
					 &ctx,
					 sizeof(ctx)))
				return -EFAULT;
		}
	}
	res.count = DRM_RESERVED_CONTEXTS;
	if (copy_to_user((drm_ctx_res_t *)arg, &res, sizeof(res)))
		return -EFAULT;
	return 0;
}

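/* Create a new context: allocate (or reuse) a queue and initialize it.
 * The kernel's own context (handle 0) is set up on first use before a
 * second queue is handed back to the caller. */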
int drm_addctx(struct inode *inode, struct file *filp, unsigned int cmd,
	       unsigned long arg)
{
	drm_file_t	*priv	= filp->private_data;
	drm_device_t	*dev	= priv->dev;
	drm_ctx_t	ctx;

	if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
		return -EFAULT;
	if ((ctx.handle = drm_alloc_queue(dev)) == DRM_KERNEL_CONTEXT) {
				/* Init kernel's context and get a new one. */
		drm_init_queue(dev, dev->queuelist[ctx.handle], &ctx);
		ctx.handle = drm_alloc_queue(dev);
	}
	if (ctx.handle < 0)	/* drm_alloc_queue() can fail with -ENOMEM */
		return ctx.handle;
	drm_init_queue(dev, dev->queuelist[ctx.handle], &ctx);
	DRM_DEBUG("%d\n", ctx.handle);
	if (copy_to_user((drm_ctx_t *)arg, &ctx, sizeof(ctx)))
		return -EFAULT;
	return 0;
}

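/* Modify an existing context: update the queue's flags from the
 * caller-supplied context, provided the queue is live and has no
 * buffers waiting on it. */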
int drm_modctx(struct inode *inode, struct file *filp, unsigned int cmd,
	       unsigned long arg)
{
	drm_file_t	*priv	= filp->private_data;
	drm_device_t	*dev	= priv->dev;
	drm_ctx_t	ctx;
	drm_queue_t	*q;

	if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
		return -EFAULT;

	DRM_DEBUG("%d\n", ctx.handle);

	if (ctx.handle < 0 || ctx.handle >= dev->queue_count) return -EINVAL;
	q = dev->queuelist[ctx.handle];

	atomic_inc(&q->use_count);
	if (atomic_read(&q->use_count) == 1) {
				/* No longer in use */
		atomic_dec(&q->use_count);
		return -EINVAL;
	}

	if (DRM_BUFCOUNT(&q->waitlist)) {
		atomic_dec(&q->use_count);
		return -EBUSY;
	}

	q->flags = ctx.flags;

	atomic_dec(&q->use_count);
	return 0;
}

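/* Return the flags of an existing context to the caller. */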
int drm_getctx(struct inode *inode, struct file *filp, unsigned int cmd,
	       unsigned long arg)
{
	drm_file_t	*priv	= filp->private_data;
	drm_device_t	*dev	= priv->dev;
	drm_ctx_t	ctx;
	drm_queue_t	*q;

	if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
		return -EFAULT;

	DRM_DEBUG("%d\n", ctx.handle);

	if (ctx.handle < 0 || ctx.handle >= dev->queue_count) return -EINVAL;
	q = dev->queuelist[ctx.handle];

	atomic_inc(&q->use_count);
	if (atomic_read(&q->use_count) == 1) {
				/* No longer in use */
		atomic_dec(&q->use_count);
		return -EINVAL;
	}

	ctx.flags = q->flags;
	atomic_dec(&q->use_count);

	if (copy_to_user((drm_ctx_t *)arg, &ctx, sizeof(ctx)))
		return -EFAULT;

	return 0;
}

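/* Request a switch from the current (last) context to the context
 * named by the caller. */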
int drm_switchctx(struct inode *inode, struct file *filp, unsigned int cmd,
		  unsigned long arg)
{
	drm_file_t	*priv	= filp->private_data;
	drm_device_t	*dev	= priv->dev;
	drm_ctx_t	ctx;

	if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
		return -EFAULT;
	DRM_DEBUG("%d\n", ctx.handle);
	return drm_context_switch(dev, dev->last_context, ctx.handle);
}

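/* Acknowledge that a context switch has completed, making the named
 * context the current one. */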
int drm_newctx(struct inode *inode, struct file *filp, unsigned int cmd,
	       unsigned long arg)
{
	drm_file_t	*priv	= filp->private_data;
	drm_device_t	*dev	= priv->dev;
	drm_ctx_t	ctx;

	if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
		return -EFAULT;
	DRM_DEBUG("%d\n", ctx.handle);
	drm_context_switch_complete(dev, ctx.handle);

	return 0;
}

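/* Destroy a context: put its queue into the finalization state, drain
 * any queued buffers, and wake every process blocked on it.  The queue
 * becomes reusable once both use_count and finalization drop to zero. */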
int drm_rmctx(struct inode *inode, struct file *filp, unsigned int cmd,
	      unsigned long arg)
{
	drm_file_t	*priv	= filp->private_data;
	drm_device_t	*dev	= priv->dev;
	drm_ctx_t	ctx;
	drm_queue_t	*q;
	drm_buf_t	*buf;

	if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
		return -EFAULT;
	DRM_DEBUG("%d\n", ctx.handle);

	if (ctx.handle < 0 || ctx.handle >= dev->queue_count) return -EINVAL;
	q = dev->queuelist[ctx.handle];

	atomic_inc(&q->use_count);
	if (atomic_read(&q->use_count) == 1) {
				/* No longer in use */
		atomic_dec(&q->use_count);
		return -EINVAL;
	}

	atomic_inc(&q->finalization); /* Mark queue in finalization state */
	atomic_sub(2, &q->use_count); /* Mark queue as unused (pending
					 finalization) */

	while (test_and_set_bit(0, &dev->interrupt_flag)) {
		schedule();
		if (signal_pending(current)) {
			clear_bit(0, &dev->interrupt_flag);
			return -EINTR;
		}
	}
				/* Remove queued buffers */
	while ((buf = drm_waitlist_get(&q->waitlist))) {
		drm_free_buffer(dev, buf);
	}
	clear_bit(0, &dev->interrupt_flag);

				/* Wakeup blocked processes */
	wake_up_interruptible(&q->read_queue);
	wake_up_interruptible(&q->write_queue);
	wake_up_interruptible(&q->flush_queue);

				/* Finalization over.  Queue is made
				   available when both use_count and
				   finalization become 0, which won't
				   happen until all the waiting processes
				   stop waiting. */
	atomic_dec(&q->finalization);
	return 0;
}