or1k/trunk/linux/linux-2.4/drivers/char/drm-4.0/gamma_dma.c (rev 1765)

/* gamma_dma.c -- DMA support for GMX 2000 -*- linux-c -*-
 * Created: Fri Mar 19 14:30:16 1999 by faith@precisioninsight.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *
 */

#define __NO_VERSION__
#include "drmP.h"
#include "gamma_drv.h"

#include <linux/interrupt.h>    /* For task queue support */


/* WARNING!!! MAGIC NUMBER!!!  The number of regions already added to the
   kernel must be specified here.  Currently, the number is 2.  This must
   match the order the X server uses for instantiating register regions,
   or must be passed in a new ioctl. */
#define GAMMA_REG(reg)                                             \
        (2                                                         \
         + ((reg < 0x1000)                                         \
            ? 0                                                    \
            : ((reg < 0x10000) ? 1 : ((reg < 0x11000) ? 2 : 3))))

#define GAMMA_OFF(reg)                                             \
        ((reg < 0x1000)                                            \
         ? reg                                                     \
         : ((reg < 0x10000)                                        \
            ? (reg - 0x1000)                                       \
            : ((reg < 0x11000)                                     \
               ? (reg - 0x10000)                                   \
               : (reg - 0x11000))))

#define GAMMA_BASE(reg)  ((unsigned long)dev->maplist[GAMMA_REG(reg)]->handle)
#define GAMMA_ADDR(reg)  (GAMMA_BASE(reg) + GAMMA_OFF(reg))
#define GAMMA_DEREF(reg) *(__volatile__ int *)GAMMA_ADDR(reg)
#define GAMMA_READ(reg)  GAMMA_DEREF(reg)
#define GAMMA_WRITE(reg,val) do { GAMMA_DEREF(reg) = val; } while (0)
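
/* Worked example of the region mapping above: a register at offset 0x8c40
   (GAMMA_SYNC, defined below) lies in the 0x1000-0xffff range, so
   GAMMA_REG() yields 2 + 1 = 3 (maplist[3]) and GAMMA_OFF() yields
   0x8c40 - 0x1000 = 0x7c40, while offset 0x0028 (GAMMA_DMAADDRESS) stays
   in maplist[2] at offset 0x0028.  The region order is assumed to follow
   the X server's mapping order, per the warning above. */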

#define GAMMA_BROADCASTMASK    0x9378
#define GAMMA_COMMANDINTENABLE 0x0c48
#define GAMMA_DMAADDRESS       0x0028
#define GAMMA_DMACOUNT         0x0030
#define GAMMA_FILTERMODE       0x8c00
#define GAMMA_GCOMMANDINTFLAGS 0x0c50
#define GAMMA_GCOMMANDMODE     0x0c40
#define GAMMA_GCOMMANDSTATUS   0x0c60
#define GAMMA_GDELAYTIMER      0x0c38
#define GAMMA_GDMACONTROL      0x0060
#define GAMMA_GINTENABLE       0x0808
#define GAMMA_GINTFLAGS        0x0810
#define GAMMA_INFIFOSPACE      0x0018
#define GAMMA_OUTFIFOWORDS     0x0020
#define GAMMA_OUTPUTFIFO       0x2000
#define GAMMA_SYNC             0x8c40
#define GAMMA_SYNC_TAG         0x0188
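
/* Start a DMA transfer: program the buffer's physical address, spin until
   GCOMMANDSTATUS reads back 4 (which appears to mean the command unit can
   accept a new transfer), then write the length in 32-bit words to kick
   off the engine. */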

static inline void gamma_dma_dispatch(drm_device_t *dev, unsigned long address,
                                      unsigned long length)
{
        GAMMA_WRITE(GAMMA_DMAADDRESS, virt_to_phys((void *)address));
        while (GAMMA_READ(GAMMA_GCOMMANDSTATUS) != 4)
                ;
        GAMMA_WRITE(GAMMA_DMACOUNT, length / 4);
}
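
/* Quiescence helpers: drain the outstanding DMA count, wait for input-FIFO
   space, then push a sync tag through the filter mode register and poll the
   output FIFO until the tag comes back.  The _dual variant broadcasts to
   both MX units and reads the tag back from each of them in turn. */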

static inline void gamma_dma_quiescent_single(drm_device_t *dev)
{
        while (GAMMA_READ(GAMMA_DMACOUNT))
                ;
        while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
                ;

        GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10);
        GAMMA_WRITE(GAMMA_SYNC, 0);

        do {
                while (!GAMMA_READ(GAMMA_OUTFIFOWORDS))
                        ;
        } while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG);
}

static inline void gamma_dma_quiescent_dual(drm_device_t *dev)
{
        while (GAMMA_READ(GAMMA_DMACOUNT))
                ;
        while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
                ;

        GAMMA_WRITE(GAMMA_BROADCASTMASK, 3);

        GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10);
        GAMMA_WRITE(GAMMA_SYNC, 0);

                                /* Read from first MX */
        do {
                while (!GAMMA_READ(GAMMA_OUTFIFOWORDS))
                        ;
        } while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG);

                                /* Read from second MX */
        do {
                while (!GAMMA_READ(GAMMA_OUTFIFOWORDS + 0x10000))
                        ;
        } while (GAMMA_READ(GAMMA_OUTPUTFIFO + 0x10000) != GAMMA_SYNC_TAG);
}

static inline void gamma_dma_ready(drm_device_t *dev)
{
        while (GAMMA_READ(GAMMA_DMACOUNT))
                ;
}

static inline int gamma_dma_is_ready(drm_device_t *dev)
{
        return !GAMMA_READ(GAMMA_DMACOUNT);
}
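
/* Interrupt service routine: reprogram the delay timer and write the
   GCOMMANDINTFLAGS/GINTFLAGS registers (presumably acknowledging the
   interrupt), free the buffer whose transfer just completed, and defer
   scheduling of the next buffer to the immediate task queue rather than
   dispatching from interrupt context. */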

static void gamma_dma_service(int irq, void *device, struct pt_regs *regs)
{
        drm_device_t     *dev = (drm_device_t *)device;
        drm_device_dma_t *dma = dev->dma;

        atomic_inc(&dev->total_irq);
        GAMMA_WRITE(GAMMA_GDELAYTIMER, 0xc350/2); /* 0x05S */
        GAMMA_WRITE(GAMMA_GCOMMANDINTFLAGS, 8);
        GAMMA_WRITE(GAMMA_GINTFLAGS, 0x2001);
        if (gamma_dma_is_ready(dev)) {
                                /* Free previous buffer */
                if (test_and_set_bit(0, &dev->dma_flag)) {
                        atomic_inc(&dma->total_missed_free);
                        return;
                }
                if (dma->this_buffer) {
                        drm_free_buffer(dev, dma->this_buffer);
                        dma->this_buffer = NULL;
                }
                clear_bit(0, &dev->dma_flag);

                                /* Dispatch new buffer */
                queue_task(&dev->tq, &tq_immediate);
                mark_bh(IMMEDIATE_BH);
        }
}
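
/* Dispatch dma->next_buffer: take the hardware lock unless the buffer was
   submitted "while locked", switch contexts if the buffer belongs to a
   different context, then program the engine via gamma_dma_dispatch() and
   update the DMA statistics.  Returns -EBUSY when the engine, the lock, or
   a pending context switch prevents dispatch. */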

/* Only called by gamma_dma_schedule. */
static int gamma_do_dma(drm_device_t *dev, int locked)
{
        unsigned long    address;
        unsigned long    length;
        drm_buf_t        *buf;
        int              retcode = 0;
        drm_device_dma_t *dma = dev->dma;
#if DRM_DMA_HISTOGRAM
        cycles_t         dma_start, dma_stop;
#endif

        if (test_and_set_bit(0, &dev->dma_flag)) {
                atomic_inc(&dma->total_missed_dma);
                return -EBUSY;
        }

#if DRM_DMA_HISTOGRAM
        dma_start = get_cycles();
#endif

        if (!dma->next_buffer) {
                DRM_ERROR("No next_buffer\n");
                clear_bit(0, &dev->dma_flag);
                return -EINVAL;
        }

        buf     = dma->next_buffer;
        address = (unsigned long)buf->address;
        length  = buf->used;

        DRM_DEBUG("context %d, buffer %d (%ld bytes)\n",
                  buf->context, buf->idx, length);

        if (buf->list == DRM_LIST_RECLAIM) {
                drm_clear_next_buffer(dev);
                drm_free_buffer(dev, buf);
                clear_bit(0, &dev->dma_flag);
                return -EINVAL;
        }

        if (!length) {
                DRM_ERROR("0 length buffer\n");
                drm_clear_next_buffer(dev);
                drm_free_buffer(dev, buf);
                clear_bit(0, &dev->dma_flag);
                return 0;
        }

        if (!gamma_dma_is_ready(dev)) {
                clear_bit(0, &dev->dma_flag);
                return -EBUSY;
        }

        if (buf->while_locked) {
                if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
                        DRM_ERROR("Dispatching buffer %d from pid %d"
                                  " \"while locked\", but no lock held\n",
                                  buf->idx, buf->pid);
                }
        } else {
                if (!locked && !drm_lock_take(&dev->lock.hw_lock->lock,
                                              DRM_KERNEL_CONTEXT)) {
                        atomic_inc(&dma->total_missed_lock);
                        clear_bit(0, &dev->dma_flag);
                        return -EBUSY;
                }
        }

        if (dev->last_context != buf->context
            && !(dev->queuelist[buf->context]->flags
                 & _DRM_CONTEXT_PRESERVED)) {
                                /* PRE: dev->last_context != buf->context */
                if (drm_context_switch(dev, dev->last_context, buf->context)) {
                        drm_clear_next_buffer(dev);
                        drm_free_buffer(dev, buf);
                }
                retcode = -EBUSY;
                goto cleanup;

                                /* POST: we will wait for the context
                                   switch and will dispatch on a later call
                                   when dev->last_context == buf->context.
                                   NOTE WE HOLD THE LOCK THROUGHOUT THIS
                                   TIME! */
        }

        drm_clear_next_buffer(dev);
        buf->pending     = 1;
        buf->waiting     = 0;
        buf->list        = DRM_LIST_PEND;
#if DRM_DMA_HISTOGRAM
        buf->time_dispatched = get_cycles();
#endif

        gamma_dma_dispatch(dev, address, length);
        drm_free_buffer(dev, dma->this_buffer);
        dma->this_buffer = buf;

        atomic_add(length, &dma->total_bytes);
        atomic_inc(&dma->total_dmas);

        if (!buf->while_locked && !dev->context_flag && !locked) {
                if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
                                  DRM_KERNEL_CONTEXT)) {
                        DRM_ERROR("\n");
                }
        }
cleanup:

        clear_bit(0, &dev->dma_flag);

#if DRM_DMA_HISTOGRAM
        dma_stop = get_cycles();
        atomic_inc(&dev->histo.dma[drm_histogram_slot(dma_stop - dma_start)]);
#endif

        return retcode;
}

static void gamma_dma_schedule_timer_wrapper(unsigned long dev)
{
        gamma_dma_schedule((drm_device_t *)dev, 0);
}

static void gamma_dma_schedule_tq_wrapper(void *dev)
{
        gamma_dma_schedule(dev, 0);
}
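
/* Top-level scheduler, reached from the ioctl path, the immediate task
   queue, and the timer wrapper above.  It retries a previously selected
   buffer if one is still pending; otherwise it pulls the next buffer from
   the queue chosen by drm_select_queue(), and keeps looping (bounded by
   'expire') while the engine is ready or another scheduling attempt was
   missed during this pass. */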

int gamma_dma_schedule(drm_device_t *dev, int locked)
{
        int              next;
        drm_queue_t      *q;
        drm_buf_t        *buf;
        int              retcode   = 0;
        int              processed = 0;
        int              missed;
        int              expire    = 20;
        drm_device_dma_t *dma      = dev->dma;
#if DRM_DMA_HISTOGRAM
        cycles_t         schedule_start;
#endif

        if (test_and_set_bit(0, &dev->interrupt_flag)) {
                                /* Not reentrant */
                atomic_inc(&dma->total_missed_sched);
                return -EBUSY;
        }
        missed = atomic_read(&dma->total_missed_sched);

#if DRM_DMA_HISTOGRAM
        schedule_start = get_cycles();
#endif

again:
        if (dev->context_flag) {
                clear_bit(0, &dev->interrupt_flag);
                return -EBUSY;
        }
        if (dma->next_buffer) {
                                /* Unsent buffer that was previously
                                   selected, but that couldn't be sent
                                   because the lock could not be obtained
                                   or the DMA engine wasn't ready.  Try
                                   again. */
                atomic_inc(&dma->total_tried);
                if (!(retcode = gamma_do_dma(dev, locked))) {
                        atomic_inc(&dma->total_hit);
                        ++processed;
                }
        } else {
                do {
                        next = drm_select_queue(dev,
                                             gamma_dma_schedule_timer_wrapper);
                        if (next >= 0) {
                                q   = dev->queuelist[next];
                                buf = drm_waitlist_get(&q->waitlist);
                                dma->next_buffer = buf;
                                dma->next_queue  = q;
                                if (buf && buf->list == DRM_LIST_RECLAIM) {
                                        drm_clear_next_buffer(dev);
                                        drm_free_buffer(dev, buf);
                                }
                        }
                } while (next >= 0 && !dma->next_buffer);
                if (dma->next_buffer) {
                        if (!(retcode = gamma_do_dma(dev, locked))) {
                                ++processed;
                        }
                }
        }

        if (--expire) {
                if (missed != atomic_read(&dma->total_missed_sched)) {
                        atomic_inc(&dma->total_lost);
                        if (gamma_dma_is_ready(dev)) goto again;
                }
                if (processed && gamma_dma_is_ready(dev)) {
                        atomic_inc(&dma->total_lost);
                        processed = 0;
                        goto again;
                }
        }

        clear_bit(0, &dev->interrupt_flag);

#if DRM_DMA_HISTOGRAM
        atomic_inc(&dev->histo.schedule[drm_histogram_slot(get_cycles()
                                                           - schedule_start)]);
#endif
        return retcode;
}
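
/* Priority (_DRM_DMA_PRIORITY) send path: bypasses the scheduler and
   dispatches each buffer in d->send_indices[] directly, with interrupt-
   driven scheduling held off via dev->interrupt_flag and the hardware lock
   held unless the request was made "while locked". */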

static int gamma_dma_priority(drm_device_t *dev, drm_dma_t *d)
{
        unsigned long     address;
        unsigned long     length;
        int               must_free = 0;
        int               retcode   = 0;
        int               i;
        int               idx;
        drm_buf_t         *buf;
        drm_buf_t         *last_buf = NULL;
        drm_device_dma_t  *dma      = dev->dma;
        DECLARE_WAITQUEUE(entry, current);

                                /* Turn off interrupt handling */
        while (test_and_set_bit(0, &dev->interrupt_flag)) {
                schedule();
                if (signal_pending(current)) return -EINTR;
        }
        if (!(d->flags & _DRM_DMA_WHILE_LOCKED)) {
                while (!drm_lock_take(&dev->lock.hw_lock->lock,
                                      DRM_KERNEL_CONTEXT)) {
                        schedule();
                        if (signal_pending(current)) {
                                clear_bit(0, &dev->interrupt_flag);
                                return -EINTR;
                        }
                }
                ++must_free;
        }
        atomic_inc(&dma->total_prio);

        for (i = 0; i < d->send_count; i++) {
                idx = d->send_indices[i];
                if (idx < 0 || idx >= dma->buf_count) {
                        DRM_ERROR("Index %d (of %d max)\n",
                                  d->send_indices[i], dma->buf_count - 1);
                        continue;
                }
                buf = dma->buflist[ idx ];
                if (buf->pid != current->pid) {
                        DRM_ERROR("Process %d using buffer owned by %d\n",
                                  current->pid, buf->pid);
                        retcode = -EINVAL;
                        goto cleanup;
                }
                if (buf->list != DRM_LIST_NONE) {
                        DRM_ERROR("Process %d using %d's buffer on list %d\n",
                                  current->pid, buf->pid, buf->list);
                        retcode = -EINVAL;
                        goto cleanup;
                }
                                /* This isn't a race condition on
                                   buf->list, since our concern is the
                                   buffer reclaim during the time the
                                   process closes the /dev/drm? handle, so
                                   it can't also be doing DMA. */
                buf->list         = DRM_LIST_PRIO;
                buf->used         = d->send_sizes[i];
                buf->context      = d->context;
                buf->while_locked = d->flags & _DRM_DMA_WHILE_LOCKED;
                address           = (unsigned long)buf->address;
                length            = buf->used;
                if (!length) {
                        DRM_ERROR("0 length buffer\n");
                }
                if (buf->pending) {
                        DRM_ERROR("Sending pending buffer:"
                                  " buffer %d, offset %d\n",
                                  d->send_indices[i], i);
                        retcode = -EINVAL;
                        goto cleanup;
                }
                if (buf->waiting) {
                        DRM_ERROR("Sending waiting buffer:"
                                  " buffer %d, offset %d\n",
                                  d->send_indices[i], i);
                        retcode = -EINVAL;
                        goto cleanup;
                }
                buf->pending = 1;

                if (dev->last_context != buf->context
                    && !(dev->queuelist[buf->context]->flags
                         & _DRM_CONTEXT_PRESERVED)) {
                        add_wait_queue(&dev->context_wait, &entry);
                        current->state = TASK_INTERRUPTIBLE;
                                /* PRE: dev->last_context != buf->context */
                        drm_context_switch(dev, dev->last_context,
                                           buf->context);
                                /* POST: we will wait for the context
                                   switch and will dispatch on a later call
                                   when dev->last_context == buf->context.
                                   NOTE WE HOLD THE LOCK THROUGHOUT THIS
                                   TIME! */
                        schedule();
                        current->state = TASK_RUNNING;
                        remove_wait_queue(&dev->context_wait, &entry);
                        if (signal_pending(current)) {
                                retcode = -EINTR;
                                goto cleanup;
                        }
                        if (dev->last_context != buf->context) {
                                DRM_ERROR("Context mismatch: %d %d\n",
                                          dev->last_context,
                                          buf->context);
                        }
                }

#if DRM_DMA_HISTOGRAM
                buf->time_queued     = get_cycles();
                buf->time_dispatched = buf->time_queued;
#endif
                gamma_dma_dispatch(dev, address, length);
                atomic_add(length, &dma->total_bytes);
                atomic_inc(&dma->total_dmas);

                if (last_buf) {
                        drm_free_buffer(dev, last_buf);
                }
                last_buf = buf;
        }


cleanup:
        if (last_buf) {
                gamma_dma_ready(dev);
                drm_free_buffer(dev, last_buf);
        }

        if (must_free && !dev->context_flag) {
                if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
                                  DRM_KERNEL_CONTEXT)) {
                        DRM_ERROR("\n");
                }
        }
        clear_bit(0, &dev->interrupt_flag);
        return retcode;
}
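
/* Normal (queued) send path: enqueue the buffers with drm_dma_enqueue(),
   let gamma_dma_schedule() dispatch them, and, if _DRM_DMA_BLOCK was
   requested, sleep on the last buffer's wait queue until it is neither
   waiting nor pending. */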

static int gamma_dma_send_buffers(drm_device_t *dev, drm_dma_t *d)
{
        DECLARE_WAITQUEUE(entry, current);
        drm_buf_t         *last_buf = NULL;
        int               retcode   = 0;
        drm_device_dma_t  *dma      = dev->dma;

        if (d->flags & _DRM_DMA_BLOCK) {
                last_buf = dma->buflist[d->send_indices[d->send_count-1]];
                add_wait_queue(&last_buf->dma_wait, &entry);
        }

        if ((retcode = drm_dma_enqueue(dev, d))) {
                if (d->flags & _DRM_DMA_BLOCK)
                        remove_wait_queue(&last_buf->dma_wait, &entry);
                return retcode;
        }

        gamma_dma_schedule(dev, 0);

        if (d->flags & _DRM_DMA_BLOCK) {
                DRM_DEBUG("%d waiting\n", current->pid);
                for (;;) {
                        current->state = TASK_INTERRUPTIBLE;
                        if (!last_buf->waiting && !last_buf->pending)
                                break; /* finished */
                        schedule();
                        if (signal_pending(current)) {
                                retcode = -EINTR; /* Can't restart */
                                break;
                        }
                }
                current->state = TASK_RUNNING;
                DRM_DEBUG("%d running\n", current->pid);
                remove_wait_queue(&last_buf->dma_wait, &entry);
                if (!retcode
                    || (last_buf->list==DRM_LIST_PEND && !last_buf->pending)) {
                        if (!waitqueue_active(&last_buf->dma_wait)) {
                                drm_free_buffer(dev, last_buf);
                        }
                }
                if (retcode) {
                        DRM_ERROR("ctx%d w%d p%d c%d i%d l%d %d/%d\n",
                                  d->context,
                                  last_buf->waiting,
                                  last_buf->pending,
                                  DRM_WAITCOUNT(dev, d->context),
                                  last_buf->idx,
                                  last_buf->list,
                                  last_buf->pid,
                                  current->pid);
                }
        }
        return retcode;
}
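
/* The DMA ioctl: copy the drm_dma_t request in from user space, validate
   the context and buffer counts, send buffers through the priority or
   queued path, optionally reserve fresh buffers with drm_dma_get_buffers(),
   and copy the result back out. */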

int gamma_dma(struct inode *inode, struct file *filp, unsigned int cmd,
              unsigned long arg)
{
        drm_file_t        *priv     = filp->private_data;
        drm_device_t      *dev      = priv->dev;
        drm_device_dma_t  *dma      = dev->dma;
        int               retcode   = 0;
        drm_dma_t         d;

        if (copy_from_user(&d, (drm_dma_t *)arg, sizeof(d)))
                return -EFAULT;
        DRM_DEBUG("%d %d: %d send, %d req\n",
                  current->pid, d.context, d.send_count, d.request_count);

        if (d.context == DRM_KERNEL_CONTEXT || d.context >= dev->queue_slots) {
                DRM_ERROR("Process %d using context %d\n",
                          current->pid, d.context);
                return -EINVAL;
        }
        if (d.send_count < 0 || d.send_count > dma->buf_count) {
                DRM_ERROR("Process %d trying to send %d buffers (of %d max)\n",
                          current->pid, d.send_count, dma->buf_count);
                return -EINVAL;
        }
        if (d.request_count < 0 || d.request_count > dma->buf_count) {
                DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
                          current->pid, d.request_count, dma->buf_count);
                return -EINVAL;
        }

        if (d.send_count) {
                if (d.flags & _DRM_DMA_PRIORITY)
                        retcode = gamma_dma_priority(dev, &d);
                else
                        retcode = gamma_dma_send_buffers(dev, &d);
        }

        d.granted_count = 0;

        if (!retcode && d.request_count) {
                retcode = drm_dma_get_buffers(dev, &d);
        }

        DRM_DEBUG("%d returning, granted = %d\n",
                  current->pid, d.granted_count);
        if (copy_to_user((drm_dma_t *)arg, &d, sizeof(d)))
                return -EFAULT;

        return retcode;
}
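
/* Claim the interrupt line and hook up gamma_dma_service().  The command
   mode and DMA control registers are cleared before request_irq(); the
   interrupt enables and the delay timer (which presumably coalesces
   completion interrupts) are programmed once the handler is in place. */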

int gamma_irq_install(drm_device_t *dev, int irq)
{
        int retcode;

        if (!irq)     return -EINVAL;

        down(&dev->struct_sem);
        if (dev->irq) {
                up(&dev->struct_sem);
                return -EBUSY;
        }
        dev->irq = irq;
        up(&dev->struct_sem);

        DRM_DEBUG("%d\n", irq);

        dev->context_flag     = 0;
        dev->interrupt_flag   = 0;
        dev->dma_flag         = 0;

        dev->dma->next_buffer = NULL;
        dev->dma->next_queue  = NULL;
        dev->dma->this_buffer = NULL;

        INIT_LIST_HEAD(&dev->tq.list);
        dev->tq.sync          = 0;
        dev->tq.routine       = gamma_dma_schedule_tq_wrapper;
        dev->tq.data          = dev;


                                /* Before installing handler */
        GAMMA_WRITE(GAMMA_GCOMMANDMODE, 0);
        GAMMA_WRITE(GAMMA_GDMACONTROL, 0);

                                /* Install handler */
        if ((retcode = request_irq(dev->irq,
                                   gamma_dma_service,
                                   0,
                                   dev->devname,
                                   dev))) {
                down(&dev->struct_sem);
                dev->irq = 0;
                up(&dev->struct_sem);
                return retcode;
        }

                                /* After installing handler */
        GAMMA_WRITE(GAMMA_GINTENABLE,       0x2001);
        GAMMA_WRITE(GAMMA_COMMANDINTENABLE, 0x0008);
        GAMMA_WRITE(GAMMA_GDELAYTIMER,     0x39090);

        return 0;
}

int gamma_irq_uninstall(drm_device_t *dev)
{
        int irq;

        down(&dev->struct_sem);
        irq      = dev->irq;
        dev->irq = 0;
        up(&dev->struct_sem);

        if (!irq) return -EINVAL;

        DRM_DEBUG("%d\n", irq);

        GAMMA_WRITE(GAMMA_GDELAYTIMER,      0);
        GAMMA_WRITE(GAMMA_COMMANDINTENABLE, 0);
        GAMMA_WRITE(GAMMA_GINTENABLE,       0);
        free_irq(irq, dev);

        return 0;
}
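
/* Control ioctl: installs or removes the interrupt handler on behalf of
   user space (DRM_INST_HANDLER / DRM_UNINST_HANDLER). */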


int gamma_control(struct inode *inode, struct file *filp, unsigned int cmd,
                  unsigned long arg)
{
        drm_file_t      *priv   = filp->private_data;
        drm_device_t    *dev    = priv->dev;
        drm_control_t   ctl;
        int             retcode;

        if (copy_from_user(&ctl, (drm_control_t *)arg, sizeof(ctl)))
                return -EFAULT;

        switch (ctl.func) {
        case DRM_INST_HANDLER:
                if ((retcode = gamma_irq_install(dev, ctl.irq)))
                        return retcode;
                break;
        case DRM_UNINST_HANDLER:
                if ((retcode = gamma_irq_uninstall(dev)))
                        return retcode;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}
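
/* Lock ioctl: wait until the hardware lock can be taken for the requested
   context, block job-control signals while it is held (block_all_signals()),
   and optionally wait for DMA to drain (_DRM_LOCK_READY) or for full
   quiescence (_DRM_LOCK_QUIESCENT) before returning. */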

int gamma_lock(struct inode *inode, struct file *filp, unsigned int cmd,
               unsigned long arg)
{
        drm_file_t        *priv   = filp->private_data;
        drm_device_t      *dev    = priv->dev;
        DECLARE_WAITQUEUE(entry, current);
        int               ret   = 0;
        drm_lock_t        lock;
        drm_queue_t       *q;
#if DRM_DMA_HISTOGRAM
        cycles_t          start;

        dev->lck_start = start = get_cycles();
#endif

        if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
                return -EFAULT;

        if (lock.context == DRM_KERNEL_CONTEXT) {
                DRM_ERROR("Process %d using kernel context %d\n",
                          current->pid, lock.context);
                return -EINVAL;
        }

        DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
                  lock.context, current->pid, dev->lock.hw_lock->lock,
                  lock.flags);

        if (lock.context < 0 || lock.context >= dev->queue_count)
                return -EINVAL;
        q = dev->queuelist[lock.context];

        ret = drm_flush_block_and_flush(dev, lock.context, lock.flags);

        if (!ret) {
                if (_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)
                    != lock.context) {
                        long j = jiffies - dev->lock.lock_time;

                        if (j > 0 && j <= DRM_LOCK_SLICE) {
                                /* Can't take lock if we just had it and
                                   there is contention. */
                                current->state = TASK_INTERRUPTIBLE;
                                schedule_timeout(j);
                        }
                }
                add_wait_queue(&dev->lock.lock_queue, &entry);
                for (;;) {
                        current->state = TASK_INTERRUPTIBLE;
                        if (!dev->lock.hw_lock) {
                                /* Device has been unregistered */
                                ret = -EINTR;
                                break;
                        }
                        if (drm_lock_take(&dev->lock.hw_lock->lock,
                                          lock.context)) {
                                dev->lock.pid       = current->pid;
                                dev->lock.lock_time = jiffies;
                                atomic_inc(&dev->total_locks);
                                atomic_inc(&q->total_locks);
                                break;  /* Got lock */
                        }

                                /* Contention */
                        atomic_inc(&dev->total_sleeps);
                        schedule();
                        if (signal_pending(current)) {
                                ret = -ERESTARTSYS;
                                break;
                        }
                }
                current->state = TASK_RUNNING;
                remove_wait_queue(&dev->lock.lock_queue, &entry);
        }

        drm_flush_unblock(dev, lock.context, lock.flags); /* cleanup phase */

        if (!ret) {
                sigemptyset(&dev->sigmask);
                sigaddset(&dev->sigmask, SIGSTOP);
                sigaddset(&dev->sigmask, SIGTSTP);
                sigaddset(&dev->sigmask, SIGTTIN);
                sigaddset(&dev->sigmask, SIGTTOU);
                dev->sigdata.context = lock.context;
                dev->sigdata.lock    = dev->lock.hw_lock;
                block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);

                if (lock.flags & _DRM_LOCK_READY)
                        gamma_dma_ready(dev);
                if (lock.flags & _DRM_LOCK_QUIESCENT) {
                        if (gamma_found() == 1) {
                                gamma_dma_quiescent_single(dev);
                        } else {
                                gamma_dma_quiescent_dual(dev);
                        }
                }
        }
        DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");

#if DRM_DMA_HISTOGRAM
        atomic_inc(&dev->histo.lacq[drm_histogram_slot(get_cycles() - start)]);
#endif

        return ret;
}
