/* mga_dma.c -- DMA support for mga g200/g400 -*- linux-c -*-
 * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
 *          Jeff Hartmann <jhartmann@valinux.com>
 *          Keith Whitwell <keithw@valinux.com>
 *
 */

#define __NO_VERSION__
#include "drmP.h"
#include "mga_drv.h"

#include <linux/interrupt.h>    /* For task queue support */
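
/* Register access helpers.  The MGA register aperture is expected to be
 * entry 2 of dev->maplist, so MGA_BASE() resolves to that map's kernel
 * virtual handle and MGA_ADDR() adds the register offset to it.
 * MGA_READ()/MGA_WRITE() are volatile 32-bit accesses through that
 * mapping.
 */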
#define MGA_REG(reg)            2
#define MGA_BASE(reg)           ((unsigned long) \
                                ((drm_device_t *)dev)->maplist[MGA_REG(reg)]->handle)
#define MGA_ADDR(reg)           (MGA_BASE(reg) + reg)
#define MGA_DEREF(reg)          *(__volatile__ int *)MGA_ADDR(reg)
#define MGA_READ(reg)           MGA_DEREF(reg)
#define MGA_WRITE(reg,val)      do { MGA_DEREF(reg) = val; } while (0)

#define PDEA_pagpxfer_enable         0x2

static int mga_flush_queue(drm_device_t *dev);
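
/* Allocate a single kernel page for the hardware status page.  The
 * extra reference and the PG_reserved bit keep the page pinned while
 * the chip writes to it; mga_free_page() drops both again before the
 * page is returned to the allocator.
 */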
static unsigned long mga_alloc_page(drm_device_t *dev)
{
        unsigned long address;

        address = __get_free_page(GFP_KERNEL);
        if(address == 0UL) {
                return 0;
        }
        atomic_inc(&virt_to_page(address)->count);
        set_bit(PG_reserved, &virt_to_page(address)->flags);

        return address;
}

static void mga_free_page(drm_device_t *dev, unsigned long page)
{
        if(!page) return;
        atomic_dec(&virt_to_page(page)->count);
        clear_bit(PG_reserved, &virt_to_page(page)->flags);
        free_page(page);
        return;
}

static void mga_delay(void)
{
        return;
}

/* These are two age tags that will never be sent to
 * the hardware */
#define MGA_BUF_USED    0xffffffff
#define MGA_BUF_FREE    0
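
/* The buffer freelist is a doubly linked list with one node per DMA
 * buffer.  dev_priv->head is a sentinel whose age stays MGA_BUF_USED;
 * fresh nodes are linked in right behind it and dev_priv->tail points
 * at the oldest entry.  Each buffer finds its own node through
 * buf_priv->my_freelist.
 */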
static int mga_freelist_init(drm_device_t *dev)
{
        drm_device_dma_t *dma = dev->dma;
        drm_buf_t *buf;
        drm_mga_buf_priv_t *buf_priv;
        drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
        drm_mga_freelist_t *item;
        int i;

        dev_priv->head = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER);
        if(dev_priv->head == NULL) return -ENOMEM;
        memset(dev_priv->head, 0, sizeof(drm_mga_freelist_t));
        dev_priv->head->age = MGA_BUF_USED;

        for (i = 0; i < dma->buf_count; i++) {
                buf = dma->buflist[ i ];
                buf_priv = buf->dev_private;
                item = drm_alloc(sizeof(drm_mga_freelist_t),
                                 DRM_MEM_DRIVER);
                if(item == NULL) return -ENOMEM;
                memset(item, 0, sizeof(drm_mga_freelist_t));
                item->age = MGA_BUF_FREE;
                item->prev = dev_priv->head;
                item->next = dev_priv->head->next;
                if(dev_priv->head->next != NULL)
                        dev_priv->head->next->prev = item;
                if(item->next == NULL) dev_priv->tail = item;
                item->buf = buf;
                buf_priv->my_freelist = item;
                buf_priv->discard = 0;
                buf_priv->dispatched = 0;
                dev_priv->head->next = item;
        }

        return 0;
}

static void mga_freelist_cleanup(drm_device_t *dev)
{
        drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
        drm_mga_freelist_t *item;
        drm_mga_freelist_t *prev;

        item = dev_priv->head;
        while(item) {
                prev = item;
                item = item->next;
                drm_free(prev, sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER);
        }

        dev_priv->head = dev_priv->tail = NULL;
}
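
/* Wait for the drawing engine to go idle.  The MGA_IN_DISPATCH bit is
 * first claimed (spinning for up to about three seconds of jiffies),
 * then MGAREG_STATUS is polled until the bits covered by the
 * 0x00030001 mask read back as 0x00020000.  A timeout in either loop
 * is reported as a lockup and the function gives up.
 */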
/* Frees dispatch lock */
static inline void mga_dma_quiescent(drm_device_t *dev)
{
        drm_device_dma_t  *dma      = dev->dma;
        drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
        drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
        unsigned long end;
        int i;

        DRM_DEBUG("dispatch_status = 0x%02lx\n", dev_priv->dispatch_status);
        end = jiffies + (HZ*3);
        while(1) {
                if(!test_and_set_bit(MGA_IN_DISPATCH,
                                     &dev_priv->dispatch_status)) {
                        break;
                }
                if((signed)(end - jiffies) <= 0) {
                        DRM_ERROR("irqs: %d wanted %d\n",
                                  atomic_read(&dev->total_irq),
                                  atomic_read(&dma->total_lost));
                        DRM_ERROR("lockup: dispatch_status = 0x%02lx,"
                                  " jiffies = %lu, end = %lu\n",
                                  dev_priv->dispatch_status, jiffies, end);
                        return;
                }
                for (i = 0 ; i < 2000 ; i++) mga_delay();
        }
        end = jiffies + (HZ*3);
        DRM_DEBUG("quiescent status : %x\n", MGA_READ(MGAREG_STATUS));
        while((MGA_READ(MGAREG_STATUS) & 0x00030001) != 0x00020000) {
                if((signed)(end - jiffies) <= 0) {
                        DRM_ERROR("irqs: %d wanted %d\n",
                                  atomic_read(&dev->total_irq),
                                  atomic_read(&dma->total_lost));
                        DRM_ERROR("lockup\n");
                        clear_bit(MGA_IN_DISPATCH, &dev_priv->dispatch_status);
                        return;
                }
                for (i = 0 ; i < 2000 ; i++) mga_delay();
        }
        sarea_priv->dirty |= MGA_DMA_FLUSH;

        clear_bit(MGA_IN_DISPATCH, &dev_priv->dispatch_status);
        DRM_DEBUG("exit, dispatch_status = 0x%02lx\n",
                  dev_priv->dispatch_status);
}

static void mga_reset_freelist(drm_device_t *dev)
{
        drm_device_dma_t  *dma      = dev->dma;
        drm_buf_t *buf;
        drm_mga_buf_priv_t *buf_priv;
        int i;

        for (i = 0; i < dma->buf_count; i++) {
                buf = dma->buflist[ i ];
                buf_priv = buf->dev_private;
                buf_priv->my_freelist->age = MGA_BUF_FREE;
        }
}

/* Least recently used :
 * These operations are not atomic b/c they are protected by the
 * hardware lock */
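
/* Pull the least recently used buffer off the tail of the freelist.
 * A tail entry may only be reused once its age is older than
 * dev_priv->last_prim_age, i.e. the primary buffer that last touched
 * it has been retired by the interrupt handler.  After enough
 * back-to-back failures the caller is put to sleep on buf_queue until
 * last_prim_age advances.
 */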
drm_buf_t *mga_freelist_get(drm_device_t *dev)
{
        DECLARE_WAITQUEUE(entry, current);
        drm_mga_private_t *dev_priv =
                (drm_mga_private_t *) dev->dev_private;
        drm_mga_freelist_t *prev;
        drm_mga_freelist_t *next;
        static int failed = 0;
        int return_null = 0;

        if(failed >= 1000 && dev_priv->tail->age >= dev_priv->last_prim_age) {
                DRM_DEBUG("Waiting on freelist,"
                          " tail->age = %d, last_prim_age = %d\n",
                          dev_priv->tail->age,
                          dev_priv->last_prim_age);
                add_wait_queue(&dev_priv->buf_queue, &entry);
                set_bit(MGA_IN_GETBUF, &dev_priv->dispatch_status);
                for (;;) {
                        current->state = TASK_INTERRUPTIBLE;
                        mga_dma_schedule(dev, 0);
                        if(dev_priv->tail->age < dev_priv->last_prim_age)
                                break;
                        atomic_inc(&dev->total_sleeps);
                        schedule();
                        if (signal_pending(current)) {
                                ++return_null;
                                break;
                        }
                }
                clear_bit(MGA_IN_GETBUF, &dev_priv->dispatch_status);
                current->state = TASK_RUNNING;
                remove_wait_queue(&dev_priv->buf_queue, &entry);
                if (return_null) return NULL;
        }

        if(dev_priv->tail->age < dev_priv->last_prim_age) {
                prev = dev_priv->tail->prev;
                next = dev_priv->tail;
                prev->next = NULL;
                next->prev = next->next = NULL;
                dev_priv->tail = prev;
                next->age = MGA_BUF_USED;
                failed = 0;
                return next->buf;
        }

        failed++;
        return NULL;
}

int mga_freelist_put(drm_device_t *dev, drm_buf_t *buf)
{
        drm_mga_private_t *dev_priv =
                (drm_mga_private_t *) dev->dev_private;
        drm_mga_buf_priv_t *buf_priv = buf->dev_private;
        drm_mga_freelist_t *prev;
        drm_mga_freelist_t *head;
        drm_mga_freelist_t *next;

        if(buf_priv->my_freelist->age == MGA_BUF_USED) {
                /* Discarded buffer, put it on the tail */
                next = buf_priv->my_freelist;
                next->age = MGA_BUF_FREE;
                prev = dev_priv->tail;
                prev->next = next;
                next->prev = prev;
                next->next = NULL;
                dev_priv->tail = next;
        } else {
                /* Normally aged buffer, put it on the head + 1,
                 * as the real head is a sentinel element
                 */
                next = buf_priv->my_freelist;
                head = dev_priv->head;
                prev = head->next;
                head->next = next;
                prev->prev = next;
                next->prev = head;
                next->next = prev;
        }

        return 0;
}
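
/* Carve the primary DMA space out of the reserved AGP region.  The
 * requested primary_size is rounded up to whole pages and split evenly
 * into MGA_NUM_PRIM_BUFS ring entries; each entry records its AGP
 * physical address (phys_head) and a CPU pointer into the single
 * drm_ioremap() mapping, and keeps five dwords in reserve for the
 * closing DMAPAD/SOFTRAP group written by mga_fire_primary().
 */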
static int mga_init_primary_bufs(drm_device_t *dev, drm_mga_init_t *init)
{
        drm_mga_private_t *dev_priv = dev->dev_private;
        drm_mga_prim_buf_t *prim_buffer;
        int i, temp, size_of_buf;
        int offset = init->reserved_map_agpstart;

        dev_priv->primary_size = ((init->primary_size + PAGE_SIZE - 1) /
                                  PAGE_SIZE) * PAGE_SIZE;
        size_of_buf = dev_priv->primary_size / MGA_NUM_PRIM_BUFS;
        dev_priv->warp_ucode_size = init->warp_ucode_size;
        dev_priv->prim_bufs = drm_alloc(sizeof(drm_mga_prim_buf_t *) *
                                        (MGA_NUM_PRIM_BUFS + 1),
                                        DRM_MEM_DRIVER);
        if(dev_priv->prim_bufs == NULL) {
                DRM_ERROR("Unable to allocate memory for prim_buf\n");
                return -ENOMEM;
        }
        memset(dev_priv->prim_bufs,
               0, sizeof(drm_mga_prim_buf_t *) * (MGA_NUM_PRIM_BUFS + 1));

        temp = init->warp_ucode_size + dev_priv->primary_size;
        temp = ((temp + PAGE_SIZE - 1) / PAGE_SIZE) * PAGE_SIZE;

        dev_priv->ioremap = drm_ioremap(dev->agp->base + offset,
                                        temp, dev);
        if(dev_priv->ioremap == NULL) {
                DRM_ERROR("Ioremap failed\n");
                return -ENOMEM;
        }
        init_waitqueue_head(&dev_priv->wait_queue);

        for(i = 0; i < MGA_NUM_PRIM_BUFS; i++) {
                prim_buffer = drm_alloc(sizeof(drm_mga_prim_buf_t),
                                        DRM_MEM_DRIVER);
                if(prim_buffer == NULL) return -ENOMEM;
                memset(prim_buffer, 0, sizeof(drm_mga_prim_buf_t));
                prim_buffer->phys_head = offset + dev->agp->base;
                prim_buffer->current_dma_ptr =
                        prim_buffer->head =
                        (u32 *) (dev_priv->ioremap +
                                 offset -
                                 init->reserved_map_agpstart);
                prim_buffer->num_dwords = 0;
                prim_buffer->max_dwords = size_of_buf / sizeof(u32);
                prim_buffer->max_dwords -= 5; /* Leave room for the softrap */
                prim_buffer->sec_used = 0;
                prim_buffer->idx = i;
                prim_buffer->prim_age = i + 1;
                offset = offset + size_of_buf;
                dev_priv->prim_bufs[i] = prim_buffer;
        }
        dev_priv->current_prim_idx = 0;
        dev_priv->next_prim =
                dev_priv->last_prim =
                dev_priv->current_prim =
                dev_priv->prim_bufs[0];
        dev_priv->next_prim_age = 2;
        dev_priv->last_prim_age = 1;
        set_bit(MGA_BUF_IN_USE, &dev_priv->current_prim->buffer_status);
        return 0;
}
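
/* Hand a primary buffer to the hardware.  The buffer is terminated
 * with a DMAPAD/DMAPAD/DMAPAD/SOFTRAP group, then the engine status is
 * polled until it settles (a stricter mask is used when a flush was
 * requested through the SAREA).  After flushing write combining,
 * MGAREG_PRIMADDRESS/MGAREG_PRIMEND are programmed to start the
 * transfer; the SOFTRAP interrupt at the end of the buffer is what
 * retires it in mga_dma_service().
 */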
void mga_fire_primary(drm_device_t *dev, drm_mga_prim_buf_t *prim)
{
        drm_mga_private_t *dev_priv = dev->dev_private;
        drm_device_dma_t  *dma      = dev->dma;
        drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
        int use_agp = PDEA_pagpxfer_enable;
        unsigned long end;
        int i;
        int next_idx;
        PRIMLOCALS;

        dev_priv->last_prim = prim;

        /* We never check for overflow, b/c there is always room */
        PRIMPTR(prim);
        if(num_dwords <= 0) {
                DRM_ERROR("num_dwords == 0 when dispatched\n");
                goto out_prim_wait;
        }
        PRIMOUTREG( MGAREG_DMAPAD, 0);
        PRIMOUTREG( MGAREG_DMAPAD, 0);
        PRIMOUTREG( MGAREG_DMAPAD, 0);
        PRIMOUTREG( MGAREG_SOFTRAP, 0);
        PRIMFINISH(prim);

        end = jiffies + (HZ*3);
        if(sarea_priv->dirty & MGA_DMA_FLUSH) {
                while((MGA_READ(MGAREG_STATUS) & 0x00030001) != 0x00020000) {
                        if((signed)(end - jiffies) <= 0) {
                                DRM_ERROR("irqs: %d wanted %d\n",
                                          atomic_read(&dev->total_irq),
                                          atomic_read(&dma->total_lost));
                                DRM_ERROR("lockup (flush)\n");
                                goto out_prim_wait;
                        }

                        for (i = 0 ; i < 4096 ; i++) mga_delay();
                }
                sarea_priv->dirty &= ~(MGA_DMA_FLUSH);
        } else {
                while((MGA_READ(MGAREG_STATUS) & 0x00020001) != 0x00020000) {
                        if((signed)(end - jiffies) <= 0) {
                                DRM_ERROR("irqs: %d wanted %d\n",
                                          atomic_read(&dev->total_irq),
                                          atomic_read(&dma->total_lost));
                                DRM_ERROR("lockup (wait)\n");
                                goto out_prim_wait;
                        }

                        for (i = 0 ; i < 4096 ; i++) mga_delay();
                }
        }

        mga_flush_write_combine();
        atomic_inc(&dev_priv->pending_bufs);
        MGA_WRITE(MGAREG_PRIMADDRESS, phys_head | TT_GENERAL);
        MGA_WRITE(MGAREG_PRIMEND, (phys_head + num_dwords * 4) | use_agp);
        prim->num_dwords = 0;
        sarea_priv->last_enqueue = prim->prim_age;

        next_idx = prim->idx + 1;
        if(next_idx >= MGA_NUM_PRIM_BUFS)
                next_idx = 0;

        dev_priv->next_prim = dev_priv->prim_bufs[next_idx];
        return;

 out_prim_wait:
        prim->num_dwords = 0;
        prim->sec_used = 0;
        clear_bit(MGA_BUF_IN_USE, &prim->buffer_status);
        wake_up_interruptible(&dev_priv->wait_queue);
        clear_bit(MGA_BUF_SWAP_PENDING, &prim->buffer_status);
        clear_bit(MGA_IN_DISPATCH, &dev_priv->dispatch_status);
}
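
/* Rotate to the next primary buffer in the ring.  If it is still
 * marked MGA_BUF_IN_USE the caller sleeps on wait_queue until the
 * interrupt handler releases it.  prim_age values of 0 and 0xffffffff
 * are reserved (they collide with MGA_BUF_FREE/MGA_BUF_USED), so when
 * the counter would reach them the queue is flushed, the engine is
 * idled and the freelist ages are reset before the new age is handed
 * out.
 */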
int mga_advance_primary(drm_device_t *dev)
{
        DECLARE_WAITQUEUE(entry, current);
        drm_mga_private_t *dev_priv = dev->dev_private;
        drm_mga_prim_buf_t *prim_buffer;
        drm_device_dma_t  *dma      = dev->dma;
        int next_prim_idx;
        int ret = 0;

        /* This needs to reset the primary buffer if available,
         * we should collect stats on how many times it bites
         * its tail */

        next_prim_idx = dev_priv->current_prim_idx + 1;
        if(next_prim_idx >= MGA_NUM_PRIM_BUFS)
                next_prim_idx = 0;
        prim_buffer = dev_priv->prim_bufs[next_prim_idx];
        set_bit(MGA_IN_WAIT, &dev_priv->dispatch_status);

        /* In use is cleared in interrupt handler */

        if(test_and_set_bit(MGA_BUF_IN_USE, &prim_buffer->buffer_status)) {
                add_wait_queue(&dev_priv->wait_queue, &entry);
                for (;;) {
                        current->state = TASK_INTERRUPTIBLE;
                        mga_dma_schedule(dev, 0);
                        if(!test_and_set_bit(MGA_BUF_IN_USE,
                                             &prim_buffer->buffer_status))
                                break;
                        atomic_inc(&dev->total_sleeps);
                        atomic_inc(&dma->total_missed_sched);
                        schedule();
                        if (signal_pending(current)) {
                                ret = -ERESTARTSYS;
                                break;
                        }
                }
                current->state = TASK_RUNNING;
                remove_wait_queue(&dev_priv->wait_queue, &entry);
                if(ret) return ret;
        }
        clear_bit(MGA_IN_WAIT, &dev_priv->dispatch_status);

        /* This primary buffer is now free to use */
        prim_buffer->current_dma_ptr = prim_buffer->head;
        prim_buffer->num_dwords = 0;
        prim_buffer->sec_used = 0;
        prim_buffer->prim_age = dev_priv->next_prim_age++;
        if(prim_buffer->prim_age == 0 || prim_buffer->prim_age == 0xffffffff) {
                mga_flush_queue(dev);
                mga_dma_quiescent(dev);
                mga_reset_freelist(dev);
                prim_buffer->prim_age = (dev_priv->next_prim_age += 2);
        }

        /* Reset all buffer status stuff */
        clear_bit(MGA_BUF_NEEDS_OVERFLOW, &prim_buffer->buffer_status);
        clear_bit(MGA_BUF_FORCE_FIRE, &prim_buffer->buffer_status);
        clear_bit(MGA_BUF_SWAP_PENDING, &prim_buffer->buffer_status);

        dev_priv->current_prim = prim_buffer;
        dev_priv->current_prim_idx = next_prim_idx;
        return 0;
}

/* More dynamic performance decisions */
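/* Heuristics for kicking off the next primary buffer: fire right away
 * when it has been force-flagged, when a flush or a blocked
 * mga_freelist_get() is waiting on already queued work, or when a swap
 * is pending and the ring is not full.  Otherwise fire once the buffer
 * has accumulated enough secondary buffers, with the threshold scaled
 * by how many primary buffers are still pending on the hardware.
 */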
static inline int mga_decide_to_fire(drm_device_t *dev)
{
        drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;

        if(test_bit(MGA_BUF_FORCE_FIRE, &dev_priv->next_prim->buffer_status)) {
                return 1;
        }

        if (test_bit(MGA_IN_GETBUF, &dev_priv->dispatch_status) &&
            dev_priv->next_prim->num_dwords) {
                return 1;
        }

        if (test_bit(MGA_IN_FLUSH, &dev_priv->dispatch_status) &&
            dev_priv->next_prim->num_dwords) {
                return 1;
        }

        if(atomic_read(&dev_priv->pending_bufs) <= MGA_NUM_PRIM_BUFS - 1) {
                if(test_bit(MGA_BUF_SWAP_PENDING,
                            &dev_priv->next_prim->buffer_status)) {
                        return 1;
                }
        }

        if(atomic_read(&dev_priv->pending_bufs) <= MGA_NUM_PRIM_BUFS / 2) {
                if(dev_priv->next_prim->sec_used >= MGA_DMA_BUF_NR / 8) {
                        return 1;
                }
        }

        if(atomic_read(&dev_priv->pending_bufs) >= MGA_NUM_PRIM_BUFS / 2) {
                if(dev_priv->next_prim->sec_used >= MGA_DMA_BUF_NR / 4) {
                        return 1;
                }
        }

        return 0;
}
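
/* Top-level scheduler, entered from the ioctls and from the interrupt
 * bottom half.  dev->dma_flag serializes callers; unless the caller
 * already holds the lock (or a flush/wait/getbuf is in progress) the
 * hardware lock is taken with DRM_KERNEL_CONTEXT around the dispatch.
 * If the dispatch slot is free and mga_decide_to_fire() agrees, the
 * next primary buffer is fired; on the way out, waiters on flush_queue
 * and buf_queue are woken once their conditions are met.
 */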
int mga_dma_schedule(drm_device_t *dev, int locked)
{
        drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
        int               retval    = 0;

        if (!dev_priv) return -EBUSY;

        if (test_and_set_bit(0, &dev->dma_flag)) {
                retval = -EBUSY;
                goto sch_out_wakeup;
        }

        if(test_bit(MGA_IN_FLUSH, &dev_priv->dispatch_status) ||
           test_bit(MGA_IN_WAIT, &dev_priv->dispatch_status) ||
           test_bit(MGA_IN_GETBUF, &dev_priv->dispatch_status)) {
                locked = 1;
        }

        if (!locked &&
            !drm_lock_take(&dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT)) {
                clear_bit(0, &dev->dma_flag);
                retval = -EBUSY;
                goto sch_out_wakeup;
        }

        if(!test_and_set_bit(MGA_IN_DISPATCH, &dev_priv->dispatch_status)) {
                /* Fire dma buffer */
                if(mga_decide_to_fire(dev)) {
                        clear_bit(MGA_BUF_FORCE_FIRE,
                                  &dev_priv->next_prim->buffer_status);
                        if(dev_priv->current_prim == dev_priv->next_prim) {
                                /* Schedule overflow for a later time */
                                set_bit(MGA_BUF_NEEDS_OVERFLOW,
                                        &dev_priv->next_prim->buffer_status);
                        }
                        mga_fire_primary(dev, dev_priv->next_prim);
                } else {
                        clear_bit(MGA_IN_DISPATCH, &dev_priv->dispatch_status);
                }
        }

        if (!locked) {
                if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
                                  DRM_KERNEL_CONTEXT)) {
                        DRM_ERROR("\n");
                }
        }

        clear_bit(0, &dev->dma_flag);

sch_out_wakeup:
        if(test_bit(MGA_IN_FLUSH, &dev_priv->dispatch_status) &&
           atomic_read(&dev_priv->pending_bufs) == 0) {
                /* Everything has been processed by the hardware */
                clear_bit(MGA_IN_FLUSH, &dev_priv->dispatch_status);
                wake_up_interruptible(&dev_priv->flush_queue);
        }

        if(test_bit(MGA_IN_GETBUF, &dev_priv->dispatch_status)
           && dev_priv->tail->age < dev_priv->last_prim_age)
                wake_up_interruptible(&dev_priv->buf_queue);

        return retval;
}
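
/* Primary DMA interrupt handler.  The SOFTRAP at the end of a fired
 * primary buffer raises bit 0 of MGAREG_STATUS; the handler
 * acknowledges it through MGAREG_ICLEAR, publishes the retired
 * buffer's prim_age as last_dispatch/last_prim_age, drops its
 * IN_USE/SWAP_PENDING bits, and then queues mga_dma_task_queue() on
 * the immediate task queue so further scheduling happens outside
 * interrupt context.
 */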
static void mga_dma_service(int irq, void *device, struct pt_regs *regs)
{
        drm_device_t     *dev = (drm_device_t *)device;
        drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
        drm_mga_prim_buf_t *last_prim_buffer;

        atomic_inc(&dev->total_irq);
        if((MGA_READ(MGAREG_STATUS) & 0x00000001) != 0x00000001) return;
        MGA_WRITE(MGAREG_ICLEAR, 0x00000001);
        last_prim_buffer = dev_priv->last_prim;
        last_prim_buffer->num_dwords = 0;
        last_prim_buffer->sec_used = 0;
        dev_priv->sarea_priv->last_dispatch =
                dev_priv->last_prim_age = last_prim_buffer->prim_age;
        clear_bit(MGA_BUF_IN_USE, &last_prim_buffer->buffer_status);
        clear_bit(MGA_BUF_SWAP_PENDING, &last_prim_buffer->buffer_status);
        clear_bit(MGA_IN_DISPATCH, &dev_priv->dispatch_status);
        atomic_dec(&dev_priv->pending_bufs);
        queue_task(&dev->tq, &tq_immediate);
        mark_bh(IMMEDIATE_BH);
        wake_up_interruptible(&dev_priv->wait_queue);
}

static void mga_dma_task_queue(void *device)
{
        mga_dma_schedule((drm_device_t *)device, 0);
}
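
/* Tear down everything mga_dma_initialize() set up: flush outstanding
 * work if an IRQ is installed, wait for the engine to idle, then
 * release the ioremap of the primary space, the status page mappings,
 * the primary buffer descriptors and the freelist before freeing the
 * private structure itself.
 */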
int mga_dma_cleanup(drm_device_t *dev)
{
        if(dev->dev_private) {
                drm_mga_private_t *dev_priv =
                        (drm_mga_private_t *) dev->dev_private;

                if (dev->irq) mga_flush_queue(dev);
                mga_dma_quiescent(dev);

                if(dev_priv->ioremap) {
                        int temp = (dev_priv->warp_ucode_size +
                                    dev_priv->primary_size +
                                    PAGE_SIZE - 1) / PAGE_SIZE * PAGE_SIZE;

                        drm_ioremapfree((void *) dev_priv->ioremap, temp, dev);
                }
                if(dev_priv->status_page != NULL) {
                        iounmap(dev_priv->status_page);
                }
                if(dev_priv->real_status_page != 0UL) {
                        mga_free_page(dev, dev_priv->real_status_page);
                }
                if(dev_priv->prim_bufs != NULL) {
                        int i;
                        for(i = 0; i < MGA_NUM_PRIM_BUFS; i++) {
                                if(dev_priv->prim_bufs[i] != NULL) {
                                        drm_free(dev_priv->prim_bufs[i],
                                                 sizeof(drm_mga_prim_buf_t),
                                                 DRM_MEM_DRIVER);
                                }
                        }
                        drm_free(dev_priv->prim_bufs, sizeof(void *) *
                                 (MGA_NUM_PRIM_BUFS + 1),
                                 DRM_MEM_DRIVER);
                }
                if(dev_priv->head != NULL) {
                        mga_freelist_cleanup(dev);
                }

                drm_free(dev->dev_private, sizeof(drm_mga_private_t),
                         DRM_MEM_DRIVER);
                dev->dev_private = NULL;
        }

        return 0;
}
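
/* One-time DMA setup driven by the MGA_INIT_DMA ioctl: allocate and
 * zero the private structure, resolve the SAREA private area from
 * maplist[0], copy the layout parameters handed in through
 * drm_mga_init_t, build the primary ring with mga_init_primary_bufs(),
 * allocate and map the status page and point MGAREG_PRIMPTR at it,
 * push a small DWGSYNC/SOFTRAP buffer through the engine to prime the
 * status register, and finally build the buffer freelist.
 */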
static int mga_dma_initialize(drm_device_t *dev, drm_mga_init_t *init) {
        drm_mga_private_t *dev_priv;
        drm_map_t *sarea_map = NULL;

        dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
        if(dev_priv == NULL) return -ENOMEM;
        dev->dev_private = (void *) dev_priv;

        memset(dev_priv, 0, sizeof(drm_mga_private_t));

        if((init->reserved_map_idx >= dev->map_count) ||
           (init->buffer_map_idx >= dev->map_count)) {
                mga_dma_cleanup(dev);
                return -EINVAL;
        }

        dev_priv->reserved_map_idx = init->reserved_map_idx;
        dev_priv->buffer_map_idx = init->buffer_map_idx;
        sarea_map = dev->maplist[0];
        dev_priv->sarea_priv = (drm_mga_sarea_t *)
                ((u8 *)sarea_map->handle +
                 init->sarea_priv_offset);

        /* Scale primary size to the next page */
        dev_priv->chipset = init->chipset;
        dev_priv->frontOffset = init->frontOffset;
        dev_priv->backOffset = init->backOffset;
        dev_priv->depthOffset = init->depthOffset;
        dev_priv->textureOffset = init->textureOffset;
        dev_priv->textureSize = init->textureSize;
        dev_priv->cpp = init->cpp;
        dev_priv->sgram = init->sgram;
        dev_priv->stride = init->stride;

        dev_priv->mAccess = init->mAccess;
        init_waitqueue_head(&dev_priv->flush_queue);
        init_waitqueue_head(&dev_priv->buf_queue);
        dev_priv->WarpPipe = 0xff000000;
        dev_priv->vertexsize = 0;

        DRM_DEBUG("chipset=%d ucode_size=%d backOffset=%x depthOffset=%x\n",
                  dev_priv->chipset, dev_priv->warp_ucode_size,
                  dev_priv->backOffset, dev_priv->depthOffset);
        DRM_DEBUG("cpp: %d sgram: %d stride: %d maccess: %x\n",
                  dev_priv->cpp, dev_priv->sgram, dev_priv->stride,
                  dev_priv->mAccess);

        memcpy(&dev_priv->WarpIndex, &init->WarpIndex,
               sizeof(drm_mga_warp_index_t) * MGA_MAX_WARP_PIPES);

        if(mga_init_primary_bufs(dev, init) != 0) {
                DRM_ERROR("Can not initialize primary buffers\n");
                mga_dma_cleanup(dev);
                return -ENOMEM;
        }
        dev_priv->real_status_page = mga_alloc_page(dev);
        if(dev_priv->real_status_page == 0UL) {
                mga_dma_cleanup(dev);
                DRM_ERROR("Can not allocate status page\n");
                return -ENOMEM;
        }

        dev_priv->status_page =
                ioremap_nocache(virt_to_bus((void *)dev_priv->real_status_page),
                                PAGE_SIZE);

        if(dev_priv->status_page == NULL) {
                mga_dma_cleanup(dev);
                DRM_ERROR("Can not remap status page\n");
                return -ENOMEM;
        }

        /* Write status page when secend or softrap occurs */
        MGA_WRITE(MGAREG_PRIMPTR,
                  virt_to_bus((void *)dev_priv->real_status_page) | 0x00000003);

        /* Private is now filled in, initialize the hardware */
        {
                PRIMLOCALS;
                PRIMGETPTR( dev_priv );

                PRIMOUTREG(MGAREG_DMAPAD, 0);
                PRIMOUTREG(MGAREG_DMAPAD, 0);
                PRIMOUTREG(MGAREG_DWGSYNC, 0x0100);
                PRIMOUTREG(MGAREG_SOFTRAP, 0);
                /* Poll for the first buffer to ensure that
                 * the status register will be correct
                 */

                mga_flush_write_combine();
                MGA_WRITE(MGAREG_PRIMADDRESS, phys_head | TT_GENERAL);

                MGA_WRITE(MGAREG_PRIMEND, ((phys_head + num_dwords * 4) |
                                           PDEA_pagpxfer_enable));

                while(MGA_READ(MGAREG_DWGSYNC) != 0x0100) ;
        }

        if(mga_freelist_init(dev) != 0) {
                DRM_ERROR("Could not initialize freelist\n");
                mga_dma_cleanup(dev);
                return -ENOMEM;
        }
        return 0;
}

int mga_dma_init(struct inode *inode, struct file *filp,
                 unsigned int cmd, unsigned long arg)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        drm_mga_init_t init;

        if (copy_from_user(&init, (drm_mga_init_t *)arg, sizeof(init)))
                return -EFAULT;

        switch(init.func) {
        case MGA_INIT_DMA:
                return mga_dma_initialize(dev, &init);
        case MGA_CLEANUP_DMA:
                return mga_dma_cleanup(dev);
        }

        return -EINVAL;
}
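
/* Claim the interrupt line.  Interrupt generation is masked through
 * MGAREG_IEN before request_irq() installs the shared (SA_SHIRQ)
 * handler, and only afterwards is the SOFTRAP interrupt (bit 0)
 * cleared and re-enabled, so nothing can fire while the handler and
 * the task queue hooks are still being wired up.
 */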
int mga_irq_install(drm_device_t *dev, int irq)
{
        int retcode;

        if (!irq)     return -EINVAL;

        down(&dev->struct_sem);
        if (dev->irq) {
                up(&dev->struct_sem);
                return -EBUSY;
        }
        dev->irq = irq;
        up(&dev->struct_sem);

        DRM_DEBUG("install irq handler %d\n", irq);

        dev->context_flag     = 0;
        dev->interrupt_flag   = 0;
        dev->dma_flag         = 0;
        dev->dma->next_buffer = NULL;
        dev->dma->next_queue  = NULL;
        dev->dma->this_buffer = NULL;
        INIT_LIST_HEAD(&dev->tq.list);
        dev->tq.sync          = 0;
        dev->tq.routine       = mga_dma_task_queue;
        dev->tq.data          = dev;

                                /* Before installing handler */
        MGA_WRITE(MGAREG_IEN, 0);
                                /* Install handler */
        if ((retcode = request_irq(dev->irq,
                                   mga_dma_service,
                                   SA_SHIRQ,
                                   dev->devname,
                                   dev))) {
                down(&dev->struct_sem);
                dev->irq = 0;
                up(&dev->struct_sem);
                return retcode;
        }
                                /* After installing handler */
        MGA_WRITE(MGAREG_ICLEAR, 0x00000001);
        MGA_WRITE(MGAREG_IEN, 0x00000001);
        return 0;
}

int mga_irq_uninstall(drm_device_t *dev)
{
        int irq;

        down(&dev->struct_sem);
        irq      = dev->irq;
        dev->irq = 0;
        up(&dev->struct_sem);

        if (!irq) return -EINVAL;
        DRM_DEBUG("remove irq handler %d\n", irq);
        MGA_WRITE(MGAREG_ICLEAR, 0x00000001);
        MGA_WRITE(MGAREG_IEN, 0);
        free_irq(irq, dev);
        return 0;
}

int mga_control(struct inode *inode, struct file *filp, unsigned int cmd,
                  unsigned long arg)
{
        drm_file_t      *priv   = filp->private_data;
        drm_device_t    *dev    = priv->dev;
        drm_control_t   ctl;

        if (copy_from_user(&ctl, (drm_control_t *)arg, sizeof(ctl)))
                return -EFAULT;

        switch (ctl.func) {
        case DRM_INST_HANDLER:
                return mga_irq_install(dev, ctl.irq);
        case DRM_UNINST_HANDLER:
                return mga_irq_uninstall(dev);
        default:
                return -EINVAL;
        }
}
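
/* Block until everything queued so far has been executed.  The caller
 * sets MGA_IN_FLUSH and sleeps on flush_queue; mga_dma_schedule()
 * clears the bit and issues the wakeup once pending_bufs drops to
 * zero.  A pending signal aborts the wait with -EINTR.
 */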
static int mga_flush_queue(drm_device_t *dev)
{
        DECLARE_WAITQUEUE(entry, current);
        drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
        int ret = 0;

        if(!dev_priv) return 0;

        if(dev_priv->next_prim->num_dwords != 0) {
                add_wait_queue(&dev_priv->flush_queue, &entry);
                if (test_bit(MGA_IN_FLUSH, &dev_priv->dispatch_status))
                        DRM_ERROR("Incorrect mga_flush_queue logic\n");
                set_bit(MGA_IN_FLUSH, &dev_priv->dispatch_status);
                mga_dma_schedule(dev, 0);
                for (;;) {
                        current->state = TASK_INTERRUPTIBLE;
                        if (!test_bit(MGA_IN_FLUSH,
                                      &dev_priv->dispatch_status))
                                break;
                        atomic_inc(&dev->total_sleeps);
                        schedule();
                        if (signal_pending(current)) {
                                ret = -EINTR; /* Can't restart */
                                clear_bit(MGA_IN_FLUSH,
                                          &dev_priv->dispatch_status);
                                break;
                        }
                }
                current->state = TASK_RUNNING;
                remove_wait_queue(&dev_priv->flush_queue, &entry);
        }
        return ret;
}

/* Must be called with the lock held */
void mga_reclaim_buffers(drm_device_t *dev, pid_t pid)
{
        drm_device_dma_t *dma = dev->dma;
        int              i;

        if (!dma) return;
        if(dev->dev_private == NULL) return;
        if(dma->buflist == NULL) return;

        DRM_DEBUG("buf_count=%d\n", dma->buf_count);

        mga_flush_queue(dev);

        for (i = 0; i < dma->buf_count; i++) {
                drm_buf_t *buf = dma->buflist[ i ];
                drm_mga_buf_priv_t *buf_priv = buf->dev_private;

                /* Only buffers that need to get reclaimed ever
                 * get set to free
                 */
                if (buf->pid == pid  && buf_priv) {
                        if(buf_priv->my_freelist->age == MGA_BUF_USED)
                                buf_priv->my_freelist->age = MGA_BUF_FREE;
                }
        }
}
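
/* DRM lock ioctl for this driver.  The caller sleeps on
 * dev->lock.lock_queue until drm_lock_take() succeeds, then the usual
 * job control signals are blocked for the duration of the lock and, if
 * _DRM_LOCK_QUIESCENT was requested, the queue is flushed and the
 * engine idled before returning.
 */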
int mga_lock(struct inode *inode, struct file *filp, unsigned int cmd,
               unsigned long arg)
{
        drm_file_t        *priv   = filp->private_data;
        drm_device_t      *dev    = priv->dev;
        DECLARE_WAITQUEUE(entry, current);
        int               ret   = 0;
        drm_lock_t        lock;

        if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
                return -EFAULT;

        if (lock.context == DRM_KERNEL_CONTEXT) {
                DRM_ERROR("Process %d using kernel context %d\n",
                          current->pid, lock.context);
                return -EINVAL;
        }

        if (lock.context < 0) return -EINVAL;

        /* Only one queue:
         */

        if (!ret) {
                add_wait_queue(&dev->lock.lock_queue, &entry);
                for (;;) {
                        current->state = TASK_INTERRUPTIBLE;
                        if (!dev->lock.hw_lock) {
                                /* Device has been unregistered */
                                ret = -EINTR;
                                break;
                        }
                        if (drm_lock_take(&dev->lock.hw_lock->lock,
                                          lock.context)) {
                                dev->lock.pid       = current->pid;
                                dev->lock.lock_time = jiffies;
                                atomic_inc(&dev->total_locks);
                                break;  /* Got lock */
                        }

                                /* Contention */
                        atomic_inc(&dev->total_sleeps);
                        schedule();
                        if (signal_pending(current)) {
                                ret = -ERESTARTSYS;
                                break;
                        }
                }
                current->state = TASK_RUNNING;
                remove_wait_queue(&dev->lock.lock_queue, &entry);
        }

        if (!ret) {
                sigemptyset(&dev->sigmask);
                sigaddset(&dev->sigmask, SIGSTOP);
                sigaddset(&dev->sigmask, SIGTSTP);
                sigaddset(&dev->sigmask, SIGTTIN);
                sigaddset(&dev->sigmask, SIGTTOU);
                dev->sigdata.context = lock.context;
                dev->sigdata.lock    = dev->lock.hw_lock;
                block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);

                if (lock.flags & _DRM_LOCK_QUIESCENT) {
                   DRM_DEBUG("_DRM_LOCK_QUIESCENT\n");
                   mga_flush_queue(dev);
                   mga_dma_quiescent(dev);
                }
        }

        if (ret) DRM_DEBUG("%d %s\n", lock.context,
                           ret ? "interrupted" : "has lock");
        return ret;
}
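
/* DMA flush ioctl.  The caller must hold the lock.  _DRM_LOCK_FLUSH
 * and _DRM_LOCK_FLUSH_ALL force-fire whatever is sitting in the
 * current primary buffer and kick the scheduler with the lock already
 * owned; _DRM_LOCK_QUIESCENT additionally waits for the queue to drain
 * and the engine to go idle.
 */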
int mga_flush_ioctl(struct inode *inode, struct file *filp,
                    unsigned int cmd, unsigned long arg)
{
        drm_file_t        *priv   = filp->private_data;
        drm_device_t      *dev    = priv->dev;
        drm_lock_t        lock;
        drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;

        if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
                return -EFAULT;

        if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
                DRM_ERROR("lock not held\n");
                return -EINVAL;
        }

        if(lock.flags & _DRM_LOCK_FLUSH || lock.flags & _DRM_LOCK_FLUSH_ALL) {
                drm_mga_prim_buf_t *temp_buf;

                temp_buf = dev_priv->current_prim;

                if(temp_buf && temp_buf->num_dwords) {
                        set_bit(MGA_BUF_FORCE_FIRE, &temp_buf->buffer_status);
                        mga_advance_primary(dev);
                }
                mga_dma_schedule(dev, 1);
        }
        if(lock.flags & _DRM_LOCK_QUIESCENT) {
                mga_flush_queue(dev);
                mga_dma_quiescent(dev);
        }

        return 0;
}
