/* Source: OpenCores Subversion repository or1k (https://opencores.org/ocsvn/or1k/or1k/trunk),
 * or1k/trunk/linux/linux-2.4/drivers/ieee1394/amdtp.c, rev 1774.
 */

/* -*- c-basic-offset: 8 -*-
 *
 * amdtp.c - Audio and Music Data Transmission Protocol Driver
 * Copyright (C) 2001 Kristian Høgsberg
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/* OVERVIEW
 * --------
 *
 * The AMDTP driver is designed to expose the IEEE1394 bus as a
 * regular OSS soundcard, i.e. you can link /dev/dsp to /dev/amdtp and
 * then your favourite MP3 player, game or whatever sound program will
 * output to an IEEE1394 isochronous channel.  The signal destination
 * could be a set of IEEE1394 loudspeakers (if and when such things
 * become available) or an amplifier with IEEE1394 input (like the
 * Sony STR-LSA1).  The driver only handles the actual streaming; some
 * connection management is also required for this to actually work.
 * That is outside the scope of this driver, and furthermore it is not
 * really standardized yet.
 *
 * The Audio and Music Data Transmission Protocol is available at
 *
 *     http://www.1394ta.org/Download/Technology/Specifications/2001/AM20Final-jf2.pdf
 *
 *
 * TODO
 * ----
 *
 * - We should be able to change input sample format between LE/BE, as
 *   we already shift the bytes around when we construct the iso
 *   packets.
 *
 * - Fix DMA stop after bus reset!
 *
 * - Clean up iso context handling in ohci1394.
 *
 *
 * MAYBE TODO
 * ----------
 *
 * - Receive data for local playback or recording.  Playback requires
 *   soft syncing with the sound card.
 *
 * - Signal processing, i.e. receive packets, do some processing, and
 *   transmit them again using the same packet structure and timestamps
 *   offset by processing time.
 *
 * - Maybe make an ALSA interface, that is, create a file_ops
 *   implementation that recognizes ALSA ioctls and uses defaults for
 *   things that can't be controlled through ALSA (iso channel).
 *
 * Changes:
 *
 * - Audit copy_from_user in amdtp_write.
 *                           Daniele Bellucci <bellucda@tiscali.it>
 *
 */
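
/* USAGE SKETCH (illustrative only, not part of the original driver)
 * -----------------------------------------------------------------
 *
 * A minimal user-space sketch of how a program might configure and feed
 * this driver.  The device node name (/dev/amdtp/0) follows from the
 * devfs registration below but may differ on a given system; the layout
 * of struct amdtp_ioctl and the AMDTP_* constants come from amdtp.h; and
 * get_audio() is a hypothetical stand-in for whatever produces
 * interleaved 16-bit little-endian samples (the only input format this
 * driver accepts).
 *
 *     #include <fcntl.h>
 *     #include <unistd.h>
 *     #include <sys/ioctl.h>
 *     #include "amdtp.h"
 *
 *     int main(void)
 *     {
 *             struct amdtp_ioctl cfg;
 *             short samples[2 * 1024];
 *             int fd;
 *
 *             cfg.format    = AMDTP_FORMAT_RAW;
 *             cfg.rate      = 44100;
 *             cfg.dimension = 2;
 *             cfg.mode      = AMDTP_MODE_BLOCKING;
 *             cfg.u.channel = 63;
 *
 *             fd = open("/dev/amdtp/0", O_WRONLY);
 *             ioctl(fd, AMDTP_IOC_CHANNEL, &cfg);
 *
 *             while (get_audio(samples, sizeof samples))
 *                     write(fd, samples, sizeof samples);
 *
 *             close(fd);
 *             return 0;
 *     }
 */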
 
#include <linux/module.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <asm/uaccess.h>
#include <asm/atomic.h>

#include "hosts.h"
#include "highlevel.h"
#include "ieee1394.h"
#include "ieee1394_core.h"
#include "ohci1394.h"

#include "amdtp.h"
#include "cmp.h"

#define FMT_AMDTP 0x10
#define FDF_AM824 0x00
#define FDF_SFC_32KHZ   0x00
#define FDF_SFC_44K1HZ  0x01
#define FDF_SFC_48KHZ   0x02
#define FDF_SFC_88K2HZ  0x03
#define FDF_SFC_96KHZ   0x04
#define FDF_SFC_176K4HZ 0x05
#define FDF_SFC_192KHZ  0x06

struct descriptor_block {
        struct output_more_immediate {
                u32 control;
                u32 pad0;
                u32 skip;
                u32 pad1;
                u32 header[4];
        } header_desc;

        struct output_last {
                u32 control;
                u32 data_address;
                u32 branch;
                u32 status;
        } payload_desc;
};

struct packet {
        struct descriptor_block *db;
        dma_addr_t db_bus;
        struct iso_packet *payload;
        dma_addr_t payload_bus;
};

#include <asm/byteorder.h>

#if defined __BIG_ENDIAN_BITFIELD

struct iso_packet {
        /* First quadlet */
        unsigned int dbs      : 8;
        unsigned int eoh0     : 2;
        unsigned int sid      : 6;

        unsigned int dbc      : 8;
        unsigned int fn       : 2;
        unsigned int qpc      : 3;
        unsigned int sph      : 1;
        unsigned int reserved : 2;

        /* Second quadlet */
        unsigned int fdf      : 8;
        unsigned int eoh1     : 2;
        unsigned int fmt      : 6;

        unsigned int syt      : 16;

        quadlet_t data[0];
};

#elif defined __LITTLE_ENDIAN_BITFIELD

struct iso_packet {
        /* First quadlet */
        unsigned int sid      : 6;
        unsigned int eoh0     : 2;
        unsigned int dbs      : 8;

        unsigned int reserved : 2;
        unsigned int sph      : 1;
        unsigned int qpc      : 3;
        unsigned int fn       : 2;
        unsigned int dbc      : 8;

        /* Second quadlet */
        unsigned int fmt      : 6;
        unsigned int eoh1     : 2;
        unsigned int fdf      : 8;

        unsigned int syt      : 16;

        quadlet_t data[0];
};

#else

#error Unknown bitfield type

#endif
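
/* The bitfields above describe the two-quadlet CIP header that
 * fill_packet() below writes in front of every payload.  As an
 * illustrative example (values derived from this file, not quoted from
 * the AMDTP spec): a 44.1 kHz stereo stream is sent with dbs = 2 (one
 * data block is two quadlets), fmt = FMT_AMDTP (0x10), fdf =
 * FDF_SFC_44K1HZ (0x01), dbc counting data blocks modulo 256, and syt
 * holding the presentation timestamp, or 0xffff for packets that carry
 * no timestamp.
 */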
 
struct fraction {
        int integer;
        int numerator;
        int denominator;
};

#define PACKET_LIST_SIZE 256
#define MAX_PACKET_LISTS 4

struct packet_list {
        struct list_head link;
        int last_cycle_count;
        struct packet packets[PACKET_LIST_SIZE];
};

#define BUFFER_SIZE 128

/* This implements a circular buffer for incoming samples. */

struct buffer {
        size_t head, tail, length, size;
        unsigned char data[0];
};

struct stream {
        int iso_channel;
        int format;
        int rate;
        int dimension;
        int fdf;
        int mode;
        int sample_format;
        struct cmp_pcr *opcr;

        /* Input samples are copied here. */
        struct buffer *input;

        /* ISO Packer state */
        unsigned char dbc;
        struct packet_list *current_packet_list;
        int current_packet;
        struct fraction ready_samples, samples_per_cycle;

        /* We use these to generate control bits when we are packing
         * iec958 data.
         */
        int iec958_frame_count;
        int iec958_rate_code;

        /* The cycle_count and cycle_offset fields are used for the
         * synchronization timestamps (syt) in the cip header.  They
         * are incremented by at least a cycle every time we put a
         * time stamp in a packet.  As we don't time stamp all
         * packets, cycle_count isn't updated in every cycle, and
         * sometimes it's incremented by 2.  Thus, we have
         * cycle_count2, which is simply incremented by one with each
         * packet, so we can compare it to the transmission time
         * written back in the dma programs.
         */
        atomic_t cycle_count, cycle_count2;
        struct fraction cycle_offset, ticks_per_syt_offset;
        int syt_interval;
        int stale_count;

        /* These fields control the sample output to the DMA engine.
         * The dma_packet_lists list holds packet lists currently
         * queued for dma; the head of the list is currently being
         * processed.  The last program in a packet list generates an
         * interrupt, which removes the head from dma_packet_lists and
         * puts it back on the free list.
         */
        struct list_head dma_packet_lists;
        struct list_head free_packet_lists;
        wait_queue_head_t packet_list_wait;
        spinlock_t packet_list_lock;
        struct ohci1394_iso_tasklet iso_tasklet;
        struct pci_pool *descriptor_pool, *packet_pool;

        /* Streams at a host controller are chained through this field. */
        struct list_head link;
        struct amdtp_host *host;
};

struct amdtp_host {
        struct hpsb_host *host;
        struct ti_ohci *ohci;
        struct list_head stream_list;
        devfs_handle_t devfs;
        spinlock_t stream_list_lock;
};

static devfs_handle_t devfs_handle;

static struct hpsb_highlevel amdtp_highlevel;

/* FIXME: This doesn't belong here... */

#define OHCI1394_CONTEXT_CYCLE_MATCH 0x80000000
#define OHCI1394_CONTEXT_RUN         0x00008000
#define OHCI1394_CONTEXT_WAKE        0x00001000
#define OHCI1394_CONTEXT_DEAD        0x00000800
#define OHCI1394_CONTEXT_ACTIVE      0x00000400

void ohci1394_start_it_ctx(struct ti_ohci *ohci, int ctx,
                           dma_addr_t first_cmd, int z, int cycle_match)
{
        reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << ctx);
        reg_write(ohci, OHCI1394_IsoXmitCommandPtr + ctx * 16, first_cmd | z);
        reg_write(ohci, OHCI1394_IsoXmitContextControlClear + ctx * 16, ~0);
        wmb();
        reg_write(ohci, OHCI1394_IsoXmitContextControlSet + ctx * 16,
                  OHCI1394_CONTEXT_CYCLE_MATCH | (cycle_match << 16) |
                  OHCI1394_CONTEXT_RUN);
}

void ohci1394_wake_it_ctx(struct ti_ohci *ohci, int ctx)
{
        reg_write(ohci, OHCI1394_IsoXmitContextControlSet + ctx * 16,
                  OHCI1394_CONTEXT_WAKE);
}

void ohci1394_stop_it_ctx(struct ti_ohci *ohci, int ctx, int synchronous)
{
        u32 control;
        int wait;

        reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << ctx);
        reg_write(ohci, OHCI1394_IsoXmitContextControlClear + ctx * 16,
                  OHCI1394_CONTEXT_RUN);
        wmb();

        if (synchronous) {
                for (wait = 0; wait < 5; wait++) {
                        control = reg_read(ohci, OHCI1394_IsoXmitContextControlSet + ctx * 16);
                        if ((control & OHCI1394_CONTEXT_ACTIVE) == 0)
                                break;

                        set_current_state(TASK_INTERRUPTIBLE);
                        schedule_timeout(1);
                }
        }
}

/* Note: we can test if free_packet_lists is empty without acquiring
 * the packet_list_lock.  The interrupt handler only adds to the free
 * list, so there is no race condition between testing the list
 * non-empty and acquiring the lock.
 */

static struct packet_list *stream_get_free_packet_list(struct stream *s)
{
        struct packet_list *pl;
        unsigned long flags;

        if (list_empty(&s->free_packet_lists))
                return NULL;

        spin_lock_irqsave(&s->packet_list_lock, flags);
        pl = list_entry(s->free_packet_lists.next, struct packet_list, link);
        list_del(&pl->link);
        spin_unlock_irqrestore(&s->packet_list_lock, flags);

        return pl;
}

static void stream_start_dma(struct stream *s, struct packet_list *pl)
{
        u32 syt_cycle, cycle_count, start_cycle;

        cycle_count = reg_read(s->host->ohci,
                               OHCI1394_IsochronousCycleTimer) >> 12;
        syt_cycle = (pl->last_cycle_count - PACKET_LIST_SIZE + 1) & 0x0f;

        /* We program the DMA controller to start transmission at
         * least 17 cycles from now - this happens when the lower four
         * bits of cycle_count are 0x0f and syt_cycle is 0; in that
         * case the start cycle is cycle_count - 15 + 32. */
        start_cycle = (cycle_count & ~0x0f) + 32 + syt_cycle;
        if ((start_cycle & 0x1fff) >= 8000)
                start_cycle = start_cycle - 8000 + 0x2000;

        ohci1394_start_it_ctx(s->host->ohci, s->iso_tasklet.context,
                              pl->packets[0].db_bus, 3,
                              start_cycle & 0x7fff);
}

static void stream_put_dma_packet_list(struct stream *s,
                                       struct packet_list *pl)
{
        unsigned long flags;
        struct packet_list *prev;

        /* Remember the cycle_count used for timestamping the last packet. */
        pl->last_cycle_count = atomic_read(&s->cycle_count2) - 1;
        pl->packets[PACKET_LIST_SIZE - 1].db->payload_desc.branch = 0;

        spin_lock_irqsave(&s->packet_list_lock, flags);
        list_add_tail(&pl->link, &s->dma_packet_lists);
        spin_unlock_irqrestore(&s->packet_list_lock, flags);

        prev = list_entry(pl->link.prev, struct packet_list, link);
        if (pl->link.prev != &s->dma_packet_lists) {
                struct packet *last = &prev->packets[PACKET_LIST_SIZE - 1];
                last->db->payload_desc.branch = pl->packets[0].db_bus | 3;
                last->db->header_desc.skip = pl->packets[0].db_bus | 3;
                ohci1394_wake_it_ctx(s->host->ohci, s->iso_tasklet.context);
        }
        else
                stream_start_dma(s, pl);
}

static void stream_shift_packet_lists(unsigned long l)
{
        struct stream *s = (struct stream *) l;
        struct packet_list *pl;
        struct packet *last;
        int diff;

        if (list_empty(&s->dma_packet_lists)) {
                HPSB_ERR("empty dma_packet_lists in %s", __FUNCTION__);
                return;
        }

        /* Now that we know the list is non-empty, we can get the head
         * of the list without locking, because the process context
         * only adds to the tail.
         */
        pl = list_entry(s->dma_packet_lists.next, struct packet_list, link);
        last = &pl->packets[PACKET_LIST_SIZE - 1];

        /* This is weird... if we stop dma processing in the middle of
         * a packet list, the dma context immediately generates an
         * interrupt if we enable it again later.  This only happens
         * when amdtp_release is interrupted while waiting for dma to
         * complete, though.  Anyway, we detect this by seeing that
         * the status of the dma descriptor that we expected an
         * interrupt from is still 0.
         */
        if (last->db->payload_desc.status == 0) {
                HPSB_INFO("weird interrupt...");
                return;
        }

        /* If the last descriptor block does not specify a branch
         * address, we have a sample underflow.
         */
        if (last->db->payload_desc.branch == 0)
                HPSB_INFO("FIXME: sample underflow...");

        /* Here we check when (which cycle) the last packet was sent
         * and compare it to what the iso packer was using at the
         * time.  If there is a mismatch, we adjust the cycle count in
         * the iso packer.  However, there are still up to
         * MAX_PACKET_LISTS packet lists queued with bad time stamps,
         * so we disable time stamp monitoring for the next
         * MAX_PACKET_LISTS packet lists.
         */
        diff = (last->db->payload_desc.status - pl->last_cycle_count) & 0xf;
        if (diff > 0 && s->stale_count == 0) {
                atomic_add(diff, &s->cycle_count);
                atomic_add(diff, &s->cycle_count2);
                s->stale_count = MAX_PACKET_LISTS;
        }

        if (s->stale_count > 0)
                s->stale_count--;

        /* Finally, we move the packet list that was just processed
         * back to the free list, and notify any waiters.
         */
        spin_lock(&s->packet_list_lock);
        list_del(&pl->link);
        list_add_tail(&pl->link, &s->free_packet_lists);
        spin_unlock(&s->packet_list_lock);

        wake_up_interruptible(&s->packet_list_wait);
}

static struct packet *stream_current_packet(struct stream *s)
{
        if (s->current_packet_list == NULL &&
            (s->current_packet_list = stream_get_free_packet_list(s)) == NULL)
                return NULL;

        return &s->current_packet_list->packets[s->current_packet];
}

static void stream_queue_packet(struct stream *s)
{
        s->current_packet++;
        if (s->current_packet == PACKET_LIST_SIZE) {
                stream_put_dma_packet_list(s, s->current_packet_list);
                s->current_packet_list = NULL;
                s->current_packet = 0;
        }
}

/* Integer fractional math.  When we transmit a 44k1Hz signal we must
 * send 5 41/80 samples per isochronous cycle, as these occur 8000
 * times a second.  Of course, we must send an integral number of
 * samples in a packet, so we use the integer math to alternate
 * between sending 5 and 6 samples per packet.
 */
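
/* A worked example: at 44.1 kHz, fraction_init(&s->samples_per_cycle,
 * 44100, 8000) gives 5 + 4100/8000 = 5 41/80 samples per cycle.  Adding
 * that to ready_samples each cycle and taking fraction_floor() of the
 * sum yields event counts of 5, 6, 5, 6, ...; over each 80-cycle period
 * 41 packets carry 6 samples and 39 carry 5, i.e. exactly the 441
 * samples required.
 */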
 
static void fraction_init(struct fraction *f, int numerator, int denominator)
{
        f->integer = numerator / denominator;
        f->numerator = numerator % denominator;
        f->denominator = denominator;
}

static __inline__ void fraction_add(struct fraction *dst,
                                    struct fraction *src1,
                                    struct fraction *src2)
{
        /* assert: src1->denominator == src2->denominator */

        int sum, denom;

        /* We use these two local variables to allow gcc to optimize
         * the division and the modulo into only one division. */

        sum = src1->numerator + src2->numerator;
        denom = src1->denominator;
        dst->integer = src1->integer + src2->integer + sum / denom;
        dst->numerator = sum % denom;
        dst->denominator = denom;
}

static __inline__ void fraction_sub_int(struct fraction *dst,
                                        struct fraction *src, int integer)
{
        dst->integer = src->integer - integer;
        dst->numerator = src->numerator;
        dst->denominator = src->denominator;
}

static __inline__ int fraction_floor(struct fraction *frac)
{
        return frac->integer;
}

static __inline__ int fraction_ceil(struct fraction *frac)
{
        return frac->integer + (frac->numerator > 0 ? 1 : 0);
}

void packet_initialize(struct packet *p, struct packet *next)
{
        /* Here we initialize the dma descriptor block for
         * transferring one iso packet.  We use two descriptors per
         * packet: an OUTPUT_MORE_IMMEDIATE descriptor for the
         * IEEE1394 iso packet header and an OUTPUT_LAST descriptor
         * for the payload.
         */

        p->db->header_desc.control =
                DMA_CTL_OUTPUT_MORE | DMA_CTL_IMMEDIATE | 8;

        if (next) {
                p->db->payload_desc.control =
                        DMA_CTL_OUTPUT_LAST | DMA_CTL_BRANCH;
                p->db->payload_desc.branch = next->db_bus | 3;
                p->db->header_desc.skip = next->db_bus | 3;
        }
        else {
                p->db->payload_desc.control =
                        DMA_CTL_OUTPUT_LAST | DMA_CTL_BRANCH |
                        DMA_CTL_UPDATE | DMA_CTL_IRQ;
                p->db->payload_desc.branch = 0;
                p->db->header_desc.skip = 0;
        }
        p->db->payload_desc.data_address = p->payload_bus;
        p->db->payload_desc.status = 0;
}

struct packet_list *packet_list_alloc(struct stream *s)
{
        int i;
        struct packet_list *pl;
        struct packet *next;

        pl = kmalloc(sizeof *pl, SLAB_KERNEL);
        if (pl == NULL)
                return NULL;

        for (i = 0; i < PACKET_LIST_SIZE; i++) {
                struct packet *p = &pl->packets[i];
                p->db = pci_pool_alloc(s->descriptor_pool, SLAB_KERNEL,
                                       &p->db_bus);
                p->payload = pci_pool_alloc(s->packet_pool, SLAB_KERNEL,
                                            &p->payload_bus);
        }

        for (i = 0; i < PACKET_LIST_SIZE; i++) {
                if (i < PACKET_LIST_SIZE - 1)
                        next = &pl->packets[i + 1];
                else
                        next = NULL;
                packet_initialize(&pl->packets[i], next);
        }

        return pl;
}

void packet_list_free(struct packet_list *pl, struct stream *s)
{
        int i;

        for (i = 0; i < PACKET_LIST_SIZE; i++) {
                struct packet *p = &pl->packets[i];
                pci_pool_free(s->descriptor_pool, p->db, p->db_bus);
                pci_pool_free(s->packet_pool, p->payload, p->payload_bus);
        }
        kfree(pl);
}

static struct buffer *buffer_alloc(int size)
{
        struct buffer *b;

        b = kmalloc(sizeof *b + size, SLAB_KERNEL);
        if (b == NULL)
                return NULL;
        b->head = 0;
        b->tail = 0;
        b->length = 0;
        b->size = size;

        return b;
}

static unsigned char *buffer_get_bytes(struct buffer *buffer, int size)
{
        unsigned char *p;

        if (buffer->head + size > buffer->size)
                BUG();

        p = &buffer->data[buffer->head];
        buffer->head += size;
        if (buffer->head == buffer->size)
                buffer->head = 0;
        buffer->length -= size;

        return p;
}

static unsigned char *buffer_put_bytes(struct buffer *buffer,
                                       size_t max, size_t *actual)
{
        size_t length;
        unsigned char *p;

        p = &buffer->data[buffer->tail];
        length = min(buffer->size - buffer->length, max);
        if (buffer->tail + length < buffer->size) {
                *actual = length;
                buffer->tail += length;
        }
        else {
                *actual = buffer->size - buffer->tail;
                buffer->tail = 0;
        }

        buffer->length += *actual;
        return p;
}

static u32 get_iec958_header_bits(struct stream *s, int sub_frame, u32 sample)
{
        int csi, parity, shift;
        int block_start;
        u32 bits;

        switch (s->iec958_frame_count) {
        case 1:
                csi = s->format == AMDTP_FORMAT_IEC958_AC3;
                break;
        case 2:
        case 9:
                csi = 1;
                break;
        case 24 ... 27:
                csi = (s->iec958_rate_code >> (27 - s->iec958_frame_count)) & 0x01;
                break;
        default:
                csi = 0;
                break;
        }

        block_start = (s->iec958_frame_count == 0 && sub_frame == 0);

        /* The parity bit is the xor of the sample bits and the
         * channel status info bit. */
        for (shift = 16, parity = sample ^ csi; shift > 0; shift >>= 1)
                parity ^= (parity >> shift);

        bits =  (block_start << 5) |            /* Block start bit */
                ((sub_frame == 0) << 4) |       /* Subframe bit */
                ((parity & 1) << 3) |           /* Parity bit */
                (csi << 2);                     /* Channel status info bit */

        return bits;
}

static u32 get_header_bits(struct stream *s, int sub_frame, u32 sample)
{
        switch (s->format) {
        case AMDTP_FORMAT_IEC958_PCM:
        case AMDTP_FORMAT_IEC958_AC3:
                return get_iec958_header_bits(s, sub_frame, sample);

        case AMDTP_FORMAT_RAW:
                return 0x40;

        default:
                return 0;
        }
}

static void fill_payload_le16(struct stream *s, quadlet_t *data, int nevents)
{
        quadlet_t *event, sample, bits;
        unsigned char *p;
        int i, j;

        for (i = 0, event = data; i < nevents; i++) {

                for (j = 0; j < s->dimension; j++) {
                        p = buffer_get_bytes(s->input, 2);
                        sample = (p[1] << 16) | (p[0] << 8);
                        bits = get_header_bits(s, j, sample);
                        event[j] = cpu_to_be32((bits << 24) | sample);
                }

                event += s->dimension;
                if (++s->iec958_frame_count == 192)
                        s->iec958_frame_count = 0;
        }
}

static void fill_packet(struct stream *s, struct packet *packet, int nevents)
{
        int syt_index, syt, size;
        u32 control;

        size = (nevents * s->dimension + 2) * sizeof(quadlet_t);

        /* Update DMA descriptors */
        packet->db->payload_desc.status = 0;
        control = packet->db->payload_desc.control & 0xffff0000;
        packet->db->payload_desc.control = control | size;

        /* Fill IEEE1394 headers */
        packet->db->header_desc.header[0] =
                (IEEE1394_SPEED_100 << 16) | (0x01 << 14) |
                (s->iso_channel << 8) | (TCODE_ISO_DATA << 4);
        packet->db->header_desc.header[1] = size << 16;

        /* Calculate synchronization timestamp (syt). First we
         * determine syt_index, that is, the index in the packet of
         * the sample for which the timestamp is valid. */
        syt_index = (s->syt_interval - s->dbc) & (s->syt_interval - 1);
        if (syt_index < nevents) {
                syt = ((atomic_read(&s->cycle_count) << 12) |
                       s->cycle_offset.integer) & 0xffff;
                fraction_add(&s->cycle_offset,
                             &s->cycle_offset, &s->ticks_per_syt_offset);

                /* This next addition should be modulo 8000 (0x1f40),
                 * but we only use the lower 4 bits of cycle_count, so
                 * we don't need the modulo. */
                atomic_add(s->cycle_offset.integer / 3072, &s->cycle_count);
                s->cycle_offset.integer %= 3072;
        }
        else
                syt = 0xffff;

        atomic_inc(&s->cycle_count2);

        /* Fill cip header */
        packet->payload->eoh0 = 0;
        packet->payload->sid = s->host->host->node_id & 0x3f;
        packet->payload->dbs = s->dimension;
        packet->payload->fn = 0;
        packet->payload->qpc = 0;
        packet->payload->sph = 0;
        packet->payload->reserved = 0;
        packet->payload->dbc = s->dbc;
        packet->payload->eoh1 = 2;
        packet->payload->fmt = FMT_AMDTP;
        packet->payload->fdf = s->fdf;
        packet->payload->syt = cpu_to_be16(syt);

        switch (s->sample_format) {
        case AMDTP_INPUT_LE16:
                fill_payload_le16(s, packet->payload->data, nevents);
                break;
        }

        s->dbc += nevents;
}

static void stream_flush(struct stream *s)
{
        struct packet *p;
        int nevents;
        struct fraction next;

        /* The AMDTP specifies two transmission modes: blocking and
         * non-blocking.  In blocking mode you always transfer
         * syt_interval or zero samples, whereas in non-blocking mode
         * you send as many samples as you have available at transfer
         * time.
         *
         * The fraction samples_per_cycle specifies the number of
         * samples that become available per cycle.  We add this to
         * the fraction ready_samples, which specifies the number of
         * leftover samples from the previous transmission.  The sum,
         * stored in the fraction next, specifies the number of
         * samples available for transmission, and from this we
         * determine the number of samples to actually transmit.
         */
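
        /* For example, at 44.1 kHz (samples_per_cycle = 5 41/80)
         * non-blocking mode transmits 5 or 6 events per packet as the
         * remainder accumulates, while blocking mode transmits either
         * syt_interval (8) events or an empty, CIP-header-only packet,
         * depending on whether at least 8 samples are available. */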
 
        while (1) {
                fraction_add(&next, &s->ready_samples, &s->samples_per_cycle);
                if (s->mode == AMDTP_MODE_BLOCKING) {
                        if (fraction_floor(&next) >= s->syt_interval)
                                nevents = s->syt_interval;
                        else
                                nevents = 0;
                }
                else
                        nevents = fraction_floor(&next);

                p = stream_current_packet(s);
                if (s->input->length < nevents * s->dimension * 2 || p == NULL)
                        break;

                fill_packet(s, p, nevents);
                stream_queue_packet(s);

                /* Now that we have successfully queued the packet for
                 * transmission, we update the fraction ready_samples. */
                fraction_sub_int(&s->ready_samples, &next, nevents);
        }
}

static int stream_alloc_packet_lists(struct stream *s)
{
        int max_nevents, max_packet_size, i;

        if (s->mode == AMDTP_MODE_BLOCKING)
                max_nevents = s->syt_interval;
        else
                max_nevents = fraction_ceil(&s->samples_per_cycle);

        max_packet_size = max_nevents * s->dimension * 4 + 8;
        s->packet_pool = pci_pool_create("packet pool", s->host->ohci->dev,
                                         max_packet_size, 0, 0, SLAB_KERNEL);

        if (s->packet_pool == NULL)
                return -1;

        INIT_LIST_HEAD(&s->free_packet_lists);
        INIT_LIST_HEAD(&s->dma_packet_lists);
        for (i = 0; i < MAX_PACKET_LISTS; i++) {
                struct packet_list *pl = packet_list_alloc(s);
                if (pl == NULL)
                        break;
                list_add_tail(&pl->link, &s->free_packet_lists);
        }

        return i < MAX_PACKET_LISTS ? -1 : 0;
}

static void stream_free_packet_lists(struct stream *s)
{
        struct list_head *lh, *next;

        if (s->current_packet_list != NULL)
                packet_list_free(s->current_packet_list, s);
        list_for_each_safe(lh, next, &s->dma_packet_lists)
                packet_list_free(list_entry(lh, struct packet_list, link), s);
        list_for_each_safe(lh, next, &s->free_packet_lists)
                packet_list_free(list_entry(lh, struct packet_list, link), s);
        if (s->packet_pool != NULL)
                pci_pool_destroy(s->packet_pool);

        s->current_packet_list = NULL;
        INIT_LIST_HEAD(&s->free_packet_lists);
        INIT_LIST_HEAD(&s->dma_packet_lists);
        s->packet_pool = NULL;
}

static void plug_update(struct cmp_pcr *plug, void *data)
{
        struct stream *s = data;

        HPSB_INFO("plug update: p2p_count=%d, channel=%d",
                  plug->p2p_count, plug->channel);
        s->iso_channel = plug->channel;
        if (plug->p2p_count > 0) {
                struct packet_list *pl;

                pl = list_entry(s->dma_packet_lists.next, struct packet_list, link);
                stream_start_dma(s, pl);
        }
        else {
                ohci1394_stop_it_ctx(s->host->ohci, s->iso_tasklet.context, 0);
        }
}

static int stream_configure(struct stream *s, int cmd, struct amdtp_ioctl *cfg)
{
        const int transfer_delay = 9000;

        if (cfg->format <= AMDTP_FORMAT_IEC958_AC3)
                s->format = cfg->format;
        else
                return -EINVAL;

        switch (cfg->rate) {
        case 32000:
                s->syt_interval = 8;
                s->fdf = FDF_SFC_32KHZ;
                s->iec958_rate_code = 0x0c;
                break;
        case 44100:
                s->syt_interval = 8;
                s->fdf = FDF_SFC_44K1HZ;
                s->iec958_rate_code = 0x00;
                break;
        case 48000:
                s->syt_interval = 8;
                s->fdf = FDF_SFC_48KHZ;
                s->iec958_rate_code = 0x04;
                break;
        case 88200:
                s->syt_interval = 16;
                s->fdf = FDF_SFC_88K2HZ;
                s->iec958_rate_code = 0x00;
                break;
        case 96000:
                s->syt_interval = 16;
                s->fdf = FDF_SFC_96KHZ;
                s->iec958_rate_code = 0x00;
                break;
        case 176400:
                s->syt_interval = 32;
                s->fdf = FDF_SFC_176K4HZ;
                s->iec958_rate_code = 0x00;
                break;
        case 192000:
                s->syt_interval = 32;
                s->fdf = FDF_SFC_192KHZ;
                s->iec958_rate_code = 0x00;
                break;

        default:
                return -EINVAL;
        }

        s->rate = cfg->rate;
        fraction_init(&s->samples_per_cycle, s->rate, 8000);
        fraction_init(&s->ready_samples, 0, 8000);

        /* The ticks_per_syt_offset is initialized to the number of
         * ticks between syt_interval events.  The number of ticks per
         * second is 24.576e6, so the number of ticks between
         * syt_interval events is 24.576e6 * syt_interval / rate.
         */
        fraction_init(&s->ticks_per_syt_offset,
                      24576000 * s->syt_interval, s->rate);
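        /* For 44.1 kHz with syt_interval = 8 this works out to
         * 24576000 * 8 / 44100 = 4458 + 10200/44100 ticks (~4458.23)
         * between timestamped packets. */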
        fraction_init(&s->cycle_offset, (transfer_delay % 3072) * s->rate, s->rate);
        atomic_set(&s->cycle_count, transfer_delay / 3072);
        atomic_set(&s->cycle_count2, 0);

        s->mode = cfg->mode;
        s->sample_format = AMDTP_INPUT_LE16;

        /* When using the AM824 raw subformat we can stream signals of
         * any dimension.  The IEC958 subformat, however, only
         * supports 2 channels.
         */
        if (s->format == AMDTP_FORMAT_RAW || cfg->dimension == 2)
                s->dimension = cfg->dimension;
        else
                return -EINVAL;

        if (s->opcr != NULL) {
                cmp_unregister_opcr(s->host->host, s->opcr);
                s->opcr = NULL;
        }

        switch(cmd) {
        case AMDTP_IOC_PLUG:
                s->opcr = cmp_register_opcr(s->host->host, cfg->u.plug,
                                           /*payload*/ 12, plug_update, s);
                if (s->opcr == NULL)
                        return -EINVAL;
                s->iso_channel = s->opcr->channel;
                break;

        case AMDTP_IOC_CHANNEL:
                if (cfg->u.channel >= 0 && cfg->u.channel < 64)
                        s->iso_channel = cfg->u.channel;
                else
                        return -EINVAL;
                break;
        }

        /* The ioctl settings were all valid, so we realloc the packet
         * lists to make sure the packet size is big enough.
         */
        if (s->packet_pool != NULL)
                stream_free_packet_lists(s);

        if (stream_alloc_packet_lists(s) < 0) {
                stream_free_packet_lists(s);
                return -ENOMEM;
        }

        return 0;
}

struct stream *stream_alloc(struct amdtp_host *host)
{
        struct stream *s;
        unsigned long flags;

        s = kmalloc(sizeof(struct stream), SLAB_KERNEL);
        if (s == NULL)
                return NULL;

        memset(s, 0, sizeof(struct stream));
        s->host = host;

        s->input = buffer_alloc(BUFFER_SIZE);
        if (s->input == NULL) {
                kfree(s);
                return NULL;
        }

        s->descriptor_pool = pci_pool_create("descriptor pool", host->ohci->dev,
                                             sizeof(struct descriptor_block),
                                             16, 0, SLAB_KERNEL);

        if (s->descriptor_pool == NULL) {
                kfree(s->input);
                kfree(s);
                return NULL;
        }

        INIT_LIST_HEAD(&s->free_packet_lists);
        INIT_LIST_HEAD(&s->dma_packet_lists);

        init_waitqueue_head(&s->packet_list_wait);
        spin_lock_init(&s->packet_list_lock);

        ohci1394_init_iso_tasklet(&s->iso_tasklet, OHCI_ISO_TRANSMIT,
                                  stream_shift_packet_lists,
                                  (unsigned long) s);

        if (ohci1394_register_iso_tasklet(host->ohci, &s->iso_tasklet) < 0) {
                pci_pool_destroy(s->descriptor_pool);
                kfree(s->input);
                kfree(s);
                return NULL;
        }

        spin_lock_irqsave(&host->stream_list_lock, flags);
        list_add_tail(&s->link, &host->stream_list);
        spin_unlock_irqrestore(&host->stream_list_lock, flags);

        return s;
}

void stream_free(struct stream *s)
{
        unsigned long flags;

        /* Stop the DMA.  We wait for the dma packet list to become
         * empty and let the dma controller run out of programs.  This
         * seems to be more reliable than stopping it directly, since
         * that sometimes generates an it transmit interrupt if we
         * later re-enable the context.
         */
        wait_event_interruptible(s->packet_list_wait,
                                 list_empty(&s->dma_packet_lists));

        ohci1394_stop_it_ctx(s->host->ohci, s->iso_tasklet.context, 1);
        ohci1394_unregister_iso_tasklet(s->host->ohci, &s->iso_tasklet);

        if (s->opcr != NULL)
                cmp_unregister_opcr(s->host->host, s->opcr);

        spin_lock_irqsave(&s->host->stream_list_lock, flags);
        list_del(&s->link);
        spin_unlock_irqrestore(&s->host->stream_list_lock, flags);

        kfree(s->input);

        stream_free_packet_lists(s);
        pci_pool_destroy(s->descriptor_pool);

        kfree(s);
}

/* File operations */

static ssize_t amdtp_write(struct file *file, const char *buffer, size_t count,
                           loff_t *offset_is_ignored)
{
        struct stream *s = file->private_data;
        unsigned char *p;
        int i;
        size_t length;

        if (s->packet_pool == NULL)
                return -EBADFD;

        /* Fill the circular buffer from the input buffer and call the
         * iso packer when the buffer is full.  The iso packer may
         * leave bytes in the buffer for two reasons: either the
         * remaining bytes weren't enough to build a new packet, or
         * there were no free packet lists.  In the first case we
         * re-fill the buffer and call the iso packer again or return
         * if we used all the data from userspace.  In the second
         * case, the wait_event_interruptible will block until the irq
         * handler frees a packet list.
         */

        for (i = 0; i < count; i += length) {
                p = buffer_put_bytes(s->input, count - i, &length);
                if (copy_from_user(p, buffer + i, length))
                        return -EFAULT;
                if (s->input->length < s->input->size)
                        continue;

                stream_flush(s);

                if (s->current_packet_list != NULL)
                        continue;

                if (file->f_flags & O_NONBLOCK)
                        return i + length > 0 ? i + length : -EAGAIN;

                if (wait_event_interruptible(s->packet_list_wait,
                                             !list_empty(&s->free_packet_lists)))
                        return -EINTR;
        }

        return count;
}

static int amdtp_ioctl(struct inode *inode, struct file *file,
                       unsigned int cmd, unsigned long arg)
{
        struct stream *s = file->private_data;
        struct amdtp_ioctl cfg;

        switch(cmd)
        {
        case AMDTP_IOC_PLUG:
        case AMDTP_IOC_CHANNEL:
                if (copy_from_user(&cfg, (struct amdtp_ioctl *) arg, sizeof cfg))
                        return -EFAULT;
                else
                        return stream_configure(s, cmd, &cfg);

        default:
                return -EINVAL;
        }
}

static unsigned int amdtp_poll(struct file *file, poll_table *pt)
{
        struct stream *s = file->private_data;

        poll_wait(file, &s->packet_list_wait, pt);

        if (!list_empty(&s->free_packet_lists))
                return POLLOUT | POLLWRNORM;
        else
                return 0;
}

static int amdtp_open(struct inode *inode, struct file *file)
{
        struct amdtp_host *host;
        int i = ieee1394_file_to_instance(file);

        host = hpsb_get_hostinfo_bykey(&amdtp_highlevel, i);
        if (host == NULL)
                return -ENODEV;

        file->private_data = stream_alloc(host);
        if (file->private_data == NULL)
                return -ENOMEM;

        return 0;
}

static int amdtp_release(struct inode *inode, struct file *file)
{
        struct stream *s = file->private_data;

        stream_free(s);

        return 0;
}

static struct file_operations amdtp_fops =
{
        .owner =        THIS_MODULE,
        .write =        amdtp_write,
        .poll =         amdtp_poll,
        .ioctl =        amdtp_ioctl,
        .open =         amdtp_open,
        .release =      amdtp_release
};

/* IEEE1394 Subsystem functions */

static void amdtp_add_host(struct hpsb_host *host)
{
        struct amdtp_host *ah;
        int minor;
        char name[16];

        if (strcmp(host->driver->name, OHCI1394_DRIVER_NAME) != 0)
                return;

        ah = hpsb_create_hostinfo(&amdtp_highlevel, host, sizeof(*ah));
        if (!ah) {
                HPSB_ERR("amdtp: Unable to allocate hostinfo");
                return;
        }

        ah->host = host;
        ah->ohci = host->hostdata;

        hpsb_set_hostinfo_key(&amdtp_highlevel, host, ah->ohci->id);

        minor = IEEE1394_MINOR_BLOCK_AMDTP * 16 + ah->ohci->id;

        sprintf(name, "%d", ah->ohci->id);

        INIT_LIST_HEAD(&ah->stream_list);
        spin_lock_init(&ah->stream_list_lock);

        ah->devfs = devfs_register(devfs_handle, name,
                                   DEVFS_FL_AUTO_OWNER,
                                   IEEE1394_MAJOR, minor,
                                   S_IFCHR | S_IRUSR | S_IWUSR,
                                   &amdtp_fops, NULL);
}

static void amdtp_remove_host(struct hpsb_host *host)
{
        struct amdtp_host *ah = hpsb_get_hostinfo(&amdtp_highlevel, host);

        if (ah)
                devfs_unregister(ah->devfs);

        return;
}

static struct hpsb_highlevel amdtp_highlevel = {
        .name =         "amdtp",
        .add_host =     amdtp_add_host,
        .remove_host =  amdtp_remove_host,
};

/* Module interface */

MODULE_AUTHOR("Kristian Hogsberg <hogsberg@users.sf.net>");
MODULE_DESCRIPTION("Driver for Audio & Music Data Transmission Protocol "
                   "on OHCI boards.");
MODULE_SUPPORTED_DEVICE("amdtp");
MODULE_LICENSE("GPL");

static int __init amdtp_init_module (void)
{
        if (ieee1394_register_chardev(IEEE1394_MINOR_BLOCK_AMDTP,
                                      THIS_MODULE, &amdtp_fops)) {
                HPSB_ERR("amdtp: unable to get minor device block");
                return -EIO;
        }

        devfs_handle = devfs_mk_dir(NULL, "amdtp", NULL);

        hpsb_register_highlevel(&amdtp_highlevel);

        HPSB_INFO("Loaded AMDTP driver");

        return 0;
}

static void __exit amdtp_exit_module (void)
{
        hpsb_unregister_highlevel(&amdtp_highlevel);
        devfs_unregister(devfs_handle);
        ieee1394_unregister_chardev(IEEE1394_MINOR_BLOCK_AMDTP);

        HPSB_INFO("Unloaded AMDTP driver");
}

module_init(amdtp_init_module);
module_exit(amdtp_exit_module);
