/* -*- c-basic-offset: 8 -*-
 *
 * amdtp.c - Audio and Music Data Transmission Protocol Driver
 * Copyright (C) 2001 Kristian Høgsberg
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/* OVERVIEW
 * --------
 *
 * The AMDTP driver is designed to expose the IEEE1394 bus as a
 * regular OSS soundcard, i.e. you can link /dev/dsp to /dev/amdtp and
 * then your favourite MP3 player, game or whatever sound program will
 * output to an IEEE1394 isochronous channel.  The signal destination
 * could be a set of IEEE1394 loudspeakers (if and when such things
 * become available) or an amplifier with IEEE1394 input (like the
 * Sony STR-LSA1).  The driver only handles the actual streaming; some
 * connection management is also required for this to actually work.
 * That is outside the scope of this driver, and furthermore it is not
 * really standardized yet.
 *
 * The Audio and Music Data Transmission Protocol is available at
 *
 *     http://www.1394ta.org/Download/Technology/Specifications/2001/AM20Final-jf2.pdf
 *
 *
 * TODO
 * ----
 *
 * - We should be able to change input sample format between LE/BE, as
 *   we already shift the bytes around when we construct the iso
 *   packets.
 *
 * - Fix DMA stop after bus reset!
 *
 * - Clean up iso context handling in ohci1394.
 *
 *
 * MAYBE TODO
 * ----------
 *
 * - Receive data for local playback or recording.  Playback requires
 *   soft syncing with the sound card.
 *
 * - Signal processing, i.e. receive packets, do some processing, and
 *   transmit them again using the same packet structure and timestamps
 *   offset by processing time.
 *
 * - Maybe make an ALSA interface, that is, create a file_ops
 *   implementation that recognizes ALSA ioctls and uses defaults for
 *   things that can't be controlled through ALSA (iso channel).
 *
 * Changes:
 *
 * - Audit copy_from_user in amdtp_write.
 *   Daniele Bellucci <bellucda@tiscali.it>
 *
 */
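/* A hedged usage sketch (not part of this file): the overview above
 * says the driver is meant to look like a regular OSS soundcard, so
 * user space would stream PCM to the iso channel roughly as below.
 * The device path, the OSS ioctls and the parameter values are
 * illustrative assumptions; the ioctl set this driver actually
 * accepts is not shown in this excerpt.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/soundcard.h>
 *
 *	int fd = open("/dev/amdtp", O_WRONLY);
 *	int format = AFMT_S16_LE, channels = 2, rate = 44100;
 *	short samples[2 * 4096];	(interleaved stereo frames)
 *
 *	ioctl(fd, SNDCTL_DSP_SETFMT, &format);
 *	ioctl(fd, SNDCTL_DSP_CHANNELS, &channels);
 *	ioctl(fd, SNDCTL_DSP_SPEED, &rate);
 *	write(fd, samples, sizeof samples);
 */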
#include <linux/module.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/poll.h>
#include <asm/uaccess.h>
#include <asm/atomic.h>

#include "hosts.h"
#include "highlevel.h"
#include "ieee1394.h"
#include "ieee1394_core.h"
#include "ohci1394.h"

#include "amdtp.h"
#include "cmp.h"

#define FMT_AMDTP 0x10
#define FDF_AM824 0x00

#define FDF_SFC_32KHZ   0x00
#define FDF_SFC_44K1HZ  0x01
#define FDF_SFC_48KHZ   0x02
#define FDF_SFC_88K2HZ  0x03
#define FDF_SFC_96KHZ   0x04
#define FDF_SFC_176K4HZ 0x05
#define FDF_SFC_192KHZ  0x06

struct descriptor_block {
	struct output_more_immediate {
		u32 control;
		u32 pad0;
		u32 skip;
		u32 pad1;
		u32 header[4];
	} header_desc;

	struct output_last {
		u32 control;
		u32 data_address;
		u32 branch;
		u32 status;
	} payload_desc;
};

struct packet {
	struct descriptor_block *db;
	dma_addr_t db_bus;
	struct iso_packet *payload;
	dma_addr_t payload_bus;
};

#include <asm/byteorder.h>

#if defined __BIG_ENDIAN_BITFIELD

struct iso_packet {
	/* First quadlet */
	unsigned int dbs : 8;
	unsigned int eoh0 : 2;
	unsigned int sid : 6;

	unsigned int dbc : 8;
	unsigned int fn : 2;
	unsigned int qpc : 3;
	unsigned int sph : 1;
	unsigned int reserved : 2;

	/* Second quadlet */
	unsigned int fdf : 8;
	unsigned int eoh1 : 2;
	unsigned int fmt : 6;

	unsigned int syt : 16;

	quadlet_t data[0];
};

#elif defined __LITTLE_ENDIAN_BITFIELD

struct iso_packet {
	/* First quadlet */
	unsigned int sid : 6;
	unsigned int eoh0 : 2;
	unsigned int dbs : 8;

	unsigned int reserved : 2;
	unsigned int sph : 1;
	unsigned int qpc : 3;
	unsigned int fn : 2;
	unsigned int dbc : 8;

	/* Second quadlet */
	unsigned int fmt : 6;
	unsigned int eoh1 : 2;
	unsigned int fdf : 8;

	unsigned int syt : 16;

	quadlet_t data[0];
};

#else

#error Unknown bitfield type

#endif

struct fraction {
	int integer;
	int numerator;
	int denominator;
};

#define PACKET_LIST_SIZE 256
#define MAX_PACKET_LISTS 4

struct packet_list {
	struct list_head link;
	int last_cycle_count;
	struct packet packets[PACKET_LIST_SIZE];
};

#define BUFFER_SIZE 128

/* This implements a circular buffer for incoming samples. */

struct buffer {
	size_t head, tail, length, size;
	unsigned char data[0];
};
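/* Illustrative sketch, not used by the driver: how the two CIP
 * quadlets declared in struct iso_packet above could be filled for an
 * AM824 stream at 48 kHz, using the FMT_AMDTP, FDF_AM824 and
 * FDF_SFC_48KHZ values #defined above.  The remaining field values
 * (eoh0/eoh1, fn, qpc, sph) are assumptions made for the example,
 * not taken from this excerpt.
 */
static inline void example_fill_cip_header(struct iso_packet *p, int node_id,
					   int quadlets_per_block,
					   unsigned char dbc, int syt)
{
	/* First quadlet: source ID, data block size and running count. */
	p->eoh0 = 0;
	p->sid = node_id & 0x3f;
	p->dbs = quadlets_per_block;
	p->fn = 0;
	p->qpc = 0;
	p->sph = 0;
	p->reserved = 0;
	p->dbc = dbc;

	/* Second quadlet: format, sample rate code and timestamp. */
	p->eoh1 = 2;
	p->fmt = FMT_AMDTP;
	p->fdf = FDF_AM824 | FDF_SFC_48KHZ;
	p->syt = syt;
}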
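/* Another illustrative sketch: struct fraction above holds a mixed
 * number, integer + numerator/denominator.  For a 44.1 kHz stream
 * spread over the 8000 iso cycles per second, the samples-per-cycle
 * value is 44100/8000 = 5 + 4100/8000, so most cycles carry 5 samples
 * and the accumulated remainder periodically carries over into a
 * 6-sample cycle.  A minimal add-with-carry over two fractions that
 * share a denominator might look like this (the helper name is
 * hypothetical):
 */
static inline void example_fraction_add(struct fraction *dst,
					struct fraction *a, struct fraction *b)
{
	int num = a->numerator + b->numerator;

	dst->integer = a->integer + b->integer + num / a->denominator;
	dst->numerator = num % a->denominator;
	dst->denominator = a->denominator;
}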
struct stream {
	int iso_channel;
	int format;
	int rate;
	int dimension;
	int fdf;
	int mode;
	int sample_format;
	struct cmp_pcr *opcr;

	/* Input samples are copied here. */
	struct buffer *input;

	/* ISO Packer state */
	unsigned char dbc;
	struct packet_list *current_packet_list;
	int current_packet;
	struct fraction ready_samples, samples_per_cycle;

	/* We use these to generate control bits when we are packing
	 * iec958 data.
	 */
	int iec958_frame_count;
	int iec958_rate_code;

	/* The cycle_count and cycle_offset fields are used for the
	 * synchronization timestamps (syt) in the cip header.  They
	 * are incremented by at least a cycle every time we put a
	 * time stamp in a packet.  As we don't time stamp all
	 * packets, cycle_count isn't updated in every cycle, and
	 * sometimes it's incremented by 2.  Thus, we have
	 * cycle_count2, which is simply incremented by one with each
	 * packet, so we can compare it to the transmission time
	 * written back in the dma programs.
	 */
	atomic_t cycle_count, cycle_count2;
	struct fraction cycle_offset, ticks_per_syt_offset;
	int syt_interval;
	int stale_count;

	/* These fields control the sample output to the DMA engine.
	 * The dma_packet_lists list holds packet lists currently
	 * queued for dma; the head of the list is currently being
	 * processed.  The last program in a packet list generates an
	 * interrupt, which removes the head from dma_packet_lists and
	 * puts it back on the free list.
	 */
	struct list_head dma_packet_lists;
	struct list_head free_packet_lists;
	wait_queue_head_t packet_list_wait;
	spinlock_t packet_list_lock;
	struct ohci1394_iso_tasklet iso_tasklet;
	struct pci_pool *descriptor_pool, *packet_pool;

	/* Streams at a host controller are chained through this field. */
	struct list_head link;
	struct amdtp_host *host;
};

struct amdtp_host {
	struct hpsb_host *host;
	struct ti_ohci *ohci;
	struct list_head stream_list;
	devfs_handle_t devfs;
	spinlock_t stream_list_lock;
};

static devfs_handle_t devfs_handle;

static struct hpsb_highlevel amdtp_highlevel;

/* FIXME: This doesn't belong here... */

#define OHCI1394_CONTEXT_CYCLE_MATCH 0x80000000
#define OHCI1394_CONTEXT_RUN         0x00008000
#define OHCI1394_CONTEXT_WAKE        0x00001000
#define OHCI1394_CONTEXT_DEAD        0x00000800
#define OHCI1394_CONTEXT_ACTIVE      0x00000400

void ohci1394_start_it_ctx(struct ti_ohci *ohci, int ctx,
			   dma_addr_t first_cmd, int z, int cycle_match)
{
	reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << ctx);
	reg_write(ohci, OHCI1394_IsoXmitCommandPtr + ctx * 16, first_cmd | z);
	reg_write(ohci, OHCI1394_IsoXmitContextControlClear + ctx * 16, ~0);
	wmb();
	reg_write(ohci, OHCI1394_IsoXmitContextControlSet + ctx * 16,
		  OHCI1394_CONTEXT_CYCLE_MATCH | (cycle_match << 16) |
		  OHCI1394_CONTEXT_RUN);
}

void ohci1394_wake_it_ctx(struct ti_ohci *ohci, int ctx)
{
	reg_write(ohci, OHCI1394_IsoXmitContextControlSet + ctx * 16,
		  OHCI1394_CONTEXT_WAKE);
}

void ohci1394_stop_it_ctx(struct ti_ohci *ohci, int ctx, int synchronous)
{
	u32 control;
	int wait;

	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << ctx);
	reg_write(ohci, OHCI1394_IsoXmitContextControlClear + ctx * 16,
		  OHCI1394_CONTEXT_RUN);
	wmb();

	if (synchronous) {
		for (wait = 0; wait < 5; wait++) {
			control = reg_read(ohci, OHCI1394_IsoXmitContextControlSet + ctx * 16);
			if ((control & OHCI1394_CONTEXT_ACTIVE) == 0)
				break;

			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(1);
		}
	}
}

/* Note: we can test if free_packet_lists is empty without acquiring
 * the packet_list_lock.  The interrupt handler only adds to the free
 * list, so there is no race condition between testing the list
 * non-empty and acquiring the lock.
 */

static struct packet_list *stream_get_free_packet_list(struct stream *s)
{
	struct packet_list *pl;
	unsigned long flags;

	if (list_empty(&s->free_packet_lists))
		return NULL;

	spin_lock_irqsave(&s->packet_list_lock, flags);
	pl = list_entry(s->free_packet_lists.next, struct packet_list, link);
	list_del(&pl->link);
	spin_unlock_irqrestore(&s->packet_list_lock, flags);

	return pl;
}

static void stream_start_dma(struct stream *s, struct packet_list *pl)
{
	u32 syt_cycle, cycle_count, start_cycle;

	cycle_count = reg_read(s->host->ohci,
			       OHCI1394_IsochronousCycleTimer) >> 12;
	syt_cycle = (pl->last_cycle_count - PACKET_LIST_SIZE + 1) & 0x0f;

	/* We program the DMA controller to start transmission at
	 * least 17 cycles from now - this happens when the lower four
	 * bits of cycle_count are 0x0f and syt_cycle is 0, in which
	 * case the start cycle is cycle_count - 15 + 32.
	 */
	start_cycle = (cycle_count & ~0x0f) + 32 + syt_cycle;
	if ((start_cycle & 0x1fff) >= 8000)
		start_cycle = start_cycle - 8000 + 0x2000;

	ohci1394_start_it_ctx(s->host->ohci, s->iso_tasklet.context,
			      pl->packets[0].db_bus, 3,
			      start_cycle & 0x7fff);
}
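/* Worked example for the start-cycle computation above (an
 * illustrative helper, not called by the driver): with
 * cycle_count = 0x123f and syt_cycle = 0, the expression gives
 * (0x1230 + 32 + 0) = 0x1250, which is 17 cycles ahead of 0x123f -
 * the minimum lead described in the comment in stream_start_dma.
 * The low 13 bits of the shifted cycle timer count cycles 0..7999
 * within a second, so a sum of 8000 or more wraps into the seconds
 * field at bit 13.
 */
static inline u32 example_start_cycle(u32 cycle_count, u32 syt_cycle)
{
	u32 start_cycle = (cycle_count & ~0x0f) + 32 + syt_cycle;

	/* Wrap from cycle 8000 into the next second. */
	if ((start_cycle & 0x1fff) >= 8000)
		start_cycle = start_cycle - 8000 + 0x2000;

	return start_cycle & 0x7fff;
}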
static void stream_put_dma_packet_list(struct stream *s,
				       struct packet_list *pl)
{
	unsigned long flags;
	struct packet_list *prev;

	/* Remember the cycle_count used for timestamping the last packet. */
	pl->last_cycle_count = atomic_read(&s->cycle_count2) - 1;
	pl->packets[PACKET_LIST_SIZE - 1].db->payload_desc.branch = 0;

	spin_lock_irqsave(&s->packet_list_lock, flags);
	list_add_tail(&pl->link, &s->dma_packet_lists);
	spin_unlock_irqrestore(&s->packet_list_lock, flags);

	prev = list_entry(pl->link.prev, struct packet_list, link);
	if (pl->link.prev != &s->dma_packet_lists) {
		struct packet *last = &prev->packets[PACKET_LIST_SIZE - 1];
		last->db->payload_desc.branch = pl->packets[0].db_bus | 3;
		last->db->header_desc.skip = pl->packets[0].db_bus | 3;
		ohci1394_wake_it_ctx(s->host->ohci, s->iso_tasklet.context);
	} else
		stream_start_dma(s, pl);
}

static void stream_shift_packet_lists(unsigned long l)
{
	struct stream *s = (struct stream *) l;
	struct packet_list *pl;
	struct packet *last;
	int diff;

	if (list_empty(&s->dma_packet_lists)) {
		HPSB_ERR("empty dma_packet_lists in %s", __FUNCTION__);
		return;
	}

	/* Now that we know the list is non-empty, we can get the head
	 * of the list without locking, because the process context
	 * only adds to the tail.
	 */
	pl = list_entry(s->dma_packet_lists.next, struct packet_list, link);
	last = &pl->packets[PACKET_LIST_SIZE - 1];

	/* This is weird... if we stop dma processing in the middle of
	 * a packet list, the dma context immediately generates an
	 * interrupt if we enable it again later.  This only happens
	 * when amdtp_release is interrupted while waiting for dma to
	 * complete, though.  Anyway, we detect this by seeing that
	 * the status of the dma descriptor that we expected an
	 * interrupt from is still 0.
	 */
	if (last->db->payload_desc.status == 0) {
		HPSB_INFO("weird interrupt...");
		return;
	}

	/* If the last descriptor block does not specify a branch
	 * address, we have a sample underflow.
	 */
	if (last->db->payload_desc.branch == 0)
		HPSB_INFO("FIXME: sample underflow...");