base.c: PCI base driver source code

/*
 * TE410P  Quad-T1/E1 PCI Driver version 0.1, 12/16/02
 *
 * Written by Mark Spencer <markster@digium.com>
 * Based on previous works, designs, and architectures conceived and
 * written by Jim Dixon <jim@lambdatel.com>.
 *
 * Copyright (C) 2001 Jim Dixon / Zapata Telephony.
 * Copyright (C) 2001-2005, Digium, Inc.
 *
 * All rights reserved.
 *
 */

/*
 * See http://www.asterisk.org for more information about
 * the Asterisk project. Please do not directly contact
 * any of the maintainers of this project for assistance;
 * the project provides a web site, mailing lists and IRC
 * channels for your use.
 *
 * This program is free software, distributed under the terms of
 * the GNU General Public License Version 2 as published by the
 * Free Software Foundation. See the LICENSE file included with
 * this program for more details.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <asm/io.h>
#include <linux/version.h>
#include <linux/delay.h>
#include "zaptel.h"
#ifdef LINUX26
#include <linux/moduleparam.h>
#endif

#include "wct4xxp.h"
#include "vpm450m.h"

/* Work queues are a way to better distribute load on SMP systems */
#if defined(LINUX26) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
/*
 * Work queues can significantly improve performance and scalability
 * on multi-processor machines, but this requires bypassing some kernel
 * APIs, so it's not guaranteed to be compatible with all kernels.
 */
/* #define ENABLE_WORKQUEUES */
#endif

/* Enabling prefetching may help performance */
#define ENABLE_PREFETCH

/* Support first generation cards? */
#define SUPPORT_GEN1 

/* Define to get a more attention-grabbing alarm status display, at the cost
   of slightly more I/O */
#define FANCY_ALARM

/* Define to support Digium Voice Processing Module expansion card */
#define VPM_SUPPORT


#define DEBUG_MAIN 		(1 << 0)
#define DEBUG_DTMF 		(1 << 1)
#define DEBUG_REGS 		(1 << 2)
#define DEBUG_TSI  		(1 << 3)
#define DEBUG_ECHOCAN 	(1 << 4)
#define DEBUG_RBS 		(1 << 5)
#define DEBUG_FRAMER		(1 << 6)
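
/*
 * Illustrative sketch (not part of the original source): these bits are
 * intended to be OR'd into the "debug" module parameter so that individual
 * subsystems can be traced independently.  A typical guarded print would
 * look roughly like the following (the message text is hypothetical):
 */
#if 0
	if (debug & DEBUG_REGS)
		printk("TE4XXP: dumping framer registers\n");
#endif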

#ifdef ENABLE_WORKQUEUES
#include <linux/cpu.h>

/* XXX UGLY!!!! XXX  We have to access workqueue internals that are only
   defined within workqueue.c, because the kernel doesn't give us a routine
   to nail a work item to a particular CPU's worker thread.  Nailing work to
   threads gives us substantially higher scalability in multi-CPU
   environments, though! */

/*
 * The per-CPU workqueue (if single thread, we always use cpu 0's).
 *
 * The sequence counters are for flush_scheduled_work().  It wants to wait
 * until all currently-scheduled works are completed, but it doesn't
 * want to be livelocked by new, incoming ones.  So it waits until
 * remove_sequence is >= the insert_sequence which pertained when
 * flush_scheduled_work() was called.
 */
 
struct cpu_workqueue_struct {

	spinlock_t lock;

	long remove_sequence;	/* Least-recently added (next to run) */
	long insert_sequence;	/* Next to add */

	struct list_head worklist;
	wait_queue_head_t more_work;
	wait_queue_head_t work_done;

	struct workqueue_struct *wq;
	task_t *thread;

	int run_depth;		/* Detect run_workqueue() recursion depth */
} ____cacheline_aligned;
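
/*
 * Sketch only (hypothetical helper, not compiled): the sequence counters
 * above support exactly the flush behaviour described in the comment:
 * sample insert_sequence once, then wait on work_done until remove_sequence
 * catches up, so that already-queued work finishes without being livelocked
 * by new submissions.
 */
#if 0
static void example_flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	long sequence_needed;

	spin_lock_irq(&cwq->lock);
	sequence_needed = cwq->insert_sequence;	/* snapshot at flush time */
	spin_unlock_irq(&cwq->lock);

	/* Later submissions bump insert_sequence but not sequence_needed,
	   so this wait cannot be livelocked. */
	wait_event(cwq->work_done,
		   cwq->remove_sequence - sequence_needed >= 0);
}
#endif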

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	/* TODO: Find out exactly where the API changed */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,15)
	struct cpu_workqueue_struct *cpu_wq;
#else
	struct cpu_workqueue_struct cpu_wq[NR_CPUS];
#endif
	const char *name;
	struct list_head list; 	/* Empty if single thread */
};

/* Preempt must be disabled. */
static void __t4_queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	work->wq_data = cwq;
	list_add_tail(&work->entry, &cwq->worklist);
	cwq->insert_sequence++;
	wake_up(&cwq->more_work);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/*
 * Queue work on a workqueue. Return non-zero if it was successfully
 * added.
 *
 * We queue the work to the CPU on which it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
 */
static inline int t4_queue_work(struct workqueue_struct *wq, struct work_struct *work, int cpu)
{
	int ret = 0;
	get_cpu();
	if (!test_and_set_bit(0, &work->pending)) {
		BUG_ON(!list_empty(&work->entry));
		__t4_queue_work(wq->cpu_wq + cpu, work);
		ret = 1;
	}
	put_cpu();
	return ret;
}
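
/*
 * Usage sketch (hypothetical call site): per-span deferred work is expected
 * to be queued from the interrupt handler onto the CPU that took the
 * interrupt, roughly like this:
 */
#if 0
	{
		int cpu = smp_processor_id();

		t4_queue_work(wc->workq, &wc->tspans[0]->swork, cpu);
	}
#endif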

#endif

static int pedanticpci = 1;
static int debug = 0;
static int timingcable = 0;
static int highestorder;
static int t1e1override = -1;	/* -1 = use hardware jumpers; 0xFF = force E1 */
static int j1mode = 0;
static int sigmode = FRMR_MODE_NO_ADDR_CMP;
static int loopback = 0;
static int alarmdebounce = 0;
#ifdef VPM_SUPPORT
static int vpmsupport = 1;
/* If set to auto, vpmdtmfsupport is enabled for VPM400M and disabled for VPM450M */
static int vpmdtmfsupport = -1; /* -1=auto, 0=disabled, 1=enabled*/
static int vpmspans = 4;
#define VPM_DEFAULT_DTMFTHRESHOLD 1000
static int dtmfthreshold = VPM_DEFAULT_DTMFTHRESHOLD;
static int lastdtmfthreshold = VPM_DEFAULT_DTMFTHRESHOLD;
#endif
/* Enabling bursting can more efficiently utilize PCI bus bandwidth, but
   can also cause PCI bus starvation, especially in combination with other
   aggressive cards.  Please note that burst mode has no effect on CPU
   utilization / max number of calls / etc. */
static int noburst = 1;
/* For 56kbps links, set this module parameter to 0x7f */
static int hardhdlcmode = 0xff;
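
/*
 * Note (sketch only; the exact permissions are an assumption): the statics
 * above are normally exported as module parameters further down in the
 * driver, e.g. on 2.6 kernels:
 */
#if 0
#ifdef LINUX26
module_param(debug, int, 0600);
module_param(noburst, int, 0600);
module_param(hardhdlcmode, int, 0600);
#endif
#endif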

#ifdef FANCY_ALARM
static int altab[] = {
0, 0, 0, 1, 2, 3, 4, 6, 8, 9, 11, 13, 16, 18, 20, 22, 24, 25, 27, 28, 29, 30, 31, 31, 32, 31, 31, 30, 29, 28, 27, 25, 23, 22, 20, 18, 16, 13, 11, 9, 8, 6, 4, 3, 2, 1, 0, 0, 
};
#endif
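
/*
 * Illustrative sketch (hypothetical code): altab holds one period of a
 * brightness ramp, so a "fancy" alarm indication just advances alarmpos one
 * step per tick, wrapping at the end of the table, before the value
 * altab[wc->alarmpos] is pushed out to the LEDs.
 */
#if 0
	wc->alarmpos++;
	if (wc->alarmpos >= (int)(sizeof(altab) / sizeof(altab[0])))
		wc->alarmpos = 0;
#endif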

#define MAX_SPANS 16

#define FLAG_STARTED (1 << 0)
#define FLAG_NMF (1 << 1)
#define FLAG_SENDINGYELLOW (1 << 2)


#define	TYPE_T1	1		/* is a T1 card */
#define	TYPE_E1	2		/* is an E1 card */
#define TYPE_J1 3		/* is a running J1 */

#define FLAG_2NDGEN  (1 << 3)
#define FLAG_2PORT   (1 << 4)
#define FLAG_VPM2GEN (1 << 5)
#define FLAG_OCTOPT  (1 << 6)
#define FLAG_3RDGEN  (1 << 7)
#define FLAG_BURST   (1 << 8)
#define FLAG_EXPRESS (1 << 9)

#define CANARY 0xc0de

struct devtype {
	char *desc;
	unsigned int flags;
};

static struct devtype wct4xxp = { "Wildcard TE410P/TE405P (1st Gen)", 0 };
static struct devtype wct420p4 = { "Wildcard TE420 (4th Gen)", FLAG_BURST | FLAG_2NDGEN | FLAG_3RDGEN | FLAG_EXPRESS };
static struct devtype wct410p4 = { "Wildcard TE410P (4th Gen)", FLAG_BURST | FLAG_2NDGEN | FLAG_3RDGEN };
static struct devtype wct410p3 = { "Wildcard TE410P (3rd Gen)", FLAG_2NDGEN | FLAG_3RDGEN };
static struct devtype wct405p4 = { "Wildcard TE405P (4th Gen)", FLAG_BURST | FLAG_2NDGEN | FLAG_3RDGEN };
static struct devtype wct405p3 = { "Wildcard TE405P (3rd Gen)", FLAG_2NDGEN | FLAG_3RDGEN };
static struct devtype wct410p2 = { "Wildcard TE410P (2nd Gen)", FLAG_2NDGEN };
static struct devtype wct405p2 = { "Wildcard TE405P (2nd Gen)", FLAG_2NDGEN };
static struct devtype wct220p4 = { "Wildcard TE220 (4th Gen)", FLAG_BURST | FLAG_2NDGEN | FLAG_3RDGEN | FLAG_2PORT | FLAG_EXPRESS };
static struct devtype wct205p4 = { "Wildcard TE205P (4th Gen)", FLAG_BURST | FLAG_2NDGEN | FLAG_3RDGEN | FLAG_2PORT };
static struct devtype wct205p3 = { "Wildcard TE205P (3rd Gen)", FLAG_2NDGEN | FLAG_3RDGEN | FLAG_2PORT };
static struct devtype wct210p4 = { "Wildcard TE210P (4th Gen)", FLAG_BURST | FLAG_2NDGEN | FLAG_3RDGEN | FLAG_2PORT };
static struct devtype wct210p3 = { "Wildcard TE210P (3rd Gen)", FLAG_2NDGEN | FLAG_3RDGEN | FLAG_2PORT };
static struct devtype wct205 = { "Wildcard TE205P ", FLAG_2NDGEN | FLAG_2PORT };
static struct devtype wct210 = { "Wildcard TE210P ", FLAG_2NDGEN | FLAG_2PORT };
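
/*
 * Sketch only (assumes the usual PCI probe pattern; "ent" here is a
 * hypothetical struct pci_device_id pointer): each devtype above is attached
 * to a PCI ID table entry via driver_data, and the probe routine copies it
 * into the per-card state:
 */
#if 0
	struct devtype *dt = (struct devtype *) ent->driver_data;

	wc->variety = dt->desc;
	wc->flags = dt->flags;
#endif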
	

struct t4;

struct t4_span {
	struct t4 *owner;
	unsigned int *writechunk;					/* Double-word aligned write memory */
	unsigned int *readchunk;					/* Double-word aligned read memory */
	int spantype;		/* card type, T1 or E1 or J1 */
	int sync;
	int psync;
	int alarmtimer;
	int redalarms;
	int notclear;
	int alarmcount;
	int spanflags;
	int syncpos;
#ifdef SUPPORT_GEN1
	int e1check;			/* E1 check */
#endif
	struct zt_span span;
	unsigned char txsigs[16];	/* Transmit sigs */
	int loopupcnt;
	int loopdowncnt;
#ifdef SUPPORT_GEN1
	unsigned char ec_chunk1[31][ZT_CHUNKSIZE]; /* first EC chunk buffer */
	unsigned char ec_chunk2[31][ZT_CHUNKSIZE]; /* second EC chunk buffer */
#endif
	int irqmisses;
	
	/* HDLC controller fields */
	struct zt_chan *sigchan;
	unsigned char sigmode;
	int sigactive;
	int frames_out;
	int frames_in;

#ifdef VPM_SUPPORT
	unsigned long dtmfactive;
	unsigned long dtmfmask;
	unsigned long dtmfmutemask;
	short dtmfenergy[31];
	short dtmfdigit[31];
#endif
#ifdef ENABLE_WORKQUEUES
	struct work_struct swork;
#endif	
	struct zt_chan chans[0];		/* Individual channels */
};
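
/*
 * Note (allocation sketch, hypothetical call site; "channels" stands for the
 * per-span channel count): chans[0] is the old-style flexible array member,
 * so a span and its channel array are allocated together as a single block:
 */
#if 0
	struct t4_span *ts;

	ts = kmalloc(sizeof(*ts) + sizeof(struct zt_chan) * channels, GFP_KERNEL);
	if (ts)
		memset(ts, 0, sizeof(*ts) + sizeof(struct zt_chan) * channels);
#endif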

struct t4 {
	/* This structure exists one per card */
	struct pci_dev *dev;		/* Pointer to PCI device */
	unsigned int intcount;
	int num;			/* Which card we are */
	int t1e1;			/* T1/E1 select pins */
	int globalconfig;	/* Whether global setup has been done */
	int syncsrc;			/* active sync source */
	struct t4_span *tspans[4];	/* Individual spans */
	int numspans;			/* Number of spans on the card */
	int blinktimer;
#ifdef FANCY_ALARM
	int alarmpos;
#endif
	int irq;			/* IRQ used by device */
	int order;			/* Order */
	int flags;			/* Device flags */
	int master;				/* Are we master */
	int ledreg;				/* LED Register */
	unsigned int gpio;
	unsigned int gpioctl;
	int e1recover;			/* E1 recovery timer */
	spinlock_t reglock;		/* lock register access */
	int spansstarted;		/* number of spans started */
	volatile unsigned int *writechunk;					/* Double-word aligned write memory */
	volatile unsigned int *readchunk;					/* Double-word aligned read memory */
	unsigned short canary;
#ifdef ENABLE_WORKQUEUES
	atomic_t worklist;
	struct workqueue_struct *workq;
#endif
	unsigned int passno;	/* number of interrupt passes */
	char *variety;
	int last0;		/* for detecting double-missed IRQ */

	/* DMA related fields */
	unsigned int dmactrl;
	dma_addr_t 	readdma;
	dma_addr_t	writedma;
	unsigned long memaddr;		/* Base address of card */
	unsigned long memlen;
	volatile unsigned int *membase;	/* Base address of card */

	/* Add this for our softlockup protector */
	unsigned int oct_rw_count;

	/* Flags for our bottom half */
	unsigned long checkflag;
	struct tasklet_struct t4_tlet;
	unsigned int vpm400checkstatus;
	
#ifdef VPM_SUPPORT
	struct vpm450m *vpm450m;
	int vpm;
#endif	

};
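
/*
 * Sketch (illustrative only): reglock is meant to be held around any
 * multi-step register access so that interrupt and ioctl paths cannot
 * interleave, e.g.:
 */
#if 0
	unsigned long flags;

	spin_lock_irqsave(&wc->reglock, flags);
	/* ... read/modify/write card registers here ... */
	spin_unlock_irqrestore(&wc->reglock, flags);
#endif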

#define T4_VPM_PRESENT (1 << 28)


#ifdef VPM_SUPPORT
static void t4_vpm400_init(struct t4 *wc);
static void t4_vpm450_init(struct t4 *wc);
static void t4_vpm_set_dtmf_threshold(struct t4 *wc, unsigned int threshold);
#endif
static void __set_clear(struct t4 *wc, int span);
static int t4_startup(struct zt_span *span);
static int t4_shutdown(struct zt_span *span);
static int t4_rbsbits(struct zt_chan *chan, int bits);
static int t4_maint(struct zt_span *span, int cmd);
#ifdef SUPPORT_GEN1
static int t4_reset_dma(struct t4 *wc);
#endif
static void t4_hdlc_hard_xmit(struct zt_chan *chan);
static int t4_ioctl(struct zt_chan *chan, unsigned int cmd, unsigned long data);
static void t4_tsi_assign(struct t4 *wc, int fromspan, int fromchan, int tospan, int tochan);
static void t4_tsi_unassign(struct t4 *wc, int tospan, int tochan);
static void __t4_set_timing_source(struct t4 *wc, int unit, int master, int slave);
static void t4_check_alarms(struct t4 *wc, int span);
static void t4_check_sigbits(struct t4 *wc, int span);

#define WC_RDADDR	0
#define WC_WRADDR	1
#define WC_COUNT	2
#define WC_DMACTRL	3	
#define WC_INTR		4
/* #define WC_GPIO		5 */
#define WC_VERSION	6
#define WC_LEDS		7
#define WC_GPIOCTL	8
#define WC_GPIO		9
#define WC_LADDR	10
#define WC_LDATA		11
#define WC_LCS		(1 << 11)
#define WC_LCS2		(1 << 12)
#define WC_LALE			(1 << 13)
#define WC_LFRMR_CS	(1 << 10)	/* Framer's ChipSelect signal */
#define WC_ACTIVATE	(1 << 12)
#define WC_LREAD			(1 << 15)
#define WC_LWRITE		(1 << 16)

#define WC_OFF    (0)
#define WC_RED    (1)
#define WC_GREEN  (2)
#define WC_YELLOW (3)

#define MAX_T4_CARDS 64

static void t4_isr_bh(unsigned long data);

static struct t4 *cards[MAX_T4_CARDS];


#define MAX_TDM_CHAN 32
#define MAX_DTMF_DET 16

#define HDLC_IMR0_MASK (FRMR_IMR0_RME | FRMR_IMR0_RPF)
#if 0
#define HDLC_IMR1_MASK (FRMR_IMR1_ALLS | FRMR_IMR1_XDU | FRMR_IMR1_XPR)
#else
#define HDLC_IMR1_MASK	(FRMR_IMR1_XDU | FRMR_IMR1_XPR)
#endif

static inline unsigned int __t4_pci_in(struct t4 *wc, const unsigned int addr)
{
	unsigned int res = readl(&wc->membase[addr]);
	return res;
}