📄 if_athvar.h.svn-base
字号:
#define MAX_REGISTER_NAME_LEN 32 /* Maximum length of register nicknames in debug output */#endif /* #ifdef ATH_REVERSE_ENGINEERING *//* * Convert from net80211 layer values to Ath layer values. Hopefully this will * be optimised away when the two constants are the same. */typedef unsigned int ath_keyix_t;#define ATH_KEY(_keyix) ((_keyix == IEEE80211_KEYIX_NONE) ? HAL_TXKEYIX_INVALID : _keyix)#define ATH_MIN_FF_RATE 12000 /* min rate for ff aggregation in kbps */#define ATH_MIN_FF_RATE 12000 /* min rate for ff aggregation in kbps */struct ath_buf;typedef STAILQ_HEAD(, ath_buf) ath_bufhead;/* driver-specific node state */struct ath_node { struct ieee80211_node an_node; /* base class */ u_int16_t an_decomp_index; /* decompression mask index */ u_int32_t an_avgrssi; /* average rssi over all rx frames */ u_int8_t an_prevdatarix; /* rate ix of last data frame */ u_int16_t an_minffrate; /* min rate in kbps for ff to aggregate */ struct ath_buf *an_tx_ffbuf[WME_NUM_AC]; /* ff staging area */ ath_bufhead an_uapsd_q; /* U-APSD delivery queue */ int an_uapsd_qdepth; /* U-APSD delivery queue depth */ ath_bufhead an_uapsd_overflowq; /* U-APSD overflow queue (for > MaxSp frames) */ int an_uapsd_overflowqdepth; /* U-APSD overflow queue depth */ spinlock_t an_uapsd_lock; /* U-APSD delivery queue lock */ /* variable-length rate control state follows */};#define ATH_NODE(_n) ((struct ath_node *)(_n))#define SKB_AN(_skb) (ATH_NODE(SKB_NI(_skb)))#define ATH_NODE_CONST(ni) ((const struct ath_node *)(ni))#define ATH_NODE_UAPSD_LOCK_INIT(_an) spin_lock_init(&(_an)->an_uapsd_lock)#define ATH_NODE_UAPSD_LOCK_IRQ(_an) do { \ unsigned long __an_uapsd_lockflags; \ ATH_NODE_UAPSD_LOCK_CHECK(_an); \ spin_lock_irqsave(&(_an)->an_uapsd_lock, __an_uapsd_lockflags);#define ATH_NODE_UAPSD_UNLOCK_IRQ(_an) \ ATH_NODE_UAPSD_LOCK_ASSERT(_an); \ spin_unlock_irqrestore(&(_an)->an_uapsd_lock, __an_uapsd_lockflags); \} while (0)#define ATH_NODE_UAPSD_LOCK_IRQ_INSIDE(_an) do { \ 
ATH_NODE_UAPSD_LOCK_CHECK(_an); \ spin_lock(&(_an)->an_uapsd_lock); \} while (0)#define ATH_NODE_UAPSD_UNLOCK_IRQ_INSIDE(_an) do { \ ATH_NODE_UAPSD_LOCK_ASSERT(_an); \ spin_unlock(&(_an)->an_uapsd_lock); \} while (0)#define ATH_NODE_UAPSD_UNLOCK_IRQ_EARLY(_an) \ ATH_NODE_UAPSD_LOCK_ASSERT(_an); \ spin_unlock_irqrestore(&(_an)->an_uapsd_lock, __an_uapsd_lockflags);#if (defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)) && defined(spin_is_locked)#define ATH_NODE_UAPSD_LOCK_ASSERT(_an) \ KASSERT(spin_is_locked(&(_an)->an_uapsd_lock), ("uapsd not locked!"))#if (defined(ATH_DEBUG_SPINLOCKS))#define ATH_NODE_UAPSD_LOCK_CHECK(_an) do { \ if (spin_is_locked(&(_an)->an_uapsd_lock)) \ printk(KERN_DEBUG "%s:%d - about to block on uapsd lock!\n", __func__, __LINE__); \} while (0)#else /* #if (defined(ATH_DEBUG_SPINLOCKS)) */#define ATH_NODE_UAPSD_LOCK_CHECK(_an)#endif /* #if (defined(ATH_DEBUG_SPINLOCKS)) */#else#define ATH_NODE_UAPSD_LOCK_ASSERT(_an)#define ATH_NODE_UAPSD_LOCK_CHECK(_an)#endif #define ATH_RSSI_LPF_LEN 10#define ATH_RSSI_DUMMY_MARKER 0x127#define ATH_EP_MUL(x, mul) ((x) * (mul))#define ATH_RSSI_IN(x) (ATH_EP_MUL((x), HAL_RSSI_EP_MULTIPLIER))#define ATH_LPF_RSSI(x, y, len) \ ((x != ATH_RSSI_DUMMY_MARKER) ? (((x) * ((len) - 1) + (y)) / (len)) : (y))#define ATH_RSSI_LPF(x, y) do { \ if ((y) >= -20) \ x = ATH_LPF_RSSI((x), ATH_RSSI_IN((y)), ATH_RSSI_LPF_LEN); \} while (0)#define ATH_ANTENNA_DIFF 2 /* Num frames difference in * tx to flip default recv * antenna */struct ath_buf { /* FFXXX: convert both list types to TAILQ to save a field? 
	 */
	STAILQ_ENTRY(ath_buf) bf_list;
#ifdef ATH_SUPERG_FF
	TAILQ_ENTRY(ath_buf) bf_stagelist;	/* fast-frame staging list */
#endif
	struct ath_desc *bf_desc;		/* virtual addr of desc */
	struct ath_desc_status bf_dsstatus;	/* tx/rx descriptor status */
	dma_addr_t bf_daddr;			/* physical addr of desc */
	struct sk_buff *bf_skb;			/* skbuff for buf */
	dma_addr_t bf_skbaddr;			/* physical addr of skb data - always used by one desc */
	u_int32_t bf_status;			/* status flags */
	u_int16_t bf_flags;			/* tx descriptor flags */
	u_int64_t bf_tsf;
	int16_t bf_channoise;
#ifdef ATH_SUPERG_FF
	/* XXX: combine this with bf_skbaddr if it ever changes to accommodate
	 * multiple segments.
	 */
	u_int16_t bf_numdescff;			/* number of descs used for FF (these are extra) */
	u_int32_t bf_queueage;			/* "age" of txq when this buffer placed on stageq */
	dma_addr_t bf_skbaddrff[ATH_TXDESC - 1]; /* extra addrs for FF */
#endif
	int bf_taken_at_line;			/* XXX: Want full alloc backtrace */
	const char *bf_taken_at_func;
};

/* The last descriptor for a buffer.
 * NB: This code assumes that the descriptors for a buf are allocated,
 * contiguously. This assumption is made elsewhere too. */
#ifdef ATH_SUPERG_FF
# define ATH_BUF_LAST_DESC(_bf)	((_bf)->bf_desc + (_bf)->bf_numdescff)
#else
# define ATH_BUF_LAST_DESC(_bf)	((_bf)->bf_desc)
#endif

/* BF_XX(...) macros will blow up if _bf is NULL, but not if _bf->bf_skb is
 * null. */
#define ATH_BUF_CB(_bf)	(((_bf)->bf_skb) ? SKB_CB((_bf)->bf_skb) : NULL)
#define ATH_BUF_NI(_bf)	(((_bf)->bf_skb) ? SKB_NI((_bf)->bf_skb) : NULL)
#define ATH_BUF_AN(_bf)	(((_bf)->bf_skb) ? SKB_AN((_bf)->bf_skb) : NULL)

/* bf_status flags.  XXX: only managed for rx at the moment */
#define ATH_BUFSTATUS_RXDESC_DONE	0x00000001	/* rx descriptor processing complete, desc processed by hal */
#define ATH_BUFSTATUS_RADAR_DONE	0x00000002	/* marker to indicate a PHYERR for radar pulse
							   has already been handled.  We may receive
							   multiple interrupts before the rx_tasklet
							   clears the queue */
#define ATH_BUFSTATUS_RXTSTAMP		0x00000004	/* RX timestamps needs to be adjusted */

/* DMA state for tx/rx descriptors. */
struct ath_descdma {
	const char *dd_name;
	struct ath_desc *dd_desc;	/* descriptors */
	dma_addr_t dd_desc_paddr;	/* physical addr of dd_desc */
	size_t dd_desc_len;		/* size of dd_desc */
	unsigned int dd_ndesc;
	unsigned int dd_nbuf;
	struct ath_buf *dd_bufptr;	/* associated buffers */
};

struct ath_hal;
struct ath_desc;
struct ath_ratectrl;
struct ath_tx99;
struct proc_dir_entry;

/*
 * Data transmit queue state. One of these exists for each
 * hardware transmit queue. Packets sent to us from above
 * are assigned to queues based on their priority. Not all
 * devices support a complete set of hardware transmit queues.
 * For those devices the array sc_ac2q will map multiple
 * priorities to fewer hardware queues (typically all to one
 * hardware queue).
 */
struct ath_txq {
	u_int axq_qnum;			/* hardware q number */
	STAILQ_HEAD(, ath_buf) axq_q;	/* transmit queue */
	spinlock_t axq_lock;		/* lock on q and link */
	int axq_depth;			/* queue depth */
	u_int32_t axq_totalqueued;	/* total ever queued */
	u_int axq_intrcnt;		/* count to determine if descriptor
					 * should generate int on this txq. */
	/*
	 * Staging queue for frames awaiting a fast-frame pairing.
	 */
	TAILQ_HEAD(axq_headtype, ath_buf) axq_stageq;
	/* scratch compression buffer */
	char *axq_compbuf;		/* scratch comp buffer */
	dma_addr_t axq_compbufp;	/* scratch comp buffer (phys)*/
	u_int axq_compbufsz;		/* scratch comp buffer size */
};

/* driver-specific vap state */
struct ath_vap {
	struct ieee80211vap av_vap;	/* base class */
	/* saved net80211 newstate handler; wrapped by the driver */
	int (*av_newstate)(struct ieee80211vap *, enum ieee80211_state, int);
	/* XXX beacon state */
	struct ath_buf *av_bcbuf;	/* beacon buffer */
	struct ieee80211_beacon_offsets av_boff;/* dynamic update state */
	int av_bslot;			/* beacon slot index */
	struct ath_txq av_mcastq;	/* multicast transmit queue */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
	atomic_t av_beacon_alloc;	/* set to 1 when the next beacon needs to be recomputed */
#else
	unsigned long av_beacon_alloc;
#endif
};
#define ATH_VAP(_v)	((struct ath_vap *)(_v))

#define ATH_BEACON_AIFS_DEFAULT		0	/* Default aifs for ap beacon q */
#define ATH_BEACON_CWMIN_DEFAULT	0	/* Default cwmin for ap beacon q */
#define ATH_BEACON_CWMAX_DEFAULT	0	/* Default cwmax for ap beacon q */

#define ATH_TXQ_INTR_PERIOD		5	/* axq_intrcnt period for intr gen */
#define ATH_TXQ_LOCK_INIT(_tq)		spin_lock_init(&(_tq)->axq_lock)
#define ATH_TXQ_LOCK_DESTROY(_tq)
/* NB: LOCK_IRQ leaves the "do {" open and declares the flags variable; it
 * MUST be paired with UNLOCK_IRQ (or UNLOCK_IRQ_EARLY before it). */
#define ATH_TXQ_LOCK_IRQ(_tq)		do {	\
	unsigned long __axq_lockflags;		\
	ATH_TXQ_LOCK_CHECK(_tq);		\
	spin_lock_irqsave(&(_tq)->axq_lock, __axq_lockflags);
#define ATH_TXQ_UNLOCK_IRQ(_tq)			\
	ATH_TXQ_LOCK_ASSERT(_tq);		\
	spin_unlock_irqrestore(&(_tq)->axq_lock, __axq_lockflags); \
} while (0)
/* NB: unlocks but does NOT close the "do {" opened by LOCK_IRQ. */
#define ATH_TXQ_UNLOCK_IRQ_EARLY(_tq)		\
	ATH_TXQ_LOCK_ASSERT(_tq);		\
	spin_unlock_irqrestore(&(_tq)->axq_lock, __axq_lockflags);
#define ATH_TXQ_LOCK_IRQ_INSIDE(_tq)	do {	\
	ATH_TXQ_LOCK_CHECK(_tq);		\
	spin_lock(&(_tq)->axq_lock);		\
} while (0)
#define ATH_TXQ_UNLOCK_IRQ_INSIDE(_tq)	do {	\
	ATH_TXQ_LOCK_ASSERT(_tq);		\
	spin_unlock(&(_tq)->axq_lock);		\
} while (0)

#if (defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)) && defined(spin_is_locked)
#define ATH_TXQ_LOCK_ASSERT(_tq) \
	KASSERT(spin_is_locked(&(_tq)->axq_lock), ("txq not locked!"))
#if (defined(ATH_DEBUG_SPINLOCKS))
#define ATH_TXQ_LOCK_CHECK(_tq) do { \
	if (spin_is_locked(&(_tq)->axq_lock)) \
		printk(KERN_DEBUG "%s:%d - about to block on txq lock!\n", __func__, __LINE__); \
} while (0)
#else /* #if (defined(ATH_DEBUG_SPINLOCKS)) */
#define ATH_TXQ_LOCK_CHECK(_tq)
#endif /* #if (defined(ATH_DEBUG_SPINLOCKS)) */
#else
#define ATH_TXQ_LOCK_ASSERT(_tq)
#define ATH_TXQ_LOCK_CHECK(_tq)
#endif

#define ATH_TXQ_LAST(_txq) \
	STAILQ_LAST(&(_txq)->axq_q, ath_buf, bf_list)

/* Return the last hw descriptor of the last buffer on txq, or NULL if empty. */
static __inline struct ath_desc *ath_txq_last_desc(struct ath_txq *txq)
{
	struct ath_buf *tbf = ATH_TXQ_LAST(txq);
	return tbf ? ATH_BUF_LAST_DESC(tbf) : NULL;
}

#define ATH_TXQ_INSERT_TAIL(_tq, _elm, _field) do { \
	STAILQ_INSERT_TAIL(&(_tq)->axq_q, (_elm), _field); \
	(_tq)->axq_depth++; \
	(_tq)->axq_totalqueued++; \
} while (0)
#define ATH_TXQ_REMOVE_HEAD(_tq, _field) do { \
	STAILQ_REMOVE_HEAD(&(_tq)->axq_q, _field); \
	--(_tq)->axq_depth; \
} while (0)

/* Concat. buffers from one queue to other. */
#define ATH_TXQ_MOVE_Q(_tqs,_tqd) do { \
	(_tqd)->axq_depth += (_tqs)->axq_depth; \
	(_tqd)->axq_totalqueued += (_tqs)->axq_totalqueued; \
	ATH_TXQ_LINK_DESC((_tqd), STAILQ_FIRST(&(_tqs)->axq_q)); \
	STAILQ_CONCAT(&(_tqd)->axq_q, &(_tqs)->axq_q); \
	(_tqs)->axq_depth = 0; \
	(_tqs)->axq_totalqueued = 0; \
} while (0)

#define ATH_TXQ_LINK_DESC(_txq, _bf) \
	ATH_STQ_LINK_DESC(&(_txq)->axq_q, (_bf))

/* NB: This macro's behaviour is dependent upon it being called *before* _bf is
 * inserted into _stq.
 */
#define ATH_STQ_LINK_DESC(_stq, _bf) do { \
		if (STAILQ_FIRST((_stq))) \
			ATH_BUF_LAST_DESC( \
					STAILQ_LAST((_stq), ath_buf, bf_list) \
					)->ds_link = \
				ath_ds_link_swap((_bf)->bf_daddr); \
	} while (0)

#define BSTUCK_THRESH	10	/* # of stuck beacons before resetting NB: this is a guess*/

/* NOTE(review): per-pulse record (tsf/rssi/width) — looks like radar pulse
 * detection state given ATH_BUFSTATUS_RADAR_DONE above; confirm with users. */
struct ath_rp {
	struct list_head list;
	u_int64_t rp_tsf;
	u_int8_t rp_rssi;
	u_int8_t rp_width;

	int rp_index;
	int rp_allocated;
	int rp_analyzed;
};

/* Per-device driver state.  NB: continues beyond this chunk. */
struct ath_softc {
	struct ieee80211com sc_ic;		/* NB: must be first */
	struct net_device *sc_dev;
	void __iomem *sc_iobase;		/* address of the device */
	struct semaphore sc_lock;		/* dev-level lock */
	struct net_device_stats sc_devstats;	/* device statistics */
	struct ath_stats sc_stats;		/* private statistics */
	int devid;
	int sc_debug;
	int sc_default_ieee80211_debug;		/* default debug flags for new VAPs */
	/* saved net80211 handlers; wrapped by the driver */
	int (*sc_recv_mgmt)(struct ieee80211vap *, struct ieee80211_node *,
		struct sk_buff *, int, int, u_int64_t);
	void (*sc_node_cleanup)(struct ieee80211_node *);
	void (*sc_node_free)(struct ieee80211_node *);
	void *sc_bdev;				/* associated bus device */
	struct ath_hal *sc_ah;			/* Atheros HAL */
	spinlock_t sc_hal_lock;			/* hardware access lock */
	struct ath_ratectrl *sc_rc;		/* tx rate control support */
	struct ath_tx99 *sc_tx99;		/* tx99 support */
	void (*sc_setdefantenna)(struct ath_softc *, u_int);
	unsigned int sc_invalid:1;		/* being detached */
	unsigned int sc_mrretry:1;		/* multi-rate retry support */
	unsigned int sc_softled:1;		/* enable LED gpio status */
	unsigned int sc_splitmic:1;		/* split TKIP MIC keys */
	unsigned int sc_needmib:1;		/* enable MIB stats intr */
	unsigned int sc_hasdiversity:1;		/* rx diversity available */
	unsigned int sc_diversity:1;		/* enable rx diversity */
	unsigned int sc_olddiversity:1;		/* diversity setting before XR enable */
	unsigned int sc_hasveol:1;		/* tx VEOL support */
	unsigned int sc_hastpc:1;		/* per-packet TPC support */
	unsigned int sc_dturbo:1;		/* dynamic turbo capable */
	unsigned int sc_dturbo_switch:1;	/* turbo switch mode*/
	unsigned int sc_dturbo_hold:1;		/* dynamic turbo hold state */
	unsigned int sc_rate_recn_state:1;	/* dynamic turbo state recmded by ratectrl */
	unsigned int sc_ignore_ar:1;		/* ignore AR during transition */
	unsigned int sc_ledstate:1;		/* LED on/off state */
	unsigned int sc_blinking:1;		/* LED blink operation active */
	unsigned int sc_beacons:1;		/* beacons running */
	unsigned int sc_hasbmask:1;		/* bssid mask support */
	unsigned int sc_mcastkey:1;		/* mcast key cache search */
	unsigned int sc_hastsfadd:1;		/* tsf adjust support */
	unsigned int sc_scanning:1;		/* scanning active */
	unsigned int sc_nostabeacons:1;		/* no beacons for station */
	unsigned int sc_xrgrppoll:1;		/* xr group polls are active */
	unsigned int sc_syncbeacon:1;		/* sync/resync beacon timers */
	unsigned int sc_hasclrkey:1;		/* CLR key supported */
	/* sc_stagbeacons : If set and several VAPs need to send beacons,
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -