📄 if_ath.c
字号:
/*
 * NOTE(review): this chunk begins mid-function.  The statements below are
 * the tail of a routine whose start is outside this view; from what is
 * visible it restarts beacons (if any were configured) and re-enables the
 * interrupt mask -- confirm the enclosing function against the full file.
 */
		dev->name, __func__);
	if (sc->sc_beacons)
		ath_beacon_config(sc, NULL);	/* restart beacons */
	ath_hal_intrset(ah, sc->sc_imask);
	}
}

/*
 * Suspend hook: log the current interface flags and bring the
 * interface down via ath_stop().
 */
void
ath_suspend(struct net_device *dev)
{
	struct ath_softc *sc = dev->priv;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: flags %x\n", __func__, dev->flags);
	ath_stop(dev);
}

/*
 * Resume hook: log the current interface flags and re-initialize the
 * device via ath_init().
 */
void
ath_resume(struct net_device *dev)
{
	struct ath_softc *sc = dev->priv;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: flags %x\n", __func__, dev->flags);
	ath_init(dev);
}

/*
 * Shutdown hook: same action as suspend -- log the interface flags and
 * stop the device via ath_stop().
 */
void
ath_shutdown(struct net_device *dev)
{
	struct ath_softc *sc = dev->priv;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: flags %x\n", __func__, dev->flags);
	ath_stop(dev);
}

/*
 * Walk the rx descriptor chain looking for U-APSD trigger frames and
 * start a service period (SP) for each valid trigger found.
 *
 * Called at interrupt time from ath_intr() when HAL_INT_RX is raised,
 * i.e. before the rx tasklet runs.  For each completed descriptor this
 * routine:
 *   - tracks stations entering/leaving power save so trigger state stays
 *     coherent (see the race note in the body),
 *   - accepts only QoS data/null frames on trigger-enabled ACs from
 *     nodes in triggerable state,
 *   - drops duplicate (retried) triggers by sequence number, and
 *   - on a valid trigger, splices the node's queued U-APSD frames onto
 *     the U-APSD hardware transmit queue and starts transmission, with
 *     EOSP marked on the last frame (a QoS NULL is generated first if
 *     the node's queue is empty).
 * Descriptors examined here are flagged ATH_BUFSTATUS_DONE so they are
 * not re-processed later; sc_rxbufcur records where the scan stopped.
 */
static void
ath_uapsd_processtriggers(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf;
	struct ath_desc *ds;
	struct sk_buff *skb;
	struct ieee80211_node *ni;
	struct ath_node *an;
	struct ieee80211_qosframe *qwh;
	struct ath_txq *uapsd_xmit_q = sc->sc_uapsdq;
	struct ieee80211com *ic = &sc->sc_ic;
	int ac, retval;
	u_int8_t tid;
	u_int16_t frame_seq;
	u_int64_t tsf;
	/* map a descriptor's DMA (physical) address back to its virtual address */
#define PA2DESC(_sc, _pa) \
	((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \
		((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))

	/* XXXAPSD: build in check against max triggers we could see
	 *          based on ic->ic_uapsdmaxtriggers.
	 */

	tsf = ath_hal_gettsf64(ah);
	ATH_RXBUF_LOCK(sc);
	if (sc->sc_rxbufcur == NULL)
		sc->sc_rxbufcur = STAILQ_FIRST(&sc->sc_rxbuf);
	for (bf = sc->sc_rxbufcur; bf; bf = STAILQ_NEXT(bf, bf_list)) {
		ds = bf->bf_desc;
		if (ds->ds_link == bf->bf_daddr) {
			/* NB: never process the self-linked entry at the end */
			break;
		}
		if (bf->bf_status & ATH_BUFSTATUS_DONE) {
			/*
			 * Already processed this buffer (shouldn't occur if
			 * we change code to always process descriptors in
			 * rx intr handler - as opposed to sometimes processing
			 * in the rx tasklet).
			 */
			continue;
		}
		skb = bf->bf_skb;
		if (skb == NULL) {		/* XXX ??? can this happen */
			printk("%s: no skbuff\n", __func__);
			continue;
		}

		/*
		 * XXXAPSD: consider new hal call that does only the subset
		 *          of ath_hal_rxprocdesc we require for trigger search.
		 */
		/*
		 * NB: descriptor memory doesn't need to be sync'd
		 *     due to the way it was allocated.
		 */
		/*
		 * Must provide the virtual address of the current
		 * descriptor, the physical address, and the virtual
		 * address of the next descriptor in the h/w chain.
		 * This allows the HAL to look ahead to see if the
		 * hardware is done with a descriptor by checking the
		 * done bit in the following descriptor and the address
		 * of the current descriptor the DMA engine is working
		 * on.  All this is necessary because of our use of
		 * a self-linked list to avoid rx overruns.
		 */
		retval = ath_hal_rxprocdesc(ah, ds, bf->bf_daddr,
			PA2DESC(sc, ds->ds_link), tsf);
		if (HAL_EINPROGRESS == retval)
			break;

		/* XXX: we do not support frames spanning multiple descriptors */
		bf->bf_status |= ATH_BUFSTATUS_DONE;

		/* errors? */
		if (ds->ds_rxstat.rs_status)
			continue;

		/* prepare wireless header for examination */
		bus_dma_sync_single(sc->sc_bdev, bf->bf_skbaddr,
			sizeof(struct ieee80211_qosframe), BUS_DMA_FROMDEVICE);
		qwh = (struct ieee80211_qosframe *) skb->data;

		/* find the node.  it MUST be in the keycache. */
		if (ds->ds_rxstat.rs_keyix == HAL_RXKEYIX_INVALID ||
		    (ni = sc->sc_keyixmap[ds->ds_rxstat.rs_keyix]) == NULL) {
			/*
			 * XXX: this can occur if WEP mode is used for non-atheros clients
			 *      (since we do not know which of the 4 WEP keys will be used
			 *      at association time, so cannot setup a key-cache entry.
			 *      The Atheros client can convey this in the Atheros IE.)
			 *
			 * TODO: The fix is to use the hash lookup on the node here.
			 */
#if 0
			/*
			 * This print is very chatty, so removing for now.
			 */
			DPRINTF(sc, ATH_DEBUG_UAPSD, "%s: U-APSD node (%s) has invalid keycache entry\n",
				__func__, ether_sprintf(qwh->i_addr2));
#endif
			continue;
		}

		/* only nodes that negotiated U-APSD are of interest */
		if (!(ni->ni_flags & IEEE80211_NODE_UAPSD))
			continue;

		/*
		 * Must deal with change of state here, since otherwise there would
		 * be a race (on two quick frames from STA) between this code and the
		 * tasklet where we would:
		 *   - miss a trigger on entry to PS if we're already trigger hunting
		 *   - generate spurious SP on exit (due to frame following exit frame)
		 */
		if (((qwh->i_fc[1] & IEEE80211_FC1_PWR_MGT) ^
		     (ni->ni_flags & IEEE80211_NODE_PWR_MGT))) {
			/*
			 * NB: do not require lock here since this runs at intr
			 * "proper" time and cannot be interrupted by rx tasklet
			 * (code there has lock).  May want to place a macro here
			 * (that does nothing) to make this more clear.
			 */
			ni->ni_flags |= IEEE80211_NODE_PS_CHANGED;
			ni->ni_pschangeseq = *(u_int16_t *)(&qwh->i_seq[0]);
			ni->ni_flags &= ~IEEE80211_NODE_UAPSD_SP;
			ni->ni_flags ^= IEEE80211_NODE_PWR_MGT;

			if (qwh->i_fc[1] & IEEE80211_FC1_PWR_MGT) {
				/* station entered power save: becomes triggerable */
				ni->ni_flags |= IEEE80211_NODE_UAPSD_TRIG;
				ic->ic_uapsdmaxtriggers++;
				WME_UAPSD_NODE_TRIGSEQINIT(ni);
				DPRINTF(sc, ATH_DEBUG_UAPSD,
					"%s: Node (%s) became U-APSD triggerable (%d)\n",
					__func__, ether_sprintf(qwh->i_addr2),
					ic->ic_uapsdmaxtriggers);
			} else {
				/* station left power save: stop trigger hunting for it */
				ni->ni_flags &= ~IEEE80211_NODE_UAPSD_TRIG;
				ic->ic_uapsdmaxtriggers--;
				DPRINTF(sc, ATH_DEBUG_UAPSD,
					"%s: Node (%s) no longer U-APSD triggerable (%d)\n",
					__func__, ether_sprintf(qwh->i_addr2),
					ic->ic_uapsdmaxtriggers);
				/*
				 * XXX: rapidly thrashing sta could get
				 * out-of-order frames due this flush placing
				 * frames on backlogged regular AC queue and
				 * re-entry to PS having fresh arrivals onto
				 * faster UPSD delivery queue.  if this is a
				 * big problem we may need to drop these.
				 */
				ath_uapsd_flush(ni);
			}

			continue;
		}

		/* nothing to do if no station is currently triggerable */
		if (ic->ic_uapsdmaxtriggers == 0)
			continue;

		/* make sure the frame is QoS data/null */
		/* NB: with current sub-type definitions, the
		 * IEEE80211_FC0_SUBTYPE_QOS check, below, covers the
		 * qos null case too.
		 */
		if (((qwh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_DATA) ||
		     !(qwh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS))
			continue;

		/*
		 * To be a trigger:
		 *   - node is in triggerable state
		 *   - QoS data/null frame with triggerable AC
		 */
		tid = qwh->i_qos[0] & IEEE80211_QOS_TID;
		ac = TID_TO_WME_AC(tid);
		if (!WME_UAPSD_AC_CAN_TRIGGER(ac, ni))
			continue;

		DPRINTF(sc, ATH_DEBUG_UAPSD,
			"%s: U-APSD trigger detected for node (%s) on AC %d\n",
			__func__, ether_sprintf(ni->ni_macaddr), ac);
		if (ni->ni_flags & IEEE80211_NODE_UAPSD_SP) {
			/* have trigger, but SP in progress, so ignore */
			DPRINTF(sc, ATH_DEBUG_UAPSD,
				"%s: SP already in progress - ignoring\n",
				__func__);
			continue;
		}

		/*
		 * Detect duplicate triggers and drop if so.
		 */
		frame_seq = le16toh(*(u_int16_t *)qwh->i_seq);
		if ((qwh->i_fc[1] & IEEE80211_FC1_RETRY) &&
		    frame_seq == ni->ni_uapsd_trigseq[ac]) {
			DPRINTF(sc, ATH_DEBUG_UAPSD,
				"%s: dropped dup trigger, ac %d, seq %d\n",
				__func__, ac, frame_seq);
			continue;
		}

		an = ATH_NODE(ni);

		/* start the SP */
		ATH_NODE_UAPSD_LOCK(an);
		ni->ni_stats.ns_uapsd_triggers++;
		ni->ni_flags |= IEEE80211_NODE_UAPSD_SP;
		ni->ni_uapsd_trigseq[ac] = frame_seq;
		ATH_NODE_UAPSD_UNLOCK(an);

		ATH_TXQ_LOCK(uapsd_xmit_q);
		if (STAILQ_EMPTY(&an->an_uapsd_q)) {
			DPRINTF(sc, ATH_DEBUG_UAPSD,
				"%s: Queue empty, generating QoS NULL to send\n",
				__func__);
			/*
			 * Empty queue, so need to send qos null on this ac.  Make a
			 * call that will dump a qos null onto the node's queue, then
			 * we can proceed as normal.
			 */
			ieee80211_send_qosnulldata(ni, ac);
		}

		if (STAILQ_FIRST(&an->an_uapsd_q)) {
			struct ath_buf *last_buf = STAILQ_LAST(&an->an_uapsd_q, ath_buf, bf_list);
			struct ath_desc *last_desc = last_buf->bf_desc;
			struct ieee80211_qosframe *qwhl =
				(struct ieee80211_qosframe *)last_buf->bf_skb->data;
			/*
			 * NB: flip the bit to cause intr on the EOSP desc,
			 * which is the last one
			 */
			ath_hal_txreqintrdesc(sc->sc_ah, last_desc);
			qwhl->i_qos[0] |= IEEE80211_QOS_EOSP;

			if (IEEE80211_VAP_EOSPDROP_ENABLED(ni->ni_vap)) {
				/* simulate lost EOSP */
				qwhl->i_addr1[0] |= 0x40;
			}

			/* more data bit only for EOSP frame */
			if (an->an_uapsd_overflowqdepth)
				qwhl->i_fc[1] |= IEEE80211_FC1_MORE_DATA;
			else if (IEEE80211_NODE_UAPSD_USETIM(ni))
				ni->ni_vap->iv_set_tim(ni, 0);

			ni->ni_stats.ns_tx_uapsd += an->an_uapsd_qdepth;

			/* push the modified header back to the device */
			bus_dma_sync_single(sc->sc_bdev, last_buf->bf_skbaddr,
				sizeof(*qwhl), BUS_DMA_TODEVICE);

			/* chain this burst onto any frames already on the hw queue */
			if (uapsd_xmit_q->axq_link) {
#ifdef AH_NEED_DESC_SWAP
				*uapsd_xmit_q->axq_link =
					cpu_to_le32(STAILQ_FIRST(&an->an_uapsd_q)->bf_daddr);
#else
				*uapsd_xmit_q->axq_link =
					STAILQ_FIRST(&an->an_uapsd_q)->bf_daddr;
#endif
			}
			/* below leaves an_uapsd_q NULL */
			STAILQ_CONCAT(&uapsd_xmit_q->axq_q, &an->an_uapsd_q);
			uapsd_xmit_q->axq_link = &last_desc->ds_link;
			ath_hal_puttxbuf(sc->sc_ah, uapsd_xmit_q->axq_qnum,
				(STAILQ_FIRST(&uapsd_xmit_q->axq_q))->bf_daddr);
			ath_hal_txstart(sc->sc_ah, uapsd_xmit_q->axq_qnum);
		}
		an->an_uapsd_qdepth = 0;

		ATH_TXQ_UNLOCK(uapsd_xmit_q);
	}
	/* remember where the scan stopped so the next pass resumes here */
	sc->sc_rxbufcur = bf;
	ATH_RXBUF_UNLOCK(sc);
#undef PA2DESC
}

/*
 * Interrupt handler.  Most of the actual processing is deferred
 * (scheduled onto tasklets); only time-critical work -- beacon
 * transmission and U-APSD trigger scanning -- is done here.
 */
irqreturn_t
ath_intr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct ath_softc *sc = dev->priv;
	struct ath_hal *ah = sc->sc_ah;
	HAL_INT status;
	int needmark;

	if (sc->sc_invalid) {
		/*
		 * The hardware is not ready/present, don't touch anything.
		 * Note this can happen early on if the IRQ is shared.
		 */
		return IRQ_NONE;
	}
	if (!ath_hal_intrpend(ah))		/* shared irq, not for us */
		return IRQ_NONE;
	if ((dev->flags & (IFF_RUNNING | IFF_UP)) != (IFF_RUNNING | IFF_UP)) {
		/* interface not up and running: ack and mask everything */
		DPRINTF(sc, ATH_DEBUG_INTR, "%s: flags 0x%x\n", __func__, dev->flags);
		ath_hal_getisr(ah, &status);	/* clear ISR */
		ath_hal_intrset(ah, 0);		/* disable further intr's */
		return IRQ_HANDLED;
	}
	needmark = 0;
	/*
	 * Figure out the reason(s) for the interrupt.  Note
	 * that the hal returns a pseudo-ISR that may include
	 * bits we haven't explicitly enabled so we mask the
	 * value to ensure we only process bits we requested.
	 */
	ath_hal_getisr(ah, &status);		/* NB: clears ISR too */
	DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status);
	status &= sc->sc_imask;			/* discard unasked for bits */
	if (status & HAL_INT_FATAL) {
		/* fatal hardware error: recovery deferred to fatal tasklet */
		sc->sc_stats.ast_hardware++;
		ath_hal_intrset(ah, 0);		/* disable intr's until reset */
		ATH_SCHEDULE_TQUEUE(&sc->sc_fataltq, &needmark);
	} else if (status & HAL_INT_RXORN) {
		/* rx overrun: recovery deferred to rxorn tasklet */
		sc->sc_stats.ast_rxorn++;
		ath_hal_intrset(ah, 0);		/* disable intr's until reset */
		ATH_SCHEDULE_TQUEUE(&sc->sc_rxorntq, &needmark);
	} else {
		if (status & HAL_INT_SWBA) {
			/*
			 * Software beacon alert--time to send a beacon.
			 * Handle beacon transmission directly; deferring
			 * this is too slow to meet timing constraints
			 * under load.
			 */
			ath_beacon_send(sc, &needmark);
		}
		if (status & HAL_INT_RXEOL) {
			/*
			 * NB: the hardware should re-read the link when
			 *     RXE bit is written, but it doesn't work at
			 *     least on older hardware revs.
			 */
			sc->sc_stats.ast_rxeol++;
		}
		if (status & HAL_INT_TXURN) {
			sc->sc_stats.ast_txurn++;
			/* bump tx trigger level */
			ath_hal_updatetxtriglevel(ah, AH_TRUE);
		}
		if (status & HAL_INT_RX) {
			/* scan for U-APSD triggers now; defer the rest to the rx tasklet */
			ath_uapsd_processtriggers(sc);
			ATH_SCHEDULE_TQUEUE(&sc->sc_rxtq, &needmark);
		}
		if (status & HAL_INT_TX) {
#ifdef ATH_SUPERG_DYNTURBO
			/*
			 * Check if the beacon queue caused the interrupt
			 * when a dynamic turbo switch
			 * is pending so we can initiate the change.
			 * XXX must wait for all vap's beacons
			 */
			if (sc->sc_dturbo_switch) {
				u_int32_t txqs = (1 << sc->sc_bhalq);
				ath_hal_gettxintrtxqs(ah, &txqs);
				if(txqs & (1 << sc->sc_bhalq)) {
					sc->sc_dturbo_switch = 0;
					/*
					 * Hack: defer switch for 10ms to permit slow
					 * clients time to track us.  This especially
					 * noticeable with Windows clients.
					 */
					mod_timer(&sc->sc_dturbo_switch_mode,
						jiffies + msecs_to_jiffies(10));
				}
			}
#endif
			ATH_SCHEDULE_TQUEUE(&sc->sc_txtq, &needmark);
		}
		if (status & HAL_INT_BMISS) {
			/* beacon miss: deferred to bmiss tasklet */
			sc->sc_stats.ast_bmiss++;
			ATH_SCHEDULE_TQUEUE(&sc->sc_bmisstq, &needmark);
		}
		if (status & HAL_INT_MIB) {
			sc->sc_stats.ast_mib++;
			/*
			 * Disable interrupts until we service the MIB
			 * interrupt; otherwise it will continue to fire.
			 */
			ath_hal_intrset(ah, 0);
			/*
			 * Let the hal handle the event.  We assume it will
			 * clear whatever condition caused the interrupt.
			 * NOTE(review): SOURCE is truncated at this point --
			 * the remainder of ath_intr() is outside this view,
			 * and this comment is unterminated in the original.
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -