📄 ieee80211_input.c
	} else {
		/*
		 * When denying unencrypted frames, discard
		 * any non-PAE frames received without encryption.
		 */
		if ((vap->iv_flags & IEEE80211_F_DROPUNENC) && key == NULL) {
			IEEE80211_NODE_STAT(ni, rx_unencrypted);
			return 0;
		}
	}
	return 1;
#undef IS_EAPOL
#undef PAIRWISE_SET
}

/*
 * Context: softIRQ (tasklet)
 */
int
ieee80211_input_all(struct ieee80211com *ic,
	struct sk_buff *skb, int rssi, u_int32_t rstamp)
{
	struct ieee80211vap *vap;
	int type = -1;

	/* XXX locking */
	TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
		struct ieee80211_node *ni;
		struct sk_buff *skb1;

		if (TAILQ_NEXT(vap, iv_next) != NULL) {
			skb1 = skb_copy(skb, GFP_ATOMIC);
			if (skb1 == NULL) {
				/* XXX stat+msg */
				continue;
			}
		} else {
			skb1 = skb;
			skb = NULL;
		}
		ni = ieee80211_ref_node(vap->iv_bss);
		type = ieee80211_input(ni, skb1, rssi, rstamp);
		ieee80211_free_node(ni);
	}
	if (skb != NULL)		/* no vaps, reclaim skb */
		dev_kfree_skb(skb);
	return type;
}
EXPORT_SYMBOL(ieee80211_input_all);
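/*
 * Illustrative sketch (not part of the original source): a driver's receive
 * tasklet would normally hand each completed frame to the stack through
 * ieee80211_input_all(), which fans the skb out to every vap and consumes
 * it.  The softc layout used here (mydrv_softc, sc_ic, sc_rxq, sc_rssi,
 * sc_tsf) is hypothetical and only shows the calling convention.
 *
 *	static void
 *	mydrv_rx_tasklet(unsigned long data)
 *	{
 *		struct mydrv_softc *sc = (struct mydrv_softc *) data;
 *		struct sk_buff *skb;
 *
 *		while ((skb = skb_dequeue(&sc->sc_rxq)) != NULL)
 *			(void) ieee80211_input_all(&sc->sc_ic, skb,
 *				sc->sc_rssi, sc->sc_tsf);
 *	}
 */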
/*
 * This function reassembles fragments using the skb of the first fragment,
 * if large enough. If not, a new skb is allocated to hold incoming
 * fragments.
 *
 * Fragments are copied at the end of the previous fragment. A different
 * strategy could have been used, where a non-linear skb is allocated and
 * fragments attached to that skb.
 */
static struct sk_buff *
ieee80211_defrag(struct ieee80211_node *ni, struct sk_buff *skb, int hdrlen)
{
	struct ieee80211_frame *wh = (struct ieee80211_frame *) skb->data;
	u_int16_t rxseq, last_rxseq;
	u_int8_t fragno, last_fragno;
	u_int8_t more_frag = wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG;

	rxseq = le16_to_cpu(*(u_int16_t *)wh->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;
	fragno = le16_to_cpu(*(u_int16_t *)wh->i_seq) & IEEE80211_SEQ_FRAG_MASK;

	/* Quick way out, if there's nothing to defragment */
	if (!more_frag && fragno == 0 && ni->ni_rxfrag[0] == NULL)
		return skb;

	/*
	 * Remove frag to ensure it doesn't get reaped by timer.
	 */
	if (ni->ni_table == NULL) {
		/*
		 * Should never happen. If the node is orphaned (not in
		 * the table) then input packets should not reach here.
		 * Otherwise, a concurrent request that yanks the table
		 * should be blocked by other interlocking and/or by first
		 * shutting the driver down. Regardless, be defensive
		 * here and just bail.
		 */
		/* XXX need msg+stat */
		dev_kfree_skb(skb);
		return NULL;
	}

	/*
	 * Use this lock to make sure ni->ni_rxfrag[0] is
	 * not freed by the timer process while we use it.
	 * XXX bogus
	 */
	IEEE80211_NODE_LOCK_IRQ(ni->ni_table);

	/*
	 * Update the time stamp. As a side effect, it
	 * also makes sure that the timer will not change
	 * ni->ni_rxfrag[0] for at least 1 second, or in
	 * other words, for the remainder of this function.
	 */
	ni->ni_rxfragstamp = jiffies;

	IEEE80211_NODE_UNLOCK_IRQ(ni->ni_table);

	/*
	 * Validate that fragment is in order and
	 * related to the previous ones.
	 */
	if (ni->ni_rxfrag[0]) {
		struct ieee80211_frame *lwh;

		lwh = (struct ieee80211_frame *) ni->ni_rxfrag[0]->data;
		last_rxseq = le16_to_cpu(*(u_int16_t *)lwh->i_seq) >>
			IEEE80211_SEQ_SEQ_SHIFT;
		last_fragno = le16_to_cpu(*(u_int16_t *)lwh->i_seq) &
			IEEE80211_SEQ_FRAG_MASK;
		if (rxseq != last_rxseq
		    || fragno != last_fragno + 1
		    || (!IEEE80211_ADDR_EQ(wh->i_addr1, lwh->i_addr1))
		    || (!IEEE80211_ADDR_EQ(wh->i_addr2, lwh->i_addr2))
		    || (ni->ni_rxfrag[0]->end - ni->ni_rxfrag[0]->tail <
			skb->len)) {
			/*
			 * Unrelated fragment or no space for it,
			 * clear current fragments
			 */
			dev_kfree_skb(ni->ni_rxfrag[0]);
			ni->ni_rxfrag[0] = NULL;
		}
	}

	/* If this is the first fragment */
	if (ni->ni_rxfrag[0] == NULL && fragno == 0) {
		ni->ni_rxfrag[0] = skb;
		/* If more frags are coming */
		if (more_frag) {
			if (skb_is_nonlinear(skb)) {
				/*
				 * We need a continuous buffer to
				 * assemble fragments
				 */
				ni->ni_rxfrag[0] = skb_copy(skb, GFP_ATOMIC);
				dev_kfree_skb(skb);
			}
			/*
			 * Check that we have enough space to hold
			 * incoming fragments
			 * XXX 4-address/QoS frames?
			 */
			else if (skb->end - skb->head <
				 ni->ni_vap->iv_dev->mtu + hdrlen) {
				ni->ni_rxfrag[0] = skb_copy_expand(skb, 0,
					(ni->ni_vap->iv_dev->mtu + hdrlen) -
					(skb->end - skb->head), GFP_ATOMIC);
				dev_kfree_skb(skb);
			}
		}
	} else {
		if (ni->ni_rxfrag[0]) {
			struct ieee80211_frame *lwh = (struct ieee80211_frame *)
				ni->ni_rxfrag[0]->data;

			/*
			 * We know we have enough space to copy,
			 * we've verified that before
			 */
			/* Copy current fragment at end of previous one */
			memcpy(ni->ni_rxfrag[0]->tail,
				skb->data + hdrlen, skb->len - hdrlen);
			/* Update tail and length */
			skb_put(ni->ni_rxfrag[0], skb->len - hdrlen);
			/* Keep a copy of last sequence and fragno */
			*(u_int16_t *) lwh->i_seq = *(u_int16_t *) wh->i_seq;
		}
		/* we're done with the fragment */
		dev_kfree_skb(skb);
	}

	if (more_frag) {
		/* More to come */
		skb = NULL;
	} else {
		/* Last fragment received, we're done! */
		skb = ni->ni_rxfrag[0];
		ni->ni_rxfrag[0] = NULL;
	}

	return skb;
}
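/*
 * Note added for illustration: ieee80211_defrag() above depends on the
 * 802.11 sequence-control layout, where the low 4 bits of i_seq carry the
 * fragment number and the upper 12 bits the sequence number.  Assuming the
 * usual definitions IEEE80211_SEQ_FRAG_MASK == 0x000f and
 * IEEE80211_SEQ_SEQ_SHIFT == 4, a host-order sequence-control value of
 * 0x2a43 decodes as:
 *
 *	fragno = 0x2a43 & IEEE80211_SEQ_FRAG_MASK;	// 0x3: fourth fragment
 *	rxseq  = 0x2a43 >> IEEE80211_SEQ_SEQ_SHIFT;	// 0x2a4 = 676
 *
 * which is what the validation above uses to accept only the next in-order
 * fragment (same rxseq, fragno == last_fragno + 1) from the same addresses.
 */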
static void
ieee80211_deliver_data(struct ieee80211_node *ni, struct sk_buff *skb)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct net_device *dev = vap->iv_dev;
	struct ether_header *eh = (struct ether_header *) skb->data;

#ifdef ATH_SUPERG_XR
	/*
	 * If it is an XR vap, send the data to the associated normal net
	 * device. The XR vap has a net device which is not registered with
	 * the OS.
	 */
	if (vap->iv_xrvap && vap->iv_flags & IEEE80211_F_XR)
		dev = vap->iv_xrvap->iv_dev;
#endif

	/* perform as a bridge within the vap */
	/* XXX intra-vap bridging only */
	if (vap->iv_opmode == IEEE80211_M_HOSTAP &&
	    (vap->iv_flags & IEEE80211_F_NOBRIDGE) == 0) {
		struct sk_buff *skb1 = NULL;

		if (ETHER_IS_MULTICAST(eh->ether_dhost))
			skb1 = skb_copy(skb, GFP_ATOMIC);
		else {
			/*
			 * Check if destination is associated with the
			 * same vap and authorized to receive traffic.
			 * Beware of traffic destined for the vap itself;
			 * sending it will not work; just let it be
			 * delivered normally.
			 */
			struct ieee80211_node *ni1 = ieee80211_find_node(
				&vap->iv_ic->ic_sta, eh->ether_dhost);
			if (ni1 != NULL) {
				if (ni1->ni_vap == vap &&
				    ieee80211_node_is_authorized(ni1) &&
				    ni1 != vap->iv_bss) {
					skb1 = skb;
					skb = NULL;
				}
				/* XXX statistic? */
				ieee80211_free_node(ni1);
			}
		}
		if (skb1 != NULL) {
			skb1->dev = dev;
			skb1->mac.raw = skb1->data;
			skb1->nh.raw = skb1->data + sizeof(struct ether_header);
			skb1->protocol = __constant_htons(ETH_P_802_2);
			/* XXX insert vlan tag before queuing it? */
			dev_queue_xmit(skb1);
		}
	}

	if (skb != NULL) {
		skb->dev = dev;
#ifdef USE_HEADERLEN_RESV
		skb->protocol = ath_eth_type_trans(skb, dev);
#else
		skb->protocol = eth_type_trans(skb, dev);
#endif
		if (ni->ni_vlan != 0 && vap->iv_vlgrp != NULL) {
			/* attach vlan tag */
			vlan_hwaccel_receive_skb(skb, vap->iv_vlgrp, ni->ni_vlan);
		} else
			netif_rx(skb);
		dev->last_rx = jiffies;
	}
}

static struct sk_buff *
ieee80211_decap(struct ieee80211vap *vap, struct sk_buff *skb, int hdrlen)
{
	struct ieee80211_qosframe_addr4 wh;	/* Max size address frames */
	struct ether_header *eh;
	struct llc *llc;
	u_short ether_type = 0;

	memcpy(&wh, skb->data, hdrlen);	/* Only copy hdrlen over */
	llc = (struct llc *) skb_pull(skb, hdrlen);
	if (skb->len >= LLC_SNAPFRAMELEN &&
	    llc->llc_dsap == LLC_SNAP_LSAP && llc->llc_ssap == LLC_SNAP_LSAP &&
	    llc->llc_control == LLC_UI && llc->llc_snap.org_code[0] == 0 &&
	    llc->llc_snap.org_code[1] == 0 && llc->llc_snap.org_code[2] == 0) {
		ether_type = llc->llc_un.type_snap.ether_type;
		skb_pull(skb, LLC_SNAPFRAMELEN);
		llc = NULL;
	}
	eh = (struct ether_header *) skb_push(skb, sizeof(struct ether_header));
	switch (wh.i_fc[1] & IEEE80211_FC1_DIR_MASK) {
	case IEEE80211_FC1_DIR_NODS:
		IEEE80211_ADDR_COPY(eh->ether_dhost, wh.i_addr1);
		IEEE80211_ADDR_COPY(eh->ether_shost, wh.i_addr2);
		break;
	case IEEE80211_FC1_DIR_TODS:
		IEEE80211_ADDR_COPY(eh->ether_dhost, wh.i_addr3);
		IEEE80211_ADDR_COPY(eh->ether_shost, wh.i_addr2);
		break;
	case IEEE80211_FC1_DIR_FROMDS:
		IEEE80211_ADDR_COPY(eh->ether_dhost, wh.i_addr1);
		IEEE80211_ADDR_COPY(eh->ether_shost, wh.i_addr3);
		break;
	case IEEE80211_FC1_DIR_DSTODS:
		IEEE80211_ADDR_COPY(eh->ether_dhost, wh.i_addr3);
		IEEE80211_ADDR_COPY(eh->ether_shost, wh.i_addr4);
		break;
	}
	if (!ALIGNED_POINTER(skb->data + sizeof(*eh), u_int32_t)) {
		struct sk_buff *n;

		/* XXX does this always work? */
		n = skb_copy(skb, GFP_ATOMIC);
		dev_kfree_skb(skb);
		if (n == NULL)
			return NULL;
		skb = n;
		eh = (struct ether_header *) skb->data;
	}
	if (llc != NULL)
		eh->ether_type = htons(skb->len - sizeof(*eh));
	else
		eh->ether_type = ether_type;
	return skb;
}

/*
 * Install received rate set information in the node's state block.
 */
int
ieee80211_setup_rates(struct ieee80211_node *ni, const u_int8_t *rates,
	const u_int8_t *xrates, int flags)
{
	struct ieee80211_rateset *rs = &ni->ni_rates;

	memset(rs, 0, sizeof(*rs));
	rs->rs_nrates = rates[1];
	memcpy(rs->rs_rates, rates + 2, rs->rs_nrates);
	if (xrates != NULL) {
		u_int8_t nxrates;

		/*
		 * Tack on 11g extended supported rate element.
		 */
		nxrates = xrates[1];
		if (rs->rs_nrates + nxrates > IEEE80211_RATE_MAXSIZE) {
			struct ieee80211vap *vap = ni->ni_vap;

			nxrates = IEEE80211_RATE_MAXSIZE - rs->rs_nrates;
			IEEE80211_NOTE(vap, IEEE80211_MSG_XRATE, ni,
				"extended rate set too large;"
				" only using %u of %u rates",
				nxrates, xrates[1]);
			vap->iv_stats.is_rx_rstoobig++;
		}
		memcpy(rs->rs_rates + rs->rs_nrates, xrates + 2, nxrates);
		rs->rs_nrates += nxrates;
	}
	return ieee80211_fix_rate(ni, flags);
}
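/*
 * Illustrative sketch (not part of the original source): the rates/xrates
 * arguments are pointers to the raw information elements as they appear in
 * the frame body, i.e. element ID, length byte, then rate octets in units
 * of 500 kb/s with the high bit marking a basic rate.  For a typical 11g
 * rate set the element scan would end up calling it roughly as below; the
 * literal arrays are made up for the example and the flags are assumed to
 * be the usual net80211 fix-rate flags.
 *
 *	u_int8_t rates[]  = { IEEE80211_ELEMID_RATES, 8,
 *		0x82, 0x84, 0x8b, 0x96, 0x0c, 0x12, 0x18, 0x24 };
 *	u_int8_t xrates[] = { IEEE80211_ELEMID_XRATES, 4,
 *		0x30, 0x48, 0x60, 0x6c };
 *
 *	ieee80211_setup_rates(ni, rates, xrates,
 *		IEEE80211_F_DOSORT | IEEE80211_F_DOFRATE |
 *		IEEE80211_F_DONEGO | IEEE80211_F_DODEL);
 */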
static void
ieee80211_auth_open(struct ieee80211_node *ni, struct ieee80211_frame *wh,
	int rssi, u_int32_t rstamp, u_int16_t seq, u_int16_t status)
{
	struct ieee80211vap *vap = ni->ni_vap;

	if (ni->ni_authmode == IEEE80211_AUTH_SHARED) {
		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH,
			ni->ni_macaddr, "open auth",
			"bad sta auth mode %u", ni->ni_authmode);
		vap->iv_stats.is_rx_bad_auth++;	/* XXX maybe a unique error? */
		if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
			/* XXX hack to workaround calling convention */

			/*
			 * XXX To send the frame to the requesting STA, we have
			 * to create a node for the station that we're going to
			 * reject.  The node will be freed automatically.
			 */
			if (ni == vap->iv_bss) {
				ni = ieee80211_dup_bss(vap, wh->i_addr2);
				if (ni == NULL)
					return;
				IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE,
					"%s: %p<%s> refcnt %d\n", __func__,
					ni, ether_sprintf(ni->ni_macaddr),
					ieee80211_node_refcnt(ni));
			}
			IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_AUTH,
				(seq + 1) | (IEEE80211_STATUS_ALG << 16));
			return;
		}
	}
	switch (vap->iv_opmode) {
	case IEEE80211_M_IBSS:
		if (vap->iv_state != IEEE80211_S_RUN ||
		    seq != IEEE80211_AUTH_OPEN_REQUEST) {
			vap->iv_stats.is_rx_bad_auth++;
			return;
		}
		ieee80211_new_state(vap, IEEE80211_S_AUTH,
			wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK);
		break;

	case IEEE80211_M_AHDEMO:
	case IEEE80211_M_WDS:
		/* should not come here */
		break;

	case IEEE80211_M_HOSTAP:
		if (vap->iv_state != IEEE80211_S_RUN ||
		    seq != IEEE80211_AUTH_OPEN_REQUEST) {
			vap->iv_stats.is_rx_bad_auth++;
			return;
		}
		/* always accept open authentication requests */
		if (ni == vap->iv_bss) {
			ni = ieee80211_dup_bss(vap, wh->i_addr2);
			if (ni == NULL)
				return;
			IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE,
				"%s: %p<%s> refcnt %d\n", __func__,
				ni, ether_sprintf(ni->ni_macaddr),
				ieee80211_node_refcnt(ni));
		} else if ((ni->ni_flags & IEEE80211_NODE_AREF) == 0)
			(void) ieee80211_ref_node(ni);
		/*
		 * Mark the node as referenced to reflect that its
		 * reference count has been bumped to ensure it remains
		 * after the transaction completes.
		 */
		ni->ni_flags |= IEEE80211_NODE_AREF;
		IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_AUTH, seq + 1);
		IEEE80211_NOTE(vap, IEEE80211_MSG_DEBUG | IEEE80211_MSG_AUTH,
			ni, "station authenticated (%s)", "open");
		/*
		 * When 802.1x is not in use mark the port
		 * authorized at this point so traffic can flow.
		 */
		if (ni->ni_authmode != IEEE80211_AUTH_8021X)
			ieee80211_node_authorize(ni);
		break;

	case IEEE80211_M_STA:
		if (vap->iv_state != IEEE80211_S_AUTH ||
		    seq != IEEE80211_AUTH_OPEN_RESPONSE) {
			vap->iv_stats.is_rx_bad_auth++;
			return;
		}