/* ieee80211_input.c.svn-base */
	ieee80211_dev_kfree_skb(&skb);
	return type;
}
EXPORT_SYMBOL(ieee80211_input_all);

/*
 * Determines whether a frame should be accepted, based on information
 * about the frame's origin and encryption, and policy for this vap.
 */
static int
accept_data_frame(struct ieee80211vap *vap, struct ieee80211_node *ni,
	struct ieee80211_key *key, struct sk_buff *skb, struct ether_header *eh)
{
#define IS_EAPOL(eh)		((eh)->ether_type == __constant_htons(ETHERTYPE_PAE))
#define PAIRWISE_SET(vap)	((vap)->iv_nw_keys[0].wk_cipher != &ieee80211_cipher_none)
	if (IS_EAPOL(eh)) {
		/* encrypted eapol is always OK */
		if (key)
			return 1;
		/* cleartext eapol is OK if we don't have pairwise keys yet */
		if (!PAIRWISE_SET(vap))
			return 1;
		/* cleartext eapol is OK if configured to allow it */
		if (!IEEE80211_VAP_DROPUNENC_EAPOL(vap))
			return 1;
		/* cleartext eapol is OK if other unencrypted is OK */
		if (!(vap->iv_flags & IEEE80211_F_DROPUNENC))
			return 1;
		/* not OK */
		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
			eh->ether_shost, "data",
			"unauthorized port: ether type 0x%x len %u",
			ntohs(eh->ether_type), skb->len);
		vap->iv_stats.is_rx_unauth++;
		vap->iv_devstats.rx_errors++;
		IEEE80211_NODE_STAT(ni, rx_unauth);
		return 0;
	}

	if (!ieee80211_node_is_authorized(ni)) {
		/*
		 * Deny any non-PAE frames received prior to
		 * authorization.  For open/shared-key
		 * authentication the port is marked authorized
		 * after authentication completes.  For 802.1x
		 * the port is not marked authorized by the
		 * authenticator until the handshake has completed.
		 */
		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_INPUT,
			eh->ether_shost, "data",
			"unauthorized port: ether type 0x%x len %u",
			ntohs(eh->ether_type), skb->len);
		vap->iv_stats.is_rx_unauth++;
		vap->iv_devstats.rx_errors++;
		IEEE80211_NODE_STAT(ni, rx_unauth);
		return 0;
	} else {
		/*
		 * When denying unencrypted frames, discard
		 * any non-PAE frames received without encryption.
		 */
		if ((vap->iv_flags & IEEE80211_F_DROPUNENC) && key == NULL) {
			IEEE80211_NODE_STAT(ni, rx_unencrypted);
			return 0;
		}
	}
	return 1;
#undef IS_EAPOL
#undef PAIRWISE_SET
}
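/*
 * Illustration of the 802.11 sequence control field that the
 * defragmentation code below unpacks (a sketch, assuming the usual
 * net80211 values IEEE80211_SEQ_SEQ_SHIFT == 4 and
 * IEEE80211_SEQ_FRAG_MASK == 0x000f):
 *
 *	seqctl = le16_to_cpu(*(__le16 *)wh->i_seq);
 *	rxseq  = seqctl >> IEEE80211_SEQ_SEQ_SHIFT;	// bits 4..15: sequence number
 *	fragno = seqctl & IEEE80211_SEQ_FRAG_MASK;	// bits 0..3:  fragment number
 *
 * All fragments of one MSDU share the same sequence number and carry
 * consecutive fragment numbers; every fragment except the last has
 * the MORE_FRAG bit set in i_fc[1].
 */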
/*
 * This function reassembles fragments using the skb of the first fragment,
 * if large enough.  If not, a new skb is allocated to hold incoming
 * fragments.
 *
 * Fragments are copied at the end of the previous fragment.  A different
 * strategy could have been used, where a non-linear skb is allocated and
 * fragments attached to that skb.
 */
static struct sk_buff *
ieee80211_defrag(struct ieee80211_node *ni, struct sk_buff *skb, int hdrlen)
{
	struct ieee80211_frame *wh = (struct ieee80211_frame *)skb->data;
	u_int16_t rxseq, last_rxseq;
	u_int8_t fragno, last_fragno;
	u_int8_t more_frag = wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG;

	rxseq = le16_to_cpu(*(__le16 *)wh->i_seq) >> IEEE80211_SEQ_SEQ_SHIFT;
	fragno = le16_to_cpu(*(__le16 *)wh->i_seq) & IEEE80211_SEQ_FRAG_MASK;

	/* Quick way out, if there's nothing to defragment */
	if (!more_frag && fragno == 0 && ni->ni_rxfrag == NULL)
		return skb;

	/*
	 * Remove frag to ensure it doesn't get reaped by timer.
	 */
	if (ni->ni_table == NULL) {
		/*
		 * Should never happen.  If the node is orphaned (not in
		 * the table) then input packets should not reach here.
		 * Otherwise, a concurrent request that yanks the table
		 * should be blocked by other interlocking and/or by first
		 * shutting the driver down.  Regardless, be defensive
		 * here and just bail.
		 */
		/* XXX need msg+stat */
		ieee80211_dev_kfree_skb(&skb);
		return NULL;
	}

	/*
	 * Update the timestamp.  As a side effect, it
	 * also makes sure that the timer will not change
	 * ni->ni_rxfrag for at least 1 second, or in
	 * other words, for the remainder of this function.
	 * XXX HUGE HORRIFIC HACK
	 */
	ni->ni_rxfragstamp = jiffies;

	/*
	 * Validate that the fragment is in order and
	 * related to the previous ones.
	 */
	if (ni->ni_rxfrag) {
		struct ieee80211_frame *lwh;

		lwh = (struct ieee80211_frame *)ni->ni_rxfrag->data;
		last_rxseq = le16_to_cpu(*(__le16 *)lwh->i_seq) >>
			IEEE80211_SEQ_SEQ_SHIFT;
		last_fragno = le16_to_cpu(*(__le16 *)lwh->i_seq) &
			IEEE80211_SEQ_FRAG_MASK;
		if (rxseq != last_rxseq ||
		    fragno != last_fragno + 1 ||
		    (!IEEE80211_ADDR_EQ(wh->i_addr1, lwh->i_addr1)) ||
		    (!IEEE80211_ADDR_EQ(wh->i_addr2, lwh->i_addr2)) ||
		    (ni->ni_rxfrag->end - ni->ni_rxfrag->tail < skb->len)) {
			/*
			 * Unrelated fragment or no space for it,
			 * clear current fragments
			 */
			ieee80211_dev_kfree_skb(&ni->ni_rxfrag);
		}
	}

	/* If this is the first fragment */
	if (ni->ni_rxfrag == NULL && fragno == 0) {
		ni->ni_rxfrag = skb;
		/* If more frags are coming */
		if (more_frag) {
			if (skb_is_nonlinear(skb)) {
				/*
				 * We need a continuous buffer to
				 * assemble fragments
				 */
				ni->ni_rxfrag = skb_copy(skb, GFP_ATOMIC);
				if (ni->ni_rxfrag) {
					ieee80211_skb_copy_noderef(skb,
						ni->ni_rxfrag);
					ieee80211_dev_kfree_skb(&skb);
				}
			}
			/*
			 * Check that we have enough space to hold
			 * incoming fragments
			 * XXX 4-address/QoS frames?
			 */
			else if ((skb_end_pointer(skb) - skb->head) <
				 (ni->ni_vap->iv_dev->mtu + hdrlen)) {
				ni->ni_rxfrag = skb_copy_expand(skb, 0,
					(ni->ni_vap->iv_dev->mtu + hdrlen) -
					(skb_end_pointer(skb) - skb->head),
					GFP_ATOMIC);
				if (ni->ni_rxfrag)
					ieee80211_skb_copy_noderef(skb,
						ni->ni_rxfrag);
				ieee80211_dev_kfree_skb(&skb);
			}
		}
	} else {
		if (ni->ni_rxfrag) {
			struct ieee80211_frame *lwh = (struct ieee80211_frame *)
				ni->ni_rxfrag->data;

			/*
			 * We know we have enough space to copy,
			 * we've verified that before
			 */
			/* Copy current fragment at end of previous one */
			memcpy(skb_tail_pointer(ni->ni_rxfrag),
				skb->data + hdrlen, skb->len - hdrlen);
			/* Update tail and length */
			skb_put(ni->ni_rxfrag, skb->len - hdrlen);
			/* Keep a copy of last sequence and fragno */
			*(__le16 *)lwh->i_seq = *(__le16 *)wh->i_seq;
		}
		/* we're done with the fragment */
		ieee80211_dev_kfree_skb(&skb);
	}

	if (more_frag) {
		/* More to come */
		skb = NULL;
	} else {
		/* Last fragment received, we're done! */
		skb = ni->ni_rxfrag;
		ni->ni_rxfrag = NULL;
	}
	return skb;
}
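/*
 * Delivery/bridging note (a summary of the logic below, not new
 * behavior): in hostap mode, with IEEE80211_F_NOBRIDGE clear, a
 * received multicast/broadcast frame is both passed up the local
 * stack and re-transmitted into the BSS, while a unicast frame whose
 * destination is an authorized station on the same vap is turned
 * around entirely and never delivered locally.  Frames addressed to
 * the vap itself fall through to normal local delivery.
 */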
static void
ieee80211_deliver_data(struct ieee80211_node *ni, struct sk_buff *skb)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct net_device *dev = vap->iv_dev;
	struct ether_header *eh = (struct ether_header *)skb->data;
	struct ieee80211_node *tni;
	int ret;

#ifdef ATH_SUPERG_XR
	/*
	 * If it is an XR vap, send the data to the associated normal net
	 * device.  The XR vap has a net device which is not registered
	 * with the OS.
	 */
	if (vap->iv_xrvap && (vap->iv_flags & IEEE80211_F_XR))
		dev = vap->iv_xrvap->iv_dev;
#endif

	/* perform as a bridge within the vap */
	/* XXX intra-vap bridging only */
	if (vap->iv_opmode == IEEE80211_M_HOSTAP &&
	    (vap->iv_flags & IEEE80211_F_NOBRIDGE) == 0) {
		struct sk_buff *skb1 = NULL;

		if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
			/* Create an SKB for the BSS to send out. */
			skb1 = skb_copy(skb, GFP_ATOMIC);
			if (skb1)
				SKB_NI(skb1) = ieee80211_ref_node(vap->iv_bss);
		} else {
			/*
			 * Check if the destination is associated with the
			 * same vap and authorized to receive traffic.
			 * Beware of traffic destined for the vap itself;
			 * sending it will not work; just let it be
			 * delivered normally.
			 */
			struct ieee80211_node *ni1 = ieee80211_find_node(
				&vap->iv_ic->ic_sta, eh->ether_dhost);
			if (ni1 != NULL) {
				if ((ni1->ni_vap == vap) &&
				    (ni1 != vap->iv_bss) &&
				    ieee80211_node_is_authorized(ni1)) {
					skb1 = skb;
					skb = NULL;
				}
				/* XXX: statistic? */
				ieee80211_unref_node(&ni1);
			}
		}
		if (skb1 != NULL) {
			skb1->dev = dev;
			skb_reset_mac_header(skb1);
			skb_set_network_header(skb1,
				sizeof(struct ether_header));
			skb1->protocol = __constant_htons(ETH_P_802_2);
			/*
			 * This SKB is being emitted to the physical/parent
			 * device, which maintains node references.  However,
			 * there is kernel code in between which does not.
			 * Therefore, the ref. is cleaned if the SKB is
			 * dropped.
			 */
			tni = SKB_NI(skb1);
			/* XXX: Insert vlan tag before queuing it? */
			if (dev_queue_xmit(skb1) == NET_XMIT_DROP) {
				vap->iv_devstats.tx_dropped++;
				if (tni != NULL)
					ieee80211_unref_node(&tni);
			}
			/* SKB is no longer ours, either way after dev_queue_xmit. */
			skb1 = NULL;
		}
	}

	if (skb != NULL) {
		vap->iv_devstats.rx_packets++;
		vap->iv_devstats.rx_bytes += skb->len;
		dev->last_rx = jiffies;
		skb->dev = dev;
#ifdef USE_HEADERLEN_RESV
		skb->protocol = ath_eth_type_trans(skb, dev);
#else
		skb->protocol = eth_type_trans(skb, dev);
#endif
		tni = SKB_NI(skb);
		if ((ni->ni_vlan != 0) && (vap->iv_vlgrp != NULL))
			/* Attach VLAN tag. */
			ret = vlan_hwaccel_rx(skb, vap->iv_vlgrp, ni->ni_vlan);
		else
			ret = netif_rx(skb);
		if (ret == NET_RX_DROP)
			vap->iv_devstats.rx_dropped++;
		if (tni != NULL)
			ieee80211_unref_node(&tni);
		skb = NULL;	/* SKB is no longer ours */
	}
}

/*
 * This function removes the 802.11 header, including LLC/SNAP headers,
 * and replaces it with an Ethernet II header.
 */
static struct sk_buff *
ieee80211_decap(struct ieee80211vap *vap, struct sk_buff *skb, int hdrlen)
{
	const struct llc snap_hdr = {
		.llc_dsap = LLC_SNAP_LSAP,
		.llc_ssap = LLC_SNAP_LSAP,
		.llc_snap.control = LLC_UI,
		.llc_snap.org_code = {0x0, 0x0, 0x0}
	};
	struct ieee80211_qosframe_addr4 wh;	/* Max size address frames */
	struct ether_header *eh;
	struct llc *llc;
	__be16 ether_type = 0;

	/* Make a copy of the variably sized .11 header */
	memcpy(&wh, skb->data, hdrlen);

	llc = (struct llc *)skb_pull(skb, hdrlen);
	/*
	 * XXX: For some unknown reason some APs think they are from DEC and
	 * use an OUI of 00-00-f8.  This should be killed as soon as sanity
	 * is restored.
	 */
	if ((skb->len >= LLC_SNAPFRAMELEN) &&
	    (memcmp(&snap_hdr, llc, 5) == 0) &&
	    ((llc->llc_snap.org_code[2] == 0x0) ||
	     (llc->llc_snap.org_code[2] == 0xf8))) {
		ether_type = llc->llc_un.type_snap.ether_type;
		skb_pull(skb, LLC_SNAPFRAMELEN);
		llc = NULL;
	}

	eh = (struct ether_header *)skb_push(skb, sizeof(struct ether_header));
	switch (wh.i_fc[1] & IEEE80211_FC1_DIR_MASK) {
	case IEEE80211_FC1_DIR_NODS:
		IEEE80211_ADDR_COPY(eh->ether_dhost, wh.i_addr1);
		IEEE80211_ADDR_COPY(eh->ether_shost, wh.i_addr2);
		break;
	case IEEE80211_FC1_DIR_TODS:
		IEEE80211_ADDR_COPY(eh->ether_dhost, wh.i_addr3);
		IEEE80211_ADDR_COPY(eh->ether_shost, wh.i_addr2);
		break;
	case IEEE80211_FC1_DIR_FROMDS:
		IEEE80211_ADDR_COPY(eh->ether_dhost, wh.i_addr1);
		IEEE80211_ADDR_COPY(eh->ether_shost, wh.i_addr3);
		break;
	case IEEE80211_FC1_DIR_DSTODS:
		IEEE80211_ADDR_COPY(eh->ether_dhost, wh.i_addr3);
		IEEE80211_ADDR_COPY(eh->ether_shost, wh.i_addr4);
		break;
	}

	if (llc != NULL)
		eh->ether_type = htons(skb->len - sizeof(*eh));
	else
		eh->ether_type = ether_type;
	return skb;
}
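/*
 * Decapsulation sketch (illustrative, matching ieee80211_decap() above):
 * an RFC 1042/SNAP payload begins AA AA 03 00 00 00 followed by the
 * 16-bit ethertype, so the rewrite is
 *
 *	[802.11 hdr][AA AA 03 00 00 00][type][payload]
 *	    -> [dst][src][type][payload]
 *
 * When no SNAP header is recognized, the frame is emitted in 802.3
 * form instead, with the length of the remaining payload in place of
 * the ethertype.
 */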
/*
 * Install received rate set information in the node's state block.
 */
int
ieee80211_setup_rates(struct ieee80211_node *ni, const u_int8_t *rates,
	const u_int8_t *xrates, int flags)
{
	struct ieee80211_rateset *rs = &ni->ni_rates;

	memset(rs, 0, sizeof(*rs));
	rs->rs_nrates = rates[1];
	memcpy(rs->rs_rates, rates + 2, rs->rs_nrates);
	if (xrates != NULL) {
		u_int8_t nxrates;
		/*
		 * Tack on 11g extended supported rate element.
		 */
		nxrates = xrates[1];
		if (rs->rs_nrates + nxrates > IEEE80211_RATE_MAXSIZE) {
			struct ieee80211vap *vap = ni->ni_vap;

			nxrates = IEEE80211_RATE_MAXSIZE - rs->rs_nrates;
			IEEE80211_NOTE(vap, IEEE80211_MSG_XRATE, ni,
				"extended rate set too large;"
				" only using %u of %u rates",
				nxrates, xrates[1]);
			vap->iv_stats.is_rx_rstoobig++;
		}
		memcpy(rs->rs_rates + rs->rs_nrates, xrates + 2, nxrates);
		rs->rs_nrates += nxrates;
	}
	return ieee80211_fix_rate(ni, flags);
}

static void
ieee80211_auth_open(struct ieee80211_node *ni, struct ieee80211_frame *wh,
	int rssi, u_int64_t rtsf, u_int16_t seq, u_int16_t status)
{
	struct ieee80211vap *vap = ni->ni_vap;
	unsigned int tmpnode = 0;

	if (ni->ni_authmode == IEEE80211_AUTH_SHARED) {
		IEEE80211_DISCARD_MAC(vap, IEEE80211_MSG_AUTH,
			ni->ni_macaddr, "open auth",
			"bad sta auth mode %u", ni->ni_authmode);
		vap->iv_stats.is_rx_bad_auth++;	/* XXX maybe a unique error? */
		if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
			if (ni == vap->iv_bss) {
				ni = ieee80211_dup_bss(vap, wh->i_addr2, 0);
				if (ni == NULL)
					return;
				tmpnode = 1;
			}
			IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_AUTH,
				(seq + 1) | (IEEE80211_STATUS_ALG << 16));
			if (tmpnode)
				ieee80211_unref_node(&ni);
			return;
		}
	}
	switch (vap->iv_opmode) {
	case IEEE80211_M_IBSS:
		if (vap->iv_state != IEEE80211_S_RUN ||
		    seq != IEEE80211_AUTH_OPEN_REQUEST) {
			vap->iv_stats.is_rx_bad_auth++;
			return;
		}
		ieee80211_new_state(vap, IEEE80211_S_AUTH,
			wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK);
		break;
	case IEEE80211_M_AHDEMO:
	case IEEE80211_M_WDS: