ip_input.c
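/*
 * NAT-PT translation hook: natpt_in4() may translate the inbound
 * packet into a new chain m1.  Its return value tells us whether the
 * packet was left unchanged, must be forwarded as IPv4 or as IPv6,
 * has already been consumed, or should simply be discarded.
 */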
switch (natpt_in4(m, &m1)) {
case IPPROTO_IP: /* this packet is not changed */
goto checkaddresses;
case IPPROTO_IPV4:
ip_forward(m1, 0);
break;
case IPPROTO_IPV6:
ip6_forward(m1, 1);
break;
case IPPROTO_DONE: /* discard without free */
return;
case IPPROTO_MAX: /* discard this packet */
default:
break;
}
if (m != m1)
m_freem(m);
return;
}
checkaddresses:;
#endif
/*
* Check our list of addresses, to see if the packet is for us.
* If we don't have any addresses, assume any unicast packet
* we receive might be for us (and let the upper layers deal
* with it).
*/
if (TAILQ_EMPTY(&in_ifaddrhead) &&
(m->m_flags & (M_MCAST|M_BCAST)) == 0)
goto ours;
/*
* Cache the destination address of the packet; this may be
* changed by use of 'ipfw fwd'.
*/
pkt_dst = ip_fw_fwd_addr == NULL ?
ip->ip_dst : ip_fw_fwd_addr->sin_addr;
/*
* Enable a consistency check between the destination address
* and the arrival interface for a unicast packet (the RFC 1122
* strong ES model) if IP forwarding is disabled and the packet
* is not locally generated and the packet is not subject to
* 'ipfw fwd'.
*
* XXX - Checking also should be disabled if the destination
* address is ipnat'ed to a different interface.
*
* XXX - Checking is incompatible with IP aliases added
* to the loopback interface instead of the interface where
* the packets are received.
*/
checkif = ip_checkinterface && (ipforwarding == 0) &&
((m->m_pkthdr.rcvif->if_flags & IFF_LOOPBACK) == 0) &&
(ip_fw_fwd_addr == NULL);
TAILQ_FOREACH(ia, &in_ifaddrhead, ia_link) {
#define satosin(sa) ((struct sockaddr_in *)(sa))
#ifdef BOOTP_COMPAT
if (IA_SIN(ia)->sin_addr.s_addr == INADDR_ANY)
goto ours;
#endif
/*
* If the address matches, verify that the packet
* arrived via the correct interface if checking is
* enabled.
*/
if (IA_SIN(ia)->sin_addr.s_addr == pkt_dst.s_addr &&
(!checkif || ia->ia_ifp == m->m_pkthdr.rcvif))
goto ours;
/*
 * Only accept broadcast packets that arrive via the
 * matching interface.  Forwarded directed broadcasts are
 * handled by ip_forward() and ether_output(); for SIMPLEX
 * interfaces, ether_output() takes care of looping the
 * packet back into the stack.
 */
if (ia->ia_ifp == m->m_pkthdr.rcvif &&
ia->ia_ifp && ia->ia_ifp->if_flags & IFF_BROADCAST) {
if (satosin(&ia->ia_broadaddr)->sin_addr.s_addr ==
pkt_dst.s_addr)
goto ours;
if (ia->ia_netbroadcast.s_addr == pkt_dst.s_addr)
goto ours;
}
}
if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
struct in_multi *inm;
if (ip_mrouter) {
/*
* If we are acting as a multicast router, all
* incoming multicast packets are passed to the
* kernel-level multicast forwarding function.
* The packet is returned (relatively) intact; if
* ip_mforward() returns a non-zero value, the packet
* must be discarded, else it may be accepted below.
*/
if (ip_mforward(ip, m->m_pkthdr.rcvif, m, 0) != 0) {
ipstat.ips_cantforward++;
m_freem(m);
return;
}
/*
 * The process-level routing daemon needs to receive
 * all multicast IGMP packets, whether or not this
 * host belongs to their destination groups.
 */
if (ip->ip_p == IPPROTO_IGMP)
goto ours;
ipstat.ips_forward++;
}
/*
* See if we belong to the destination multicast group on the
* arrival interface.
*/
IN_LOOKUP_MULTI(ip->ip_dst, m->m_pkthdr.rcvif, inm);
if (inm == NULL) {
ipstat.ips_notmember++;
m_freem(m);
return;
}
goto ours;
}
if (ip->ip_dst.s_addr == (u_long)INADDR_BROADCAST)
goto ours;
if (ip->ip_dst.s_addr == INADDR_ANY)
goto ours;
#if defined(NFAITH) && 0 < NFAITH
/*
 * FAITH (Firewall Aided Internet Translator)
 */
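/*
 * Packets arriving on a faith pseudo-interface (IFT_FAITH) are
 * accepted locally (TCP and ICMP only, for the translator relay)
 * while ip_keepfaith is set; everything else arriving on that
 * interface is dropped.
 */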
if (m->m_pkthdr.rcvif && m->m_pkthdr.rcvif->if_type == IFT_FAITH) {
if (ip_keepfaith) {
if (ip->ip_p == IPPROTO_TCP || ip->ip_p == IPPROTO_ICMP)
goto ours;
}
m_freem(m);
return;
}
#endif
/*
* Not for us; forward if possible and desirable.
*/
if (ipforwarding == 0) {
ipstat.ips_cantforward++;
m_freem(m);
} else
ip_forward(m, 0);
#ifdef IPFIREWALL_FORWARD
ip_fw_fwd_addr = NULL;
#endif
return;
ours:
/* Count the packet in the ip address stats */
if (ia != NULL) {
ia->ia_ifa.if_ipackets++;
ia->ia_ifa.if_ibytes += m->m_pkthdr.len;
}
/*
* If offset or IP_MF are set, must reassemble.
* Otherwise, nothing need be done.
* (We could look in the reassembly queue to see
* if the packet was previously fragmented,
* but it's not worth the time; just let them time out.)
*/
if (ip->ip_off & (IP_MF | IP_OFFMASK | IP_RF)) {
#if 0 /*
 * Reassembly should be able to handle an mbuf cluster, so that
 * contiguous protocol headers on the cluster can be operated on
 * later. (KAME)
 */
if (m->m_flags & M_EXT) { /* XXX */
if ((m = m_pullup(m, hlen)) == 0) {
ipstat.ips_toosmall++;
#ifdef IPFIREWALL_FORWARD
ip_fw_fwd_addr = NULL;
#endif
return;
}
ip = mtod(m, struct ip *);
}
#endif
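/*
 * Hash on the source address and IP id to pick the reassembly
 * bucket that holds fragments of this datagram.
 */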
sum = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id);
/*
* Look for queue of fragments
* of this datagram.
*/
for (fp = ipq[sum].next; fp != &ipq[sum]; fp = fp->next)
if (ip->ip_id == fp->ipq_id &&
ip->ip_src.s_addr == fp->ipq_src.s_addr &&
ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
ip->ip_p == fp->ipq_p)
goto found;
fp = 0;
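/*
 * No existing reassembly queue matched; if this really is a
 * fragment, ip_reass() will start a new queue for it.
 */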
/* check if there's a place for the new queue */
if (nipq > maxnipq) {
/*
* drop something from the tail of the current queue
* before proceeding further
*/
if (ipq[sum].prev == &ipq[sum]) { /* gak */
for (i = 0; i < IPREASS_NHASH; i++) {
if (ipq[i].prev != &ipq[i]) {
ip_freef(ipq[i].prev);
break;
}
}
} else
ip_freef(ipq[sum].prev);
}
found:
/*
 * Adjust ip_len to not reflect the header, note (via mff and
 * M_FRAG) whether more fragments are expected, and convert the
 * fragment offset to bytes.
 */
ip->ip_len -= hlen;
mff = (ip->ip_off & IP_MF) != 0;
if (mff) {
/*
* Make sure that fragments have a data length
* that's a non-zero multiple of 8 bytes.
*/
if (ip->ip_len == 0 || (ip->ip_len & 0x7) != 0) {
ipstat.ips_toosmall++; /* XXX */
goto bad;
}
m->m_flags |= M_FRAG;
}
ip->ip_off <<= 3;
/*
* If datagram marked as having more fragments
* or if this is not the first fragment,
* attempt reassembly; if it succeeds, proceed.
*/
if (mff || ip->ip_off) {
ipstat.ips_fragments++;
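/* Stash a pointer to the IP header; ip_reass() recovers it via GETIP(). */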
m->m_pkthdr.header = ip;
#ifdef IPDIVERT
m = ip_reass(m,
fp, &ipq[sum], &divert_info, &divert_cookie);
#else
m = ip_reass(m, fp, &ipq[sum]);
#endif
if (m == 0) {
#ifdef IPFIREWALL_FORWARD
ip_fw_fwd_addr = NULL;
#endif
return;
}
ipstat.ips_reassembled++;
ip = mtod(m, struct ip *);
/* Get the header length of the reassembled packet */
hlen = IP_VHL_HL(ip->ip_vhl) << 2;
#ifdef IPDIVERT
/* Restore original checksum before diverting packet */
if (divert_info != 0) {
ip->ip_len += hlen;
HTONS(ip->ip_len);
HTONS(ip->ip_off);
ip->ip_sum = 0;
if (hlen == sizeof(struct ip))
ip->ip_sum = in_cksum_hdr(ip);
else
ip->ip_sum = in_cksum(m, hlen);
NTOHS(ip->ip_off);
NTOHS(ip->ip_len);
ip->ip_len -= hlen;
}
#endif
} else
if (fp)
ip_freef(fp);
} else
ip->ip_len -= hlen;
#ifdef IPDIVERT
/*
* Divert or tee packet to the divert protocol if required.
*
* If divert_info is zero then cookie should be too, so we shouldn't
* need to clear them here. Assume divert_packet() does so also.
*/
if (divert_info != 0) {
struct mbuf *clone = NULL;
/* Clone packet if we're doing a 'tee' */
if ((divert_info & IP_FW_PORT_TEE_FLAG) != 0)
clone = m_dup(m, M_DONTWAIT);
/* Restore packet header fields to original values */
ip->ip_len += hlen;
HTONS(ip->ip_len);
HTONS(ip->ip_off);
/* Deliver packet to divert input routine */
ip_divert_cookie = divert_cookie;
divert_packet(m, 1, divert_info & 0xffff);
ipstat.ips_delivered++;
/* If 'tee', continue with original packet */
if (clone == NULL)
return;
m = clone;
ip = mtod(m, struct ip *);
}
#endif
#ifdef IPSEC
/*
 * Enforce IPsec policy checking if we are seeing the last header.
 * Note that this is not done here for protocols that have pcb-layer
 * code, such as UDP, TCP, and raw IP.
 */
if ((inetsw[ip_protox[ip->ip_p]].pr_flags & PR_LASTHDR) != 0 &&
ipsec4_in_reject(m, NULL)) {
ipsecstat.in_polvio++;
goto bad;
}
#endif
/*
* Switch out to protocol's input routine.
*/
ipstat.ips_delivered++;
{
int off = hlen;
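/* ip_protox[] maps the IP protocol number to its inetsw[] entry. */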
(*inetsw[ip_protox[ip->ip_p]].pr_input)(m, off);
#ifdef IPFIREWALL_FORWARD
ip_fw_fwd_addr = NULL; /* tcp needed it */
#endif
return;
}
bad:
#ifdef IPFIREWALL_FORWARD
ip_fw_fwd_addr = NULL;
#endif
m_freem(m);
}
/*
* IP software interrupt routine - to go away sometime soon
*/
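/*
 * ipintr() runs as the IP software interrupt: it drains the ipintrq
 * input queue, which the link-layer receive path fills, and hands
 * each dequeued packet to ip_input().
 */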
static void
ipintr(void)
{
int s;
struct mbuf *m;
while(1) {
s = splimp();
IF_DEQUEUE(&ipintrq, m);
splx(s);
if (m == 0)
return;
ip_input(m);
}
}
/*
 * Take an incoming datagram fragment and try to reassemble it into a
 * whole datagram.  If a chain for reassembly of this datagram already
 * exists, it is given as fp; otherwise we have to make a chain.
 *
 * When IPDIVERT is enabled, keep additional state with each packet that
 * tells us whether we need to divert or tee the packet we're building.
 */
static struct mbuf *
#ifdef IPDIVERT
ip_reass(m, fp, where, divinfo, divcookie)
#else
ip_reass(m, fp, where)
#endif
register struct mbuf *m;
register struct ipq *fp;
struct ipq *where;
#ifdef IPDIVERT
u_int32_t *divinfo;
u_int16_t *divcookie;
#endif
{
struct ip *ip = mtod(m, struct ip *);
register struct mbuf *p = 0, *q, *nq;
struct mbuf *t;
int hlen = IP_VHL_HL(ip->ip_vhl) << 2;
int i, next;
/*
 * Strip the IP header from the mbuf data; leaving the header
 * bytes in place would confuse the code below.
 */
m->m_data += hlen;
m->m_len -= hlen;
/*
* If first fragment to arrive, create a reassembly queue.
*/
if (fp == 0) {
/*
 * Enforce an upper bound on the number of fragmented packets
 * for which we attempt reassembly:
 * if ip_maxfragpackets is 0, never accept fragments;
 * if ip_maxfragpackets is -1, accept all fragments without limit.
 */
if ((ip_maxfragpackets >= 0) && (ip_nfragpackets >= ip_maxfragpackets))
goto dropfrag;
ip_nfragpackets++;
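/* The reassembly queue header itself lives in a small mbuf (MT_FTABLE). */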
if ((t = m_get(M_DONTWAIT, MT_FTABLE)) == NULL)
goto dropfrag;
fp = mtod(t, struct ipq *);
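/* Link the new queue header at the front of its hash bucket. */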
insque(fp, where);
nipq++;
fp->ipq_ttl = IPFRAGTTL;
fp->ipq_p = ip->ip_p;
fp->ipq_id = ip->ip_id;
fp->ipq_src = ip->ip_src;
fp->ipq_dst = ip->ip_dst;
fp->ipq_frags = m;
m->m_nextpkt = NULL;
#ifdef IPDIVERT
fp->ipq_div_info = 0;
fp->ipq_div_cookie = 0;
#endif
goto inserted;
}
#define GETIP(m) ((struct ip*)((m)->m_pkthdr.header))
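/*
 * Each queued fragment keeps a pointer to its own IP header in
 * m_pkthdr.header (stashed by ip_input() before calling ip_reass());
 * GETIP() recovers it.
 */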
/*
* Find a segment which begins after this one does.
*/
for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
if (GETIP(q)->ip_off > ip->ip_off)
break;
/*
 * If there is a preceding segment, it may provide some of
 * our data already.  If so, drop the data from the incoming
 * segment.  If it provides all of our data, drop us; otherwise
 * stick the new segment in the proper place.
 *
 * If some of the data is dropped from the preceding
 * segment, then its checksum is invalidated.
 */
if (p) {
i = GETIP(p)->ip_off + GETIP(p)->ip_len - ip->ip_off;
if (i > 0) {
if (i >= ip->ip_len)
goto dropfrag;
m_adj(m, i);
m->m_pkthdr.csum_flags = 0;
ip->ip_off += i;
ip->ip_len -= i;
}
m->m_nextpkt = p->m_nextpkt;
p->m_nextpkt = m;
} else {
m->m_nextpkt = fp->ipq_frags;
fp->ipq_frags = m;
}
/*
* While we overlap succeeding segments trim them or,
* if they are completely covered, dequeue them.
*/
for (; q != NULL && ip->ip_off + ip->ip_len > GETIP(q)->ip_off;
q = nq) {
i = (ip->ip_off + ip->ip_len) -
GETIP(q)->ip_off;