📄 tcp.c
    skb->len = 0;
    skb->sk = sk;
    skb->free = 0;
    buff = skb->data;

    /*
     * FIXME: we need to optimize this.
     * Perhaps some hints here would be good.
     */
    tmp = prot->build_header(skb, sk->saddr, sk->daddr, &dev,
                             IPPROTO_TCP, sk->opt, skb->mem_len, sk->ip_tos, sk->ip_ttl);
    if (tmp < 0) {
      prot->wfree(sk, skb->mem_addr, skb->mem_len);
      release_sock(sk);
      DPRINTF((DBG_TCP, "tcp_write: return 6\n"));
      if (copied) return(copied);
      return(tmp);
    }
    skb->len += tmp;
    skb->dev = dev;
    buff += tmp;
    skb->h.th = (struct tcphdr *) buff;

    tmp = tcp_build_header((struct tcphdr *)buff, sk, len - copy);
    if (tmp < 0) {
      prot->wfree(sk, skb->mem_addr, skb->mem_len);
      release_sock(sk);
      DPRINTF((DBG_TCP, "tcp_write: return 7\n"));
      if (copied) return(copied);
      return(tmp);
    }

    if (flags & MSG_OOB) {
      ((struct tcphdr *)buff)->urg = 1;
      ((struct tcphdr *)buff)->urg_ptr = ntohs(copy);
    }
    skb->len += tmp;
    memcpy_fromfs(buff + tmp, from, copy);

    from += copy;
    copied += copy;
    len -= copy;
    skb->len += copy;
    skb->free = 0;
    sk->write_seq += copy;

    if (send_tmp != NULL && sk->packets_out) {
      tcp_enqueue_partial(send_tmp, sk);
      continue;
    }
    tcp_send_skb(sk, skb);
  }
  sk->err = 0;

  /*
   * Nagle's rule.  Turn Nagle off with TCP_NODELAY for highly
   * interactive fast network servers.  It's meant to be on and
   * it really improves the throughput though not the echo time
   * on my slow slip link - Alan
   */

  /* Avoid possible race on send_tmp - c/o Johannes Stille */
  if (sk->partial && ((!sk->packets_out)
     /* If not nagling we can send on the before case too.. */
     || (sk->nonagle && before(sk->write_seq, sk->window_seq))))
        tcp_send_partial(sk);
  /* -- */
  release_sock(sk);
  DPRINTF((DBG_TCP, "tcp_write: return 8\n"));
  return(copied);
}


static int
tcp_sendto(struct sock *sk, unsigned char *from,
           int len, int nonblock, unsigned flags,
           struct sockaddr_in *addr, int addr_len)
{
  struct sockaddr_in sin;

  if (addr_len < sizeof(sin)) return(-EINVAL);
  memcpy_fromfs(&sin, addr, sizeof(sin));
  if (sin.sin_family && sin.sin_family != AF_INET) return(-EINVAL);
  if (sin.sin_port != sk->dummy_th.dest) return(-EINVAL);
  if (sin.sin_addr.s_addr != sk->daddr) return(-EINVAL);
  return(tcp_write(sk, from, len, nonblock, flags));
}
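/*
 * The Nagle comment in tcp_write() above points at TCP_NODELAY as the way to
 * turn the algorithm off.  What follows is a minimal user-space sketch, not
 * part of this kernel file: it assumes an already-connected socket descriptor,
 * and the names "example_disable_nagle" and "connected_fd" are illustrative.
 */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static int example_disable_nagle(int connected_fd)
{
  int one = 1;

  /* Ask TCP to send small segments immediately instead of coalescing them. */
  return setsockopt(connected_fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one));
}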
static void
tcp_read_wakeup(struct sock *sk)
{
  int tmp;
  struct device *dev = NULL;
  struct tcphdr *t1;
  struct sk_buff *buff;

  DPRINTF((DBG_TCP, "in tcp read wakeup\n"));
  if (!sk->ack_backlog) return;

  /*
   * FIXME: we need to put code here to prevent this routine from
   * being called.  Being called once in a while is ok, so only check
   * if this is the second time in a row.
   */

  /*
   * We need to grab some memory, and put together an ack,
   * and then put it into the queue to be sent.
   */
  buff = sk->prot->wmalloc(sk, MAX_ACK_SIZE, 1, GFP_ATOMIC);
  if (buff == NULL) {
    /* Try again real soon. */
    reset_timer(sk, TIME_WRITE, 10);
    return;
  }

  buff->mem_addr = buff;
  buff->mem_len = MAX_ACK_SIZE;
  buff->len = sizeof(struct tcphdr);
  buff->sk = sk;

  /* Put in the IP header and routing stuff. */
  tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
                               IPPROTO_TCP, sk->opt, MAX_ACK_SIZE, sk->ip_tos, sk->ip_ttl);
  if (tmp < 0) {
    buff->free = 1;
    sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
    return;
  }
  buff->len += tmp;
  t1 = (struct tcphdr *)(buff->data + tmp);

  memcpy(t1, (void *) &sk->dummy_th, sizeof(*t1));
  t1->seq = htonl(sk->sent_seq);
  t1->ack = 1;
  t1->res1 = 0;
  t1->res2 = 0;
  t1->rst = 0;
  t1->urg = 0;
  t1->syn = 0;
  t1->psh = 0;
  sk->ack_backlog = 0;
  sk->bytes_rcv = 0;
  sk->window = tcp_select_window(sk); /* sk->prot->rspace(sk); */
  t1->window = ntohs(sk->window);
  t1->ack_seq = ntohl(sk->acked_seq);
  t1->doff = sizeof(*t1)/4;
  tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);
  sk->prot->queue_xmit(sk, dev, buff, 1);
}


/*
 * FIXME:
 * This routine frees used buffers.
 * It should consider sending an ACK to let the
 * other end know we now have a bigger window.
 */
static void
cleanup_rbuf(struct sock *sk)
{
  unsigned long flags;
  int left;
  struct sk_buff *skb;

  if (sk->debug)
    printk("cleaning rbuf for sk=%p\n", sk);

  save_flags(flags);
  cli();

  left = sk->prot->rspace(sk);

  /*
   * We have to loop through all the buffer headers,
   * and try to free up all the space we can.
   */
  while ((skb = skb_peek(&sk->rqueue)) != NULL) {
    if (!skb->used) break;
    skb_unlink(skb);
    skb->sk = sk;
    kfree_skb(skb, FREE_READ);
  }

  restore_flags(flags);

  /*
   * FIXME:
   * At this point we should send an ack if the difference
   * in the window, and the amount of space is bigger than
   * TCP_WINDOW_DIFF.
   */
  DPRINTF((DBG_TCP, "sk->window left = %d, sk->prot->rspace(sk)=%d\n",
           sk->window - sk->bytes_rcv, sk->prot->rspace(sk)));

  if (sk->debug)
    printk("sk->rspace = %lu, was %d\n", sk->prot->rspace(sk), left);

  if (sk->prot->rspace(sk) != left) {
    /*
     * This area has caused the most trouble.  The current strategy
     * is to simply do nothing if the other end has room to send at
     * least 3 full packets, because the ack from those will
     * automatically update the window.  If the other end doesn't
     * think we have much space left, but we have room for at least
     * 1 more complete packet than it thinks we do, we will send an
     * ack immediately.  Otherwise we will wait up to .5 seconds in
     * case the user reads some more.
     */
    sk->ack_backlog++;
    /*
     * It's unclear whether to use sk->mtu or sk->mss here.  They differ only
     * if the other end is offering a window smaller than the agreed on MSS
     * (called sk->mtu here).  In theory there's no connection between send
     * and receive, and so no reason to think that they're going to send
     * small packets.  For the moment I'm using the hack of reducing the mss
     * only on the send side, so I'm putting mtu here.
     */
    if ((sk->prot->rspace(sk) > (sk->window - sk->bytes_rcv + sk->mtu))) {
      /* Send an ack right now. */
      tcp_read_wakeup(sk);
    } else {
      /* Force it to send an ack soon. */
      int was_active = del_timer(&sk->timer);
      if (!was_active || TCP_ACK_TIME < sk->timer.expires) {
        reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
      } else
        add_timer(&sk->timer);
    }
  }
}
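/*
 * To make the window-update policy in cleanup_rbuf() above easier to follow,
 * here is the same decision restated as a stand-alone sketch.  The struct and
 * function names are hypothetical, not the kernel's; only the inequality is
 * taken from the code above.
 */
struct example_rcv_state {
  unsigned long rspace;    /* free receive buffer space right now    */
  unsigned long window;    /* window we last advertised to the peer  */
  unsigned long bytes_rcv; /* bytes received against that window     */
  unsigned long mtu;       /* one full-sized segment                 */
};

/* Returns 1 when an ACK (window update) should go out immediately,
 * 0 when it can wait for the delayed-ACK timer (TCP_ACK_TIME above). */
static int example_should_ack_now(const struct example_rcv_state *s)
{
  /* Same test as above: ACK at once only when the freed space exceeds the
   * window the peer still believes it has by at least one whole segment. */
  return s->rspace > (s->window - s->bytes_rcv + s->mtu);
}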
/* Handle reading urgent data. */
static int
tcp_read_urg(struct sock * sk, int nonblock,
             unsigned char *to, int len, unsigned flags)
{
  struct wait_queue wait = { current, NULL };

  while (len > 0) {
    if (sk->urginline || !sk->urg_data || sk->urg_data == URG_READ)
      return -EINVAL;
    if (sk->urg_data & URG_VALID) {
      char c = sk->urg_data;
      if (!(flags & MSG_PEEK)) sk->urg_data = URG_READ;
      put_fs_byte(c, to);
      return 1;
    }

    if (sk->err) {
      int tmp = -sk->err;
      sk->err = 0;
      return tmp;
    }

    if (sk->state == TCP_CLOSE || sk->done) {
      if (!sk->done) {
        sk->done = 1;
        return 0;
      }
      return -ENOTCONN;
    }

    if (sk->shutdown & RCV_SHUTDOWN) {
      sk->done = 1;
      return 0;
    }

    if (nonblock) return -EAGAIN;

    if (current->signal & ~current->blocked) return -ERESTARTSYS;

    current->state = TASK_INTERRUPTIBLE;
    add_wait_queue(sk->sleep, &wait);
    if ((sk->urg_data & URG_NOTYET) && sk->err == 0 &&
        !(sk->shutdown & RCV_SHUTDOWN))
      schedule();
    remove_wait_queue(sk->sleep, &wait);
    current->state = TASK_RUNNING;
  }
  return 0;
}


/* This routine copies from a sock struct into the user buffer. */
static int
tcp_read(struct sock *sk, unsigned char *to,
         int len, int nonblock, unsigned flags)
{
  struct wait_queue wait = { current, NULL };
  int copied = 0;
  unsigned long peek_seq;
  unsigned long *seq;
  unsigned long used;
  int err;

  if (len == 0) return 0;
  if (len < 0) return -EINVAL;
  err = verify_area(VERIFY_WRITE, to, len);
  if (err) return err;

  /* This error should be checked. */
  if (sk->state == TCP_LISTEN) return -ENOTCONN;

  /* Urgent data needs to be handled specially. */
  if (flags & MSG_OOB)
    return tcp_read_urg(sk, nonblock, to, len, flags);

  peek_seq = sk->copied_seq;
  seq = &sk->copied_seq;
  if (flags & MSG_PEEK) seq = &peek_seq;

  add_wait_queue(sk->sleep, &wait);
  sk->inuse = 1;
  while (len > 0) {
    struct sk_buff * skb;
    unsigned long offset;

    /* Are we at urgent data?  Stop if we have read anything. */
    if (copied && sk->urg_data && sk->urg_seq == 1 + *seq) break;

    current->state = TASK_INTERRUPTIBLE;
    skb = sk->rqueue;
    do {
      if (!skb) break;
      if (before(1 + *seq, skb->h.th->seq)) break;
      offset = 1 + *seq - skb->h.th->seq;
      if (skb->h.th->syn) offset--;
      if (offset < skb->len) goto found_ok_skb;
      if (!(flags & MSG_PEEK)) skb->used = 1;
      skb = (struct sk_buff *)skb->next;
    } while (skb != sk->rqueue);

    if (copied) break;

    if (sk->err) {
      copied = -sk->err;
      sk->err = 0;
      break;
    }

    if (sk->state == TCP_CLOSE) {
      if (!sk->done) {
        sk->done = 1;
        break;
      }
      copied = -ENOTCONN;
      break;
    }

    if (sk->shutdown & RCV_SHUTDOWN) {
      sk->done = 1;
      break;
    }

    if (nonblock) {
      copied = -EAGAIN;
      break;
    }

    cleanup_rbuf(sk);
    release_sock(sk);
    schedule();
    sk->inuse = 1;

    if (current->signal & ~current->blocked) {
      copied = -ERESTARTSYS;
      break;
    }
    continue;

found_ok_skb:
    /* Ok so how much can we use? */
    used = skb->len - offset;
    if (len < used) used = len;

    /* Do we have urgent data here? */
    if (sk->urg_data) {
      unsigned long urg_offset = sk->urg_seq - (1 + *seq);
      if (urg_offset < used) {
        if (!urg_offset) {
          if (!sk->urginline) {
            ++*seq;
            offset++;
            used--;
          }
        } else
          used = urg_offset;
      }
    }

    /* Copy it. */
    memcpy_tofs(to, ((unsigned char *)skb->h.th) +
                skb->h.th->doff*4 + offset, used);
    copied += used;
    len -= used;
    to += used;
    *seq += used;
    if (after(sk->copied_seq + 1, sk->urg_seq)) sk->urg_data = 0;
    if (!(flags & MSG_PEEK) && (used + offset >= skb->len)) skb->used = 1;
  }
  remove_wait_queue(sk->sleep, &wait);
  current->state = TASK_RUNNING;

  /* Clean up data we have read: this will do ACK frames. */
  cleanup_rbuf(sk);
  release_sock(sk);
  DPRINTF((DBG_TCP, "tcp_read: returning %d\n", copied));
  return copied;
}
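/*
 * tcp_read_urg() above serves MSG_OOB reads of the single byte of TCP urgent
 * data.  A minimal user-space sketch, not kernel code, of the two ways an
 * application sees that byte; the "example_" helpers and "connected_fd" are
 * illustrative names.
 */
#include <sys/socket.h>
#include <sys/types.h>

static ssize_t example_read_urgent(int connected_fd, char *out)
{
  /* Pull the pending urgent byte out of band. */
  return recv(connected_fd, out, 1, MSG_OOB);
}

static int example_inline_urgent(int connected_fd)
{
  int one = 1;

  /* Or deliver it inline with the normal stream (sk->urginline above), after
   * which MSG_OOB reads fail with EINVAL, as the first check in
   * tcp_read_urg() shows. */
  return setsockopt(connected_fd, SOL_SOCKET, SO_OOBINLINE, &one, sizeof(one));
}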
/*
 * Send a FIN without closing the connection.
 * Not called at interrupt time.
 */
void
tcp_shutdown(struct sock *sk, int how)
{
  struct sk_buff *buff;
  struct tcphdr *t1, *th;
  struct proto *prot;
  int tmp;
  struct device *dev = NULL;

  /*
   * We need to grab some memory, and put together a FIN,
   * and then put it into the queue to be sent.
   * FIXME:
   * Tim MacKenzie (tym@dibbler.cs.monash.edu.au) 4 Dec '92.
   * Most of this is guesswork, so maybe it will work...
   */
  /* If we've already sent a FIN, return. */
  if (sk->state == TCP_FIN_WAIT1 || sk->state == TCP_FIN_WAIT2) return;
  if (!(how & SEND_SHUTDOWN)) return;
  sk->inuse = 1;

  /* Clear out any half completed packets. */
  if (sk->partial) tcp_send_partial(sk);

  prot = (struct proto *)sk->prot;
  th = (struct tcphdr *)&sk->dummy_th;
  release_sock(sk); /* in case the malloc sleeps. */

  buff = prot->wmalloc(sk, MAX_RESET_SIZE, 1, GFP_KERNEL);
  if (buff == NULL) return;
  sk->inuse = 1;

  DPRINTF((DBG_TCP, "tcp_shutdown_send buff = %X\n", buff));
  buff->mem_addr = buff;
  buff->mem_len = MAX_RESET_SIZE;
  buff->sk = sk;
  buff->len = sizeof(*t1);
  t1 = (struct tcphdr *) buff->data;

  /* Put in the IP header and routing stuff. */
  tmp = prot->build_header(buff, sk->saddr, sk->daddr, &dev,
                           IPPROTO_TCP, sk->opt, sizeof(struct tcphdr), sk->ip_tos, sk->ip_ttl);
  if (tmp < 0) {
    buff->free = 1;
    prot->wfree(sk, buff->mem_addr, buff->mem_len);
    release_sock(sk);
    DPRINTF((DBG_TCP, "Unable to build header for fin.\n"));
    return;
  }

  t1 = (struct tcphdr *)((char *)t1 + tmp);
  buff->len += tmp;
  buff->dev = dev;
  memcpy(t1, th, sizeof(*t1));
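/*
 * tcp_shutdown() above is what backs a half-close: the FIN is queued but the
 * socket stays open for reading.  A minimal user-space sketch, not kernel
 * code, of triggering it; "example_half_close" and "connected_fd" are
 * illustrative names.
 */
#include <sys/socket.h>

static int example_half_close(int connected_fd)
{
  /* Stop sending (SEND_SHUTDOWN in the code above, so our FIN goes out) while
   * keeping the receive side open for whatever the peer still has to send. */
  return shutdown(connected_fd, SHUT_WR);
}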