📄 tcp.c
字号:
1024 skb->h.th =(struct tcphdr *) buff;
1025 tmp = tcp_build_header((struct tcphdr *)buff, sk, len-copy);
1026 if (tmp < 0) {
1027 prot->wfree(sk, skb->mem_addr, skb->mem_len);
1028 release_sock(sk);
1029 DPRINTF((DBG_TCP, "tcp_write: return 7\n"));
1030 if (copied) return(copied);
1031 return(tmp);
1032 }
1033
1034 if (flags & MSG_OOB) {
1035 ((struct tcphdr *)buff)->urg = 1;
1036 ((struct tcphdr *)buff)->urg_ptr = ntohs(copy);
1037 }
1038 skb->len += tmp;
1039 memcpy_fromfs(buff+tmp, from, copy);
1040
1041 from += copy;
1042 copied += copy;
1043 len -= copy;
1044 skb->len += copy;
1045 skb->free = 0;
1046 sk->write_seq += copy;
1047
1048 if (send_tmp != NULL && sk->packets_out) {
1049 tcp_enqueue_partial(send_tmp, sk);
1050 continue;
1051 }
1052 tcp_send_skb(sk, skb);
1053 }
1054 sk->err = 0;
1055
1056 /*
1057 * Nagles rule. Turn Nagle off with TCP_NODELAY for highly
1058 * interactive fast network servers. It's meant to be on and
1059 * it really improves the throughput though not the echo time
1060 * on my slow slip link - Alan
1061 */
1062
1063 /* Avoid possible race on send_tmp - c/o Johannes Stille */
1064 if(sk->partial &&
1065 ((!sk->packets_out)
1066 /* If not nagling we can send on the before case too.. */
1067 || (sk->nonagle && before(sk->write_seq , sk->window_seq))
1068 ))
1069 tcp_send_partial(sk);
1070 /* -- */
1071 release_sock(sk);
1072 DPRINTF((DBG_TCP, "tcp_write: return 8\n"));
1073 return(copied);
1074 }
1075
1076
1077 static int
1078 tcp_sendto(struct sock *sk, unsigned char *from,
1079 int len, int nonblock, unsigned flags,
1080 struct sockaddr_in *addr, int addr_len)
1081 {
1082 struct sockaddr_in sin;
1083
1084 if (addr_len < sizeof(sin)) return(-EINVAL);
1085 memcpy_fromfs(&sin, addr, sizeof(sin));
1086 if (sin.sin_family && sin.sin_family != AF_INET) return(-EINVAL);
1087 if (sin.sin_port != sk->dummy_th.dest) return(-EINVAL);
1088 if (sin.sin_addr.s_addr != sk->daddr) return(-EINVAL);
1089 return(tcp_write(sk, from, len, nonblock, flags));
1090 }
1091
1092
1093 static void
1094 tcp_read_wakeup(struct sock *sk)
1095 {
1096 int tmp;
1097 struct device *dev = NULL;
1098 struct tcphdr *t1;
1099 struct sk_buff *buff;
1100
1101 DPRINTF((DBG_TCP, "in tcp read wakeup\n"));
1102 if (!sk->ack_backlog) return;
1103
1104 /*
1105 * FIXME: we need to put code here to prevent this routine from
1106 * being called. Being called once in a while is ok, so only check
1107 * if this is the second time in a row.
1108 */
1109
1110 /*
1111 * We need to grab some memory, and put together an ack,
1112 * and then put it into the queue to be sent.
1113 */
1114 buff = sk->prot->wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC);
1115 if (buff == NULL) {
1116 /* Try again real soon. */
1117 reset_timer(sk, TIME_WRITE, 10);
1118 return;
1119 }
1120
1121 buff->mem_addr = buff;
1122 buff->mem_len = MAX_ACK_SIZE;
1123 buff->len = sizeof(struct tcphdr);
1124 buff->sk = sk;
1125
1126 /* Put in the IP header and routing stuff. */
1127 tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
1128 IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl);
1129 if (tmp < 0) {
1130 buff->free=1;
1131 sk->prot->wfree(sk, buff->mem_addr, buff->mem_len);
1132 return;
1133 }
1134
1135 buff->len += tmp;
1136 t1 =(struct tcphdr *)(buff->data +tmp);
1137
1138 memcpy(t1,(void *) &sk->dummy_th, sizeof(*t1));
1139 t1->seq = htonl(sk->sent_seq);
1140 t1->ack = 1;
1141 t1->res1 = 0;
1142 t1->res2 = 0;
1143 t1->rst = 0;
1144 t1->urg = 0;
1145 t1->syn = 0;
1146 t1->psh = 0;
1147 sk->ack_backlog = 0;
1148 sk->bytes_rcv = 0;
1149 sk->window = tcp_select_window(sk);/*sk->prot->rspace(sk);*/
1150 t1->window = ntohs(sk->window);
1151 t1->ack_seq = ntohl(sk->acked_seq);
1152 t1->doff = sizeof(*t1)/4;
1153 tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), sk);
1154 sk->prot->queue_xmit(sk, dev, buff, 1);
1155 }
1156
1157
1158 /*
1159 * FIXME:
1160 * This routine frees used buffers.
1161 * It should consider sending an ACK to let the
1162 * other end know we now have a bigger window.
1163 */
/*
 * Free the sk_buffs on the receive queue that the user has finished
 * reading, then decide whether the recovered space is worth telling
 * the other end about with a window-update ACK (sent immediately or
 * scheduled via a short timer).
 *
 * NOTE(review): the queue walk runs with interrupts disabled and the
 * timer handling at the bottom is ordering-sensitive, so only the
 * documentation is touched here, not the code.
 */
static void
cleanup_rbuf(struct sock *sk)
{
	unsigned long flags;
	int left;
	struct sk_buff *skb;

	if(sk->debug)
		printk("cleaning rbuf for sk=%p\n", sk);

	/* Keep interrupt-time receive processing off the queue while we walk it. */
	save_flags(flags);
	cli();

	/* Snapshot the free space so we can tell below whether freeing helped. */
	left = sk->prot->rspace(sk);

	/*
	 * We have to loop through all the buffer headers,
	 * and try to free up all the space we can.
	 */
	while((skb=skb_peek(&sk->rqueue)) != NULL )
	{
		/* The first buffer the reader has not consumed ends the reclaimable prefix. */
		if (!skb->used)
			break;
		skb_unlink(skb);
		/* presumably so the free is accounted to this socket - confirm in kfree_skb */
		skb->sk = sk;
		kfree_skb(skb, FREE_READ);
	}

	restore_flags(flags);

	/*
	 * FIXME:
	 * At this point we should send an ack if the difference
	 * in the window, and the amount of space is bigger than
	 * TCP_WINDOW_DIFF.
	 */
	DPRINTF((DBG_TCP, "sk->window left = %d, sk->prot->rspace(sk)=%d\n",
			sk->window - sk->bytes_rcv, sk->prot->rspace(sk)));

	if(sk->debug)
		printk("sk->rspace = %lu, was %d\n", sk->prot->rspace(sk),
				left);
	/* Only bother the peer if the queue walk actually freed something. */
	if (sk->prot->rspace(sk) != left)
	{
		/*
		 * This area has caused the most trouble. The current strategy
		 * is to simply do nothing if the other end has room to send at
		 * least 3 full packets, because the ack from those will auto-
		 * matically update the window. If the other end doesn't think
		 * we have much space left, but we have room for at least 1 more
		 * complete packet than it thinks we do, we will send an ack
		 * immediately. Otherwise we will wait up to .5 seconds in case
		 * the user reads some more.
		 */
		sk->ack_backlog++;
		/*
		 * It's unclear whether to use sk->mtu or sk->mss here. They differ only
		 * if the other end is offering a window smaller than the agreed on MSS
		 * (called sk->mtu here). In theory there's no connection between send
		 * and receive, and so no reason to think that they're going to send
		 * small packets. For the moment I'm using the hack of reducing the mss
		 * only on the send side, so I'm putting mtu here.
		 */
		if ((sk->prot->rspace(sk) > (sk->window - sk->bytes_rcv + sk->mtu))) {
			/* Send an ack right now. */
			tcp_read_wakeup(sk);
		} else {
			/* Force it to send an ack soon. */
			int was_active = del_timer(&sk->timer);
			/*
			 * NOTE(review): TCP_ACK_TIME (an interval) is compared against
			 * sk->timer.expires - this assumes expires holds a relative
			 * value here; confirm against reset_timer()/add_timer usage.
			 */
			if (!was_active || TCP_ACK_TIME < sk->timer.expires) {
				reset_timer(sk, TIME_WRITE, TCP_ACK_TIME);
			} else
				add_timer(&sk->timer);
		}
	}
}
1240
1241
/*
 * Read out-of-band (urgent) data from a TCP socket.
 *
 * The single urgent byte lives in sk->urg_data together with state
 * bits (URG_VALID / URG_READ / URG_NOTYET). On success one byte is
 * copied to user space and 1 is returned; with MSG_PEEK the byte is
 * left marked unread. Blocks until urgent data arrives unless
 * 'nonblock' is set.
 *
 * Returns: 1 on success; 0 at end of connection; -EINVAL when OOB data
 * is delivered inline, absent, or already consumed; a pending socket
 * error; -ENOTCONN once the connection is done; -EAGAIN for
 * nonblocking callers; -ERESTARTSYS on signal.
 *
 * NOTE(review): the sleep sequence at the bottom is race-sensitive
 * (state must be set before the final condition re-check), so the
 * code is documented rather than restructured.
 */
static int
tcp_read_urg(struct sock * sk, int nonblock,
	     unsigned char *to, int len, unsigned flags)
{
	struct wait_queue wait = { current, NULL };

	while (len > 0) {
		/* Inline OOB, no urgent data at all, or already read: error. */
		if (sk->urginline || !sk->urg_data || sk->urg_data == URG_READ)
			return -EINVAL;
		/* A valid urgent byte is waiting - hand it to the user. */
		if (sk->urg_data & URG_VALID) {
			char c = sk->urg_data;
			if (!(flags & MSG_PEEK))
				sk->urg_data = URG_READ;	/* mark it consumed */
			put_fs_byte(c, to);
			return 1;
		}

		/* A pending socket error takes precedence; report and clear it. */
		if (sk->err) {
			int tmp = -sk->err;
			sk->err = 0;
			return tmp;
		}

		if (sk->state == TCP_CLOSE || sk->done) {
			/* First read after close reports EOF... */
			if (!sk->done) {
				sk->done = 1;
				return 0;
			}
			/* ...subsequent reads report not-connected. */
			return -ENOTCONN;
		}

		/* Receive side shut down: end of data. */
		if (sk->shutdown & RCV_SHUTDOWN) {
			sk->done = 1;
			return 0;
		}

		if (nonblock)
			return -EAGAIN;

		if (current->signal & ~current->blocked)
			return -ERESTARTSYS;

		/*
		 * Sleep until urgent data shows up. The task state is set
		 * before the condition re-check so a wakeup arriving between
		 * the check and schedule() is not lost.
		 */
		current->state = TASK_INTERRUPTIBLE;
		add_wait_queue(sk->sleep, &wait);
		if ((sk->urg_data & URG_NOTYET) && sk->err == 0 &&
		    !(sk->shutdown & RCV_SHUTDOWN))
			schedule();
		remove_wait_queue(sk->sleep, &wait);
		current->state = TASK_RUNNING;
	}
	return 0;
}
1295
1296
1297 /* This routine copies from a sock struct into the user buffer. */
1298 static int tcp_read(struct sock *sk, unsigned char *to,
1299 int len, int nonblock, unsigned flags)
1300 {
1301 struct wait_queue wait = { current, NULL };
1302 int copied = 0;
1303 unsigned long peek_seq;
1304 unsigned long *seq;
1305 unsigned long used;
1306 int err;
1307
1308 if (len == 0)
1309 return 0;
1310
1311 if (len < 0)
1312 return -EINVAL;
1313
1314 err = verify_area(VERIFY_WRITE, to, len);
1315 if (err)
1316 return err;
1317
1318 /* This error should be checked. */
1319 if (sk->state == TCP_LISTEN)
1320 return -ENOTCONN;
1321
1322 /* Urgent data needs to be handled specially. */
1323 if (flags & MSG_OOB)
1324 return tcp_read_urg(sk, nonblock, to, len, flags);
1325
1326 peek_seq = sk->copied_seq;
1327 seq = &sk->copied_seq;
1328 if (flags & MSG_PEEK)
1329 seq = &peek_seq;
1330
1331 add_wait_queue(sk->sleep, &wait);
1332 sk->inuse = 1;
1333 while (len > 0) {
1334 struct sk_buff * skb;
1335 unsigned long offset;
1336
1337 /*
1338 * are we at urgent data? Stop if we have read anything.
1339 */
1340 if (copied && sk->urg_data && sk->urg_seq == 1+*seq)
1341 break;
1342
1343 current->state = TASK_INTERRUPTIBLE;
1344
1345 skb = sk->rqueue;
1346 do {
1347 if (!skb)
1348 break;
1349 if (before(1+*seq, skb->h.th->seq))
1350 break;
1351 offset = 1 + *seq - skb->h.th->seq;
1352 if (skb->h.th->syn)
1353 offset--;
1354 if (offset < skb->len)
1355 goto found_ok_skb;
1356 if (!(flags & MSG_PEEK))
1357 skb->used = 1;
1358 skb = (struct sk_buff *)skb->next;
1359 } while (skb != sk->rqueue);
1360
1361 if (copied)
1362 break;
1363
1364 if (sk->err) {
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -