patch-2.1.36 linux/net/ipv4/tcp.c
- Lines: 1199
- Date: Tue Apr 22 22:46:27 1997
- Orig file: v2.1.35/linux/net/ipv4/tcp.c
- Orig date: Mon Apr 14 16:28:28 1997
diff -u --recursive --new-file v2.1.35/linux/net/ipv4/tcp.c linux/net/ipv4/tcp.c
@@ -5,7 +5,7 @@
*
* Implementation of the Transmission Control Protocol(TCP).
*
- * Version: $Id: tcp.c,v 1.55 1997/04/13 10:31:45 davem Exp $
+ * Version: $Id: tcp.c,v 1.61 1997/04/22 02:53:10 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
@@ -444,15 +444,14 @@
{
struct open_request *req = tp->syn_wait_queue;
- if (!req)
- return NULL;
- do {
+ while(req) {
if (req->sk &&
(req->sk->state == TCP_ESTABLISHED ||
req->sk->state >= TCP_FIN_WAIT1))
- return req;
- } while ((req = req->dl_next) != tp->syn_wait_queue);
- return NULL;
+ break;
+ req = req->dl_next;
+ }
+ return req;
}
/*
@@ -466,9 +465,7 @@
struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
struct open_request *req = tp->syn_wait_queue;
- if (!req)
- return;
- do {
+ while(req) {
struct open_request *iter;
if (req->sk)
@@ -481,9 +478,10 @@
tcp_dec_slow_timer(TCP_SLT_SYNACK);
sk->ack_backlog--;
tcp_openreq_free(iter);
- } while (req != tp->syn_wait_queue);
+ }
tp->syn_wait_queue = NULL;
+ tp->syn_wait_last = &tp->syn_wait_queue;
}
/*
@@ -518,8 +516,7 @@
save_flags(flags);
cli();
- if (sk == NULL || (skb = skb_peek(&sk->receive_queue)) == NULL)
- {
+ if (sk == NULL || (skb = skb_peek(&sk->receive_queue)) == NULL) {
restore_flags(flags);
SOCK_DEBUG(sk, "empty\n");
return(0);
@@ -528,32 +525,27 @@
counted = sk->copied_seq; /* Where we are at the moment */
amount = 0;
- /*
- * Do until a push or until we are out of data.
- */
-
- do
- {
- /* Found a hole so stops here */
+ /* Do until a push or until we are out of data. */
+ do {
+ /* Found a hole so stops here. */
if (before(counted, skb->seq))
break;
- /*
- * Length - header but start from where we are up to
- * avoid overlaps
+
+ /* Length - header but start from where we are up to
+ * avoid overlaps.
*/
- sum = skb->len - (counted - skb->seq);
+ sum = skb->len - (counted - skb->seq);
if (skb->h.th->syn)
sum++;
- if (sum > 0)
- {
- /* Add it up, move on */
+ if (sum > 0) {
+ /* Add it up, move on. */
amount += sum;
if (skb->h.th->syn)
amount--;
counted += sum;
}
- /*
- * Don't count urg data ... but do it in the right place!
+
+ /* Don't count urg data ... but do it in the right place!
* Consider: "old_data (ptr is here) URG PUSH data"
* The old code would stop at the first push because
* it counted the urg (amount==1) and then does amount--
@@ -569,15 +561,14 @@
* was correct. Mike <pall@rz.uni-karlsruhe.de>
*/
- /* don't count urg data */
+ /* Don't count urg data. */
if (skb->h.th->urg)
amount--;
#if 0
if (amount && skb->h.th->psh) break;
#endif
skb = skb->next;
- }
- while(skb != (struct sk_buff *)&sk->receive_queue);
+ } while(skb != (struct sk_buff *)&sk->receive_queue);
restore_flags(flags);
SOCK_DEBUG(sk, "got %lu bytes.\n",amount);
@@ -619,18 +610,20 @@
mask = 0;
if (sk->err)
mask = POLLERR;
- /* connected ? */
+ /* Connected? */
if (sk->state != TCP_SYN_SENT && sk->state != TCP_SYN_RECV) {
-
if (sk->shutdown & RCV_SHUTDOWN)
mask |= POLLHUP;
-
+
if ((tp->rcv_nxt != sk->copied_seq) &&
(sk->urg_seq != sk->copied_seq ||
tp->rcv_nxt != sk->copied_seq+1 ||
sk->urginline || !sk->urg_data))
mask |= POLLIN | POLLRDNORM;
+ /* FIXME: this assumed sk->mtu is correctly maintained.
+ * I see no evidence this is the case. -- erics
+ */
if (!(sk->shutdown & SEND_SHUTDOWN) &&
(sock_wspace(sk) >= sk->mtu+128+sk->prot->max_header))
mask |= POLLOUT | POLLWRNORM;
@@ -643,9 +636,7 @@
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
- switch(cmd)
- {
-
+ switch(cmd) {
case TIOCINQ:
#ifdef FIXME /* FIXME: */
case FIONREAD:
@@ -676,28 +667,39 @@
}
default:
return(-EINVAL);
- }
+ };
}
/*
* This routine builds a generic TCP header.
+ * It also builds in the RFC1323 Timestamp.
+ * It can't (unfortunately) do SACK as well.
*/
-extern __inline int tcp_build_header(struct tcphdr *th, struct sock *sk, int push)
+extern __inline void tcp_build_header(struct tcphdr *th, struct sock *sk, int push)
{
- struct tcp_opt *tp=&(sk->tp_pinfo.af_tcp);
+ struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
+
memcpy(th,(void *) &(sk->dummy_th), sizeof(*th));
th->seq = htonl(sk->write_seq);
-
th->psh =(push == 0) ? 1 : 0;
-
- sk->bytes_rcv = 0;
- sk->ack_timed = 0;
th->ack_seq = htonl(tp->rcv_nxt);
th->window = htons(tcp_select_window(sk));
- return(sizeof(*th));
+ /* FIXME: could use the inline found in tcp_output.c as well.
+ * Probably that means we should move these up to an include file. --erics
+ */
+ if (tp->tstamp_ok) {
+ __u32 *ptr = (__u32 *)(th+1);
+ *ptr++ = ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
+ | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
+ /* FIXME: Not sure it's worth setting these here already, but I'm
+ * also not sure we replace them on all paths later. --erics
+ */
+ *ptr++ = jiffies;
+ *ptr++ = tp->ts_recent;
+ }
}
/*
@@ -708,9 +710,7 @@
release_sock(sk);
cli();
if (sk->state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT && sk->err == 0)
- {
interruptible_sleep_on(sk->sleep);
- }
sti();
lock_sock(sk);
}
@@ -756,22 +756,15 @@
int fault;
int copy;
- /*
- * Add more stuff to the end
- * of the skb
- */
-
+ /* Add more stuff to the end of the skb. */
copy = min(sk->mss - tcp_size, skb_tailroom(skb));
copy = min(copy, seglen);
tcp_size += copy;
fault = copy_from_user(skb->tail, from, copy);
-
if (fault)
- {
return -1;
- }
skb_put(skb, copy);
skb->csum = csum_partial(skb->tail - tcp_size, tcp_size, 0);
@@ -793,19 +786,12 @@
int copied = 0;
struct tcp_opt *tp=&(sk->tp_pinfo.af_tcp);
- /*
- * Wait for a connection to finish.
- */
- while (sk->state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT)
- {
- if (copied)
- return copied;
-
+ /* Wait for a connection to finish. */
+ while (sk->state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT) {
if (sk->err)
return sock_error(sk);
- if (sk->state != TCP_SYN_SENT && sk->state != TCP_SYN_RECV)
- {
+ if (sk->state != TCP_SYN_SENT && sk->state != TCP_SYN_RECV) {
if (sk->keepopen)
send_sig(SIGPIPE, current, 0);
return -EPIPE;
@@ -820,19 +806,14 @@
wait_for_tcp_connect(sk);
}
- /*
- * Ok commence sending
- */
-
- while(--iovlen >= 0)
- {
+ /* Ok commence sending. */
+ while(--iovlen >= 0) {
int seglen=iov->iov_len;
unsigned char * from=iov->iov_base;
iov++;
- while(seglen > 0)
- {
+ while(seglen > 0) {
unsigned int actual_win;
int copy;
int tmp;
@@ -840,142 +821,110 @@
if (err)
return (err);
- /*
- * Stop on errors
- */
- if (sk->err)
- {
+
+ /* Stop on errors. */
+ if (sk->err) {
if (copied)
return copied;
return sock_error(sk);
}
- /*
- * Make sure that we are established.
- */
- if (sk->shutdown & SEND_SHUTDOWN)
- {
+ /* Make sure that we are established. */
+ if (sk->shutdown & SEND_SHUTDOWN) {
if (copied)
return copied;
send_sig(SIGPIPE,current,0);
return -EPIPE;
}
- /*
- * Now we need to check if we have a half built packet.
- */
+ /* Now we need to check if we have a half built packet. */
- /* if we have queued packets */
- if (tp->send_head && !(flags & MSG_OOB) )
- {
+ /* If we have queued packets.. */
+ if (tp->send_head && !(flags & MSG_OOB)) {
int tcp_size;
/* Tail */
skb = sk->write_queue.prev;
tcp_size = skb->tail -
- (unsigned char *)(skb->h.th + 1);
+ ((unsigned char *)(skb->h.th) + tp->tcp_header_len);
- /*
- * This window_seq test is somewhat dangerous
+ /* printk("extending buffer\n"); */
+ /* This window_seq test is somewhat dangerous
* If the remote does SWS avoidance we should
* queue the best we can if not we should in
* fact send multiple packets...
* a method for detecting this would be most
* welcome
*/
-
if (skb->end > skb->tail &&
sk->mss - tcp_size > 0 &&
- tp->snd_nxt < skb->end_seq)
- {
+ tp->snd_nxt < skb->end_seq) {
int tcopy;
-
+
tcopy = tcp_append_tail(sk, skb, from,
tcp_size,
seglen);
if (tcopy == -1)
- {
return -EFAULT;
- }
from += tcopy;
copied += tcopy;
seglen -= tcopy;
- /*
- * FIXME: if we're nagling we
+ /* FIXME: if we're nagling we
* should send here.
*/
continue;
}
}
-
- /*
- * We also need to worry about the window.
- * If window < 1/2 the maximum window we've seen from this
- * host, don't use it. This is sender side
- * silly window prevention, as specified in RFC1122.
- * (Note that this is different than earlier versions of
- * SWS prevention, e.g. RFC813.). What we actually do is
- * use the whole MSS. Since the results in the right
- * edge of the packet being outside the window, it will
- * be queued for later rather than sent.
- */
-
+ /* We also need to worry about the window.
+ * If window < 1/2 the maximum window we've seen from this
+ * host, don't use it. This is sender side
+ * silly window prevention, as specified in RFC1122.
+ * (Note that this is different than earlier versions of
+ * SWS prevention, e.g. RFC813.). What we actually do is
+ * use the whole MSS. Since the results in the right
+ * edge of the packet being outside the window, it will
+ * be queued for later rather than sent.
+ */
copy = min(seglen, sk->mss);
-
actual_win = tp->snd_wnd - (tp->snd_nxt - tp->snd_una);
if (copy > actual_win &&
- (((int) actual_win) >= (sk->max_window >> 1))
- && actual_win)
- {
+ (((int) actual_win) >= (tp->max_window >> 1)) &&
+ actual_win)
copy = actual_win;
- }
- if (copy <= 0)
- {
+ if (copy <= 0) {
printk(KERN_DEBUG "sendmsg: copy < 0\n");
return -EIO;
}
- /*
- * If sk->packets_out > 0 segment will be nagled
- * else we kick it right away
+ /* If tp->packets_out > 0 segment will be nagled
+ * else we kick it right away.
*/
-
tmp = MAX_HEADER + sk->prot->max_header +
sizeof(struct sk_buff) + 15;
- if (copy < min(sk->mss, sk->max_window >> 1) &&
- !(flags & MSG_OOB) && atomic_read(&sk->packets_out))
- {
- tmp += min(sk->mss, sk->max_window);
- }
+ if (copy < min(sk->mss, tp->max_window >> 1) &&
+ !(flags & MSG_OOB) && tp->packets_out)
+ tmp += min(sk->mss, tp->max_window);
else
- {
tmp += copy;
- }
skb = sock_wmalloc(sk, tmp, 0, GFP_KERNEL);
- /*
- * If we didn't get any memory, we need to sleep.
- */
-
- if (skb == NULL)
- {
+ /* If we didn't get any memory, we need to sleep. */
+ if (skb == NULL) {
sk->socket->flags |= SO_NOSPACE;
- if (flags&MSG_DONTWAIT)
- {
+ if (flags&MSG_DONTWAIT) {
if (copied)
return copied;
return -EAGAIN;
}
- if (current->signal & ~current->blocked)
- {
+ if (current->signal & ~current->blocked) {
if (copied)
return copied;
return -ERESTARTSYS;
@@ -985,37 +934,25 @@
continue;
}
- /*
- * FIXME: we need to optimize this.
+ /* FIXME: we need to optimize this.
* Perhaps some hints here would be good.
*/
-
tmp = tp->af_specific->build_net_header(sk, skb);
-
- if (tmp < 0)
- {
+ if (tmp < 0) {
kfree_skb(skb, FREE_WRITE);
if (copied)
return(copied);
return(tmp);
}
- skb->h.th =(struct tcphdr *)
- skb_put(skb,sizeof(struct tcphdr));
+ skb->h.th =(struct tcphdr *)
+ skb_put(skb,tp->tcp_header_len);
seglen -= copy;
- tmp = tcp_build_header(skb->h.th, sk, seglen || iovlen);
+ tcp_build_header(skb->h.th, sk, seglen || iovlen);
+ /* FIXME: still need to think about SACK options here. */
- if (tmp < 0)
- {
- kfree_skb(skb, FREE_WRITE);
- if (copied)
- return(copied);
- return(tmp);
- }
-
- if (flags & MSG_OOB)
- {
+ if (flags & MSG_OOB) {
skb->h.th->urg = 1;
skb->h.th->urg_ptr = ntohs(copy);
}
@@ -1027,7 +964,7 @@
copied += copy;
sk->write_seq += copy;
-
+
tcp_send_skb(sk, skb);
release_sock(sk);
@@ -1043,9 +980,6 @@
return copied;
}
-
-
-
/*
* Send an ack if one is backlogged at this point. Ought to merge
* this with tcp_send_ack().
@@ -1054,18 +988,15 @@
void tcp_read_wakeup(struct sock *sk)
{
- /*
- * If we're closed, don't send an ack, or we'll get a RST
+ /* If we're closed, don't send an ack, or we'll get a RST
* from the closed destination.
*/
-
if ((sk->state == TCP_CLOSE) || (sk->state == TCP_TIME_WAIT))
return;
tcp_send_ack(sk);
}
-
/*
* Handle reading urgent data. BSD has very simple semantics for
* this, no blocking and very strange errors 8)
@@ -1078,33 +1009,28 @@
int err=0;
struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
- /*
- * No URG data to read
- */
+ /* No URG data to read. */
if (sk->urginline || !sk->urg_data || sk->urg_data == URG_READ)
return -EINVAL; /* Yes this is right ! */
if (sk->err)
return sock_error(sk);
- if (sk->state == TCP_CLOSE || sk->done)
- {
- if (!sk->done)
- {
+ if (sk->state == TCP_CLOSE || sk->done) {
+ if (!sk->done) {
sk->done = 1;
return 0;
}
return -ENOTCONN;
}
- if (sk->shutdown & RCV_SHUTDOWN)
- {
+ if (sk->shutdown & RCV_SHUTDOWN) {
sk->done = 1;
return 0;
}
+
lock_sock(sk);
- if (sk->urg_data & URG_VALID)
- {
+ if (sk->urg_data & URG_VALID) {
char c = sk->urg_data;
if (!(flags & MSG_PEEK))
sk->urg_data = URG_READ;
@@ -1115,23 +1041,20 @@
msg->msg_flags|=MSG_TRUNC;
if(msg->msg_name)
- {
tp->af_specific->addr2sockaddr(sk, (struct sockaddr *)
msg->msg_name);
- }
+
if(addr_len)
*addr_len = tp->af_specific->sockaddr_len;
- /*
- * Read urgent data
- */
+
+ /* Read urgent data. */
msg->msg_flags|=MSG_OOB;
release_sock(sk);
return err ? -EFAULT : 1;
}
release_sock(sk);
- /*
- * Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
+ /* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
* the available implementations agree in this case:
* this call should never block, independent of the
* blocking state of the socket.
@@ -1148,7 +1071,7 @@
static inline void tcp_eat_skb(struct sock *sk, struct sk_buff * skb)
{
- sk->delayed_acks++;
+ sk->tp_pinfo.af_tcp.delayed_acks++;
__skb_unlink(skb, &sk->receive_queue);
kfree_skb(skb, FREE_READ);
@@ -1159,11 +1082,9 @@
{
struct sk_buff *skb;
- /*
- * NOTE! The socket must be locked, so that we don't get
+ /* NOTE! The socket must be locked, so that we don't get
* a messed-up receive queue.
*/
-
while ((skb=skb_peek(&sk->receive_queue)) != NULL) {
if (!skb->used || atomic_read(&skb->users)>1)
break;
@@ -1172,13 +1093,10 @@
SOCK_DEBUG(sk, "sk->rspace = %lu\n", sock_rspace(sk));
- /*
- * We send a ACK if the sender is blocked
- * else let tcp_data deal with the acking policy.
+ /* We send a ACK if the sender is blocked
+ * else let tcp_data deal with the acking policy.
*/
-
- if (sk->delayed_acks)
- {
+ if (sk->tp_pinfo.af_tcp.delayed_acks) {
struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
__u32 rcv_wnd;
@@ -1209,47 +1127,34 @@
if (sk->state == TCP_LISTEN)
return -ENOTCONN;
- /*
- * Urgent data needs to be handled specially.
- */
-
+ /* Urgent data needs to be handled specially. */
if (flags & MSG_OOB)
return tcp_recv_urg(sk, nonblock, msg, len, flags, addr_len);
- /*
- * Copying sequence to update. This is volatile to handle
+ /* Copying sequence to update. This is volatile to handle
* the multi-reader case neatly (memcpy_to/fromfs might be
* inline and thus not flush cached variables otherwise).
*/
-
peek_seq = sk->copied_seq;
seq = &sk->copied_seq;
if (flags & MSG_PEEK)
seq = &peek_seq;
- /*
- * Handle the POSIX bogosity MSG_WAITALL
- */
-
+ /* Handle the POSIX bogosity MSG_WAITALL. */
if (flags & MSG_WAITALL)
target=len;
add_wait_queue(sk->sleep, &wait);
lock_sock(sk);
- while (len > 0)
- {
+ while (len > 0) {
struct sk_buff * skb;
u32 offset;
- /*
- * Are we at urgent data? Stop if we have read anything.
- */
-
+ /* Are we at urgent data? Stop if we have read anything. */
if (copied && sk->urg_data && sk->urg_seq == *seq)
break;
- /*
- * We need to check signals first, to get correct SIGURG
+ /* We need to check signals first, to get correct SIGURG
* handling. FIXME: Need to check this doesnt impact 1003.1g
* and move it down to the bottom of the loop
*/
@@ -1262,20 +1167,16 @@
break;
}
- /*
- * Next get a buffer.
- */
-
+ /* Next get a buffer. */
current->state = TASK_INTERRUPTIBLE;
skb = skb_peek(&sk->receive_queue);
- do
- {
+ do {
if (!skb)
break;
- /*
- * now that we have two receive queues this
- * shouldn't happen
+
+ /* Now that we have two receive queues this
+ * shouldn't happen.
*/
if (before(*seq, skb->seq)) {
printk(KERN_INFO "recvmsg bug: copied %X seq %X\n",
@@ -1292,22 +1193,18 @@
if (!(flags & MSG_PEEK))
skb->used = 1;
skb = skb->next;
- }
- while (skb != (struct sk_buff *)&sk->receive_queue);
+ } while (skb != (struct sk_buff *)&sk->receive_queue);
if (copied >= target)
break;
- if (sk->err && !(flags&MSG_PEEK))
- {
+ if (sk->err && !(flags&MSG_PEEK)) {
copied = sock_error(sk);
break;
}
- if (sk->state == TCP_CLOSE)
- {
- if (!sk->done)
- {
+ if (sk->state == TCP_CLOSE) {
+ if (!sk->done) {
sk->done = 1;
break;
}
@@ -1315,14 +1212,12 @@
break;
}
- if (sk->shutdown & RCV_SHUTDOWN)
- {
+ if (sk->shutdown & RCV_SHUTDOWN) {
sk->done = 1;
break;
}
- if (nonblock)
- {
+ if (nonblock) {
copied = -EAGAIN;
break;
}
@@ -1336,65 +1231,46 @@
continue;
found_ok_skb:
- /*
- * Lock the buffer. We can be fairly relaxed as
+ /* Lock the buffer. We can be fairly relaxed as
* an interrupt will never steal a buffer we are
* using unless I've missed something serious in
* tcp_data.
*/
-
atomic_inc(&skb->users);
- /*
- * Ok so how much can we use ?
- */
-
+ /* Ok so how much can we use? */
used = skb->len - offset;
if (len < used)
used = len;
- /*
- * Do we have urgent data here?
- */
- if (sk->urg_data)
- {
+ /* Do we have urgent data here? */
+ if (sk->urg_data) {
u32 urg_offset = sk->urg_seq - *seq;
- if (urg_offset < used)
- {
- if (!urg_offset)
- {
- if (!sk->urginline)
- {
+ if (urg_offset < used) {
+ if (!urg_offset) {
+ if (!sk->urginline) {
++*seq;
offset++;
used--;
}
- }
- else
+ } else
used = urg_offset;
}
}
- /*
- * Copy it - We _MUST_ update *seq first so that we
+ /* Copy it - We _MUST_ update *seq first so that we
* don't ever double read when we have dual readers
*/
-
*seq += used;
- /*
- * This memcpy_toiovec can sleep. If it sleeps and we
+ /* This memcpy_toiovec can sleep. If it sleeps and we
* do a second read it relies on the skb->users to avoid
* a crash when cleanup_rbuf() gets called.
*/
-
err = memcpy_toiovec(msg->msg_iov, ((unsigned char *)skb->h.th) + skb->h.th->doff*4 + offset, used);
- if (err)
- {
- /*
- * exception. bailout!
- */
+ if (err) {
+ /* Exception. Bailout! */
*seq -= err;
atomic_dec(&skb->users);
copied = -EFAULT;
@@ -1404,12 +1280,10 @@
copied += used;
len -= used;
- /*
- * We now will not sleep again until we are finished
+ /* We now will not sleep again until we are finished
* with skb. Sorry if you are doing the SMP port
* but you'll just have to fix it neatly ;)
*/
-
atomic_dec(&skb->users);
if (after(sk->copied_seq,sk->urg_seq))
@@ -1417,11 +1291,9 @@
if (used + offset < skb->len)
continue;
- /*
- * Process the FIN. We may also need to handle PSH
- * here and make it break out of MSG_WAITALL
+ /* Process the FIN. We may also need to handle PSH
+ * here and make it break out of MSG_WAITALL.
*/
-
if (skb->h.th->fin)
goto found_fin_ok;
if (flags & MSG_PEEK)
@@ -1436,35 +1308,28 @@
if (flags & MSG_PEEK)
break;
- /*
- * All is done
- */
-
+ /* All is done. */
skb->used = 1;
sk->shutdown |= RCV_SHUTDOWN;
break;
-
}
if(copied > 0 && msg->msg_name)
- {
tp->af_specific->addr2sockaddr(sk, (struct sockaddr *)
msg->msg_name);
- }
+
if(addr_len)
*addr_len = tp->af_specific->sockaddr_len;
remove_wait_queue(sk->sleep, &wait);
current->state = TASK_RUNNING;
- /* Clean up data we have read: This will do ACK frames */
+ /* Clean up data we have read: This will do ACK frames. */
cleanup_rbuf(sk);
release_sock(sk);
return copied;
}
-
-
/*
* State processing on a close. This implements the state shift for
* sending our FIN frame. Note that we only send a FIN for some
@@ -1476,8 +1341,7 @@
{
int ns=TCP_CLOSE;
int send_fin=0;
- switch(sk->state)
- {
+ switch(sk->state) {
case TCP_SYN_SENT: /* No SYN back, no FIN needed */
break;
case TCP_SYN_RECV:
@@ -1498,12 +1362,11 @@
wait only for the ACK */
ns=TCP_LAST_ACK;
send_fin=1;
- }
+ };
tcp_set_state(sk,ns);
- /*
- * This is a (useful) BSD violating of the RFC. There is a
+ /* This is a (useful) BSD violating of the RFC. There is a
* problem with TCP as specified in that the other end could
* keep a socket open forever with no application left this end.
* We use a 3 minute timeout (about the same as BSD) then kill
@@ -1511,8 +1374,7 @@
* that we won't make the old 4*rto = almost no time - whoops
* reset mistake.
*/
- if(dead && ns==TCP_FIN_WAIT2)
- {
+ if(dead && ns==TCP_FIN_WAIT2) {
int timer_active=del_timer(&sk->timer);
if(timer_active)
add_timer(&sk->timer);
@@ -1530,50 +1392,29 @@
void tcp_shutdown(struct sock *sk, int how)
{
- /*
- * We need to grab some memory, and put together a FIN,
+ /* We need to grab some memory, and put together a FIN,
* and then put it into the queue to be sent.
* Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
*/
-
if (!(how & SEND_SHUTDOWN))
return;
- /*
- * If we've already sent a FIN, or it's a closed state
- */
-
- if (sk->state == TCP_FIN_WAIT1 ||
- sk->state == TCP_FIN_WAIT2 ||
- sk->state == TCP_CLOSING ||
- sk->state == TCP_LAST_ACK ||
- sk->state == TCP_TIME_WAIT ||
- sk->state == TCP_CLOSE ||
- sk->state == TCP_LISTEN
- )
- {
- return;
- }
- lock_sock(sk);
-
- /*
- * flag that the sender has shutdown
- */
-
- sk->shutdown |= SEND_SHUTDOWN;
-
- /*
- * Clear out any half completed packets.
- */
+ /* If we've already sent a FIN, or it's a closed state, skip this. */
+ if (sk->state == TCP_ESTABLISHED ||
+ sk->state == TCP_SYN_SENT ||
+ sk->state == TCP_SYN_RECV ||
+ sk->state == TCP_CLOSE_WAIT) {
+ lock_sock(sk);
- /*
- * FIN if needed
- */
+ /* Flag that the sender has shutdown. */
+ sk->shutdown |= SEND_SHUTDOWN;
- if (tcp_close_state(sk,0))
- tcp_send_fin(sk);
+ /* Clear out any half completed packets. FIN if needed. */
+ if (tcp_close_state(sk,0))
+ tcp_send_fin(sk);
- release_sock(sk);
+ release_sock(sk);
+ }
}
@@ -1588,7 +1429,7 @@
case TCP_CLOSING:
case TCP_LAST_ACK:
return 1;
- }
+ };
return 0;
}
@@ -1597,16 +1438,12 @@
{
struct sk_buff *skb;
- /*
- * We need to grab some memory, and put together a FIN,
+ /* We need to grab some memory, and put together a FIN,
* and then put it into the queue to be sent.
*/
-
lock_sock(sk);
-
- if(sk->state == TCP_LISTEN)
- {
- /* Special case */
+ if(sk->state == TCP_LISTEN) {
+ /* Special case. */
tcp_set_state(sk, TCP_CLOSE);
tcp_close_pending(sk);
release_sock(sk);
@@ -1621,37 +1458,27 @@
if (!sk->dead)
sk->state_change(sk);
- /*
- * We need to flush the recv. buffs. We do this only on the
+ /* We need to flush the recv. buffs. We do this only on the
* descriptor close, not protocol-sourced closes, because the
* reader process may not have drained the data yet!
*/
-
while((skb=skb_dequeue(&sk->receive_queue))!=NULL)
kfree_skb(skb, FREE_READ);
-
- /*
- * Timeout is not the same thing - however the code likes
- * to send both the same way (sigh).
+ /* Timeout is not the same thing - however the code likes
+ * to send both the same way (sigh).
*/
-
if (tcp_close_state(sk,1)==1)
- {
tcp_send_fin(sk);
- }
if (timeout) {
cli();
release_sock(sk);
current->timeout = timeout;
- while(closing(sk) && current->timeout)
- {
+ while(closing(sk) && current->timeout) {
interruptible_sleep_on(sk->sleep);
if (current->signal & ~current->blocked)
- {
break;
- }
}
current->timeout=0;
lock_sock(sk);
@@ -1661,8 +1488,7 @@
/* Now that the socket is dead, if we are in the FIN_WAIT2 state
* we may need to set up a timer.
*/
- if (sk->state==TCP_FIN_WAIT2)
- {
+ if (sk->state==TCP_FIN_WAIT2) {
int timer_active=del_timer(&sk->timer);
if(timer_active)
add_timer(&sk->timer);
@@ -1677,7 +1503,6 @@
sk->prot->unhash(sk);
}
-
/*
* Wait for an incoming connection, avoid race
* conditions. This must be called with the socket locked.
@@ -1703,7 +1528,6 @@
return req;
}
-
/*
* This will accept the next outstanding connection.
*
@@ -1751,7 +1575,6 @@
goto out;
}
-
/*
* Socket option code for TCP.
*/
@@ -1772,11 +1595,9 @@
if (get_user(val, (int *)optval))
return -EFAULT;
- switch(optname)
- {
+ switch(optname) {
case TCP_MAXSEG:
-/*
- * values greater than interface MTU won't take effect. however at
+/* values greater than interface MTU won't take effect. however at
* the point when this call is done we typically don't yet know
* which interface is going to be used
*/
@@ -1789,7 +1610,7 @@
return 0;
default:
return(-ENOPROTOOPT);
- }
+ };
}
int tcp_getsockopt(struct sock *sk, int level, int optname, char *optval,
@@ -1800,18 +1621,15 @@
int len;
if(level != SOL_TCP)
- {
return tp->af_specific->getsockopt(sk, level, optname,
optval, optlen);
- }
if(get_user(len,optlen))
return -EFAULT;
- len=min(len,sizeof(int));
+ len = min(len,sizeof(int));
- switch(optname)
- {
+ switch(optname) {
case TCP_MAXSEG:
val=sk->user_mss;
break;
@@ -1820,7 +1638,7 @@
break;
default:
return(-ENOPROTOOPT);
- }
+ };
if(put_user(len, optlen))
return -EFAULT;
@@ -1832,13 +1650,9 @@
void tcp_set_keepalive(struct sock *sk, int val)
{
if (!sk->keepopen && val)
- {
tcp_inc_slow_timer(TCP_SLT_KEEPALIVE);
- }
else if (sk->keepopen && !val)
- {
tcp_dec_slow_timer(TCP_SLT_KEEPALIVE);
- }
}
void tcp_init(void)
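
For reference, a minimal standalone sketch (not part of the patch) of the 12-byte RFC1323 timestamp option block that the new tcp_build_header fills in when tp->tstamp_ok is set: two NOPs for alignment, a kind/length word, then TSval and TSecr. The option constants mirror the kernel's values; tsval and tsecr stand in for jiffies and tp->ts_recent, and the sketch byte-swaps them for a wire-order dump, which the patched code itself does not yet do.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define TCPOPT_NOP        1	/* padding */
#define TCPOPT_TIMESTAMP  8	/* RFC1323 timestamp option kind */
#define TCPOLEN_TIMESTAMP 10	/* kind + len + two 32-bit timestamps */

static void build_tstamp_option(uint32_t *ptr, uint32_t tsval, uint32_t tsecr)
{
	/* NOP, NOP, kind, length packed into one 32-bit word, as in
	 * tcp_build_header above, followed by the two timestamps.
	 */
	*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
		       (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
	*ptr++ = htonl(tsval);		/* our clock (jiffies in the kernel) */
	*ptr++ = htonl(tsecr);		/* last value seen from the peer (tp->ts_recent) */
}

int main(void)
{
	uint32_t opt[3];
	unsigned char *p = (unsigned char *)opt;
	int i;

	build_tstamp_option(opt, 123456, 654321);
	for (i = 0; i < 12; i++)
		printf("%02x ", p[i]);
	printf("\n");			/* starts 01 01 08 0a ... */
	return 0;
}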
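
Similarly, a hedged sketch of the sender-side silly window avoidance test that tcp_sendmsg now applies against tp->max_window (rather than sk->max_window): clamp the segment to the usable send window only while that window is at least half the largest window the peer has ever advertised; otherwise keep the full MSS so the oversized segment is queued instead of being sent small. The parameter names below are stand-ins for the sk/tp fields used in the hunk, not kernel identifiers.

#include <stdio.h>

static unsigned int sws_copy(unsigned int seglen, unsigned int mss,
			     unsigned int snd_wnd, unsigned int snd_nxt,
			     unsigned int snd_una, unsigned int max_window)
{
	unsigned int copy = seglen < mss ? seglen : mss;
	unsigned int actual_win = snd_wnd - (snd_nxt - snd_una);

	/* Shrink to the usable window only while it is >= half the
	 * biggest window this peer has offered (RFC1122 sender-side
	 * SWS avoidance); otherwise leave copy at the full MSS so the
	 * segment's right edge falls outside the window and it queues.
	 */
	if (copy > actual_win &&
	    (int) actual_win >= (int) (max_window >> 1) &&
	    actual_win)
		copy = actual_win;
	return copy;
}

int main(void)
{
	/* Usable window 1148 >= max_window/2: clamp 1460 down to 1148. */
	printf("%u\n", sws_copy(1460, 1460, 2048, 900, 0, 2048));
	/* Usable window 692 < max_window/2: keep 1460, segment queues. */
	printf("%u\n", sws_copy(1460, 1460, 8192, 7500, 0, 8192));
	return 0;
}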