about summary refs log tree commit diff stats
path: root/net/ipv4
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2005-07-05 15:19:06 -0700
committerDavid S. Miller <davem@davemloft.net>2005-07-05 15:19:06 -0700
commit92df7b518dcb113de8bc2494e3cd275ad887f12b (patch)
tree7632352e63b1a414844c02c929c95cf20121653e /net/ipv4
parenta762a9800752f05fa8768bb0ac35d0e7f1bcfe7f (diff)
downloadkernel_samsung_aries-92df7b518dcb113de8bc2494e3cd275ad887f12b.zip
kernel_samsung_aries-92df7b518dcb113de8bc2494e3cd275ad887f12b.tar.gz
kernel_samsung_aries-92df7b518dcb113de8bc2494e3cd275ad887f12b.tar.bz2
[TCP]: tcp_write_xmit() tabbing cleanup
Put the main basic block of work at the top-level of tabbing, and mark the TCP_CLOSE test with unlikely().

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/tcp_output.c  68
1 files changed, 34 insertions, 34 deletions
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index a6375ca..2a8409c 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -842,54 +842,54 @@ static inline void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp)
static int tcp_write_xmit(struct sock *sk, int nonagle)
{
struct tcp_sock *tp = tcp_sk(sk);
+ struct sk_buff *skb;
unsigned int mss_now;
+ int sent_pkts;
/* If we are closed, the bytes will have to remain here.
* In time closedown will finish, we empty the write queue and all
* will be happy.
*/
- if (sk->sk_state != TCP_CLOSE) {
- struct sk_buff *skb;
- int sent_pkts = 0;
+ if (unlikely(sk->sk_state == TCP_CLOSE))
+ return 0;
- /* Account for SACKS, we may need to fragment due to this.
- * It is just like the real MSS changing on us midstream.
- * We also handle things correctly when the user adds some
- * IP options mid-stream. Silly to do, but cover it.
- */
- mss_now = tcp_current_mss(sk, 1);
-
- while ((skb = sk->sk_send_head) &&
- tcp_snd_test(sk, skb, mss_now,
- tcp_skb_is_last(sk, skb) ? nonagle :
- TCP_NAGLE_PUSH)) {
- if (skb->len > mss_now) {
- if (tcp_fragment(sk, skb, mss_now))
- break;
- }
- TCP_SKB_CB(skb)->when = tcp_time_stamp;
- tcp_tso_set_push(skb);
- if (tcp_transmit_skb(sk, skb_clone(skb, GFP_ATOMIC)))
+ /* Account for SACKS, we may need to fragment due to this.
+ * It is just like the real MSS changing on us midstream.
+ * We also handle things correctly when the user adds some
+ * IP options mid-stream. Silly to do, but cover it.
+ */
+ mss_now = tcp_current_mss(sk, 1);
+ sent_pkts = 0;
+ while ((skb = sk->sk_send_head) &&
+ tcp_snd_test(sk, skb, mss_now,
+ tcp_skb_is_last(sk, skb) ? nonagle :
+ TCP_NAGLE_PUSH)) {
+ if (skb->len > mss_now) {
+ if (tcp_fragment(sk, skb, mss_now))
break;
+ }
- /* Advance the send_head. This one is sent out.
- * This call will increment packets_out.
- */
- update_send_head(sk, tp, skb);
+ TCP_SKB_CB(skb)->when = tcp_time_stamp;
+ tcp_tso_set_push(skb);
+ if (tcp_transmit_skb(sk, skb_clone(skb, GFP_ATOMIC)))
+ break;
- tcp_minshall_update(tp, mss_now, skb);
- sent_pkts = 1;
- }
+ /* Advance the send_head. This one is sent out.
+ * This call will increment packets_out.
+ */
+ update_send_head(sk, tp, skb);
- if (sent_pkts) {
- tcp_cwnd_validate(sk, tp);
- return 0;
- }
+ tcp_minshall_update(tp, mss_now, skb);
+ sent_pkts = 1;
+ }
- return !tp->packets_out && sk->sk_send_head;
+ if (sent_pkts) {
+ tcp_cwnd_validate(sk, tp);
+ return 0;
}
- return 0;
+
+ return !tp->packets_out && sk->sk_send_head;
}
/* Push out any pending frames which were held back due to