Diffstat (limited to 'net/ipv4')
 net/ipv4/inetpeer.c              | 42 +++++++++++++++---------------------------
 net/ipv4/ipvs/ip_vs_conn.c       |  1 -
 net/ipv4/ipvs/ip_vs_ctl.c        |  1 -
 net/ipv4/netfilter/nf_nat_core.c |  2 +-
 net/ipv4/route.c                 |  3 +++
 net/ipv4/tcp_input.c             | 16 +++++++++++++---
 6 files changed, 32 insertions(+), 33 deletions(-)
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index 771031d..af99519 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -61,7 +61,7 @@
  * 4.  Global variable peer_total is modified under the pool lock.
  * 5.  struct inet_peer fields modification:
  *		avl_left, avl_right, avl_parent, avl_height: pool lock
- *		unused_next, unused_prevp: unused node list lock
+ *		unused: unused node list lock
  *		refcnt: atomically against modifications on other CPU;
  *		   usually under some other lock to prevent node disappearing
  *		dtime: unused node list lock
@@ -94,8 +94,7 @@ int inet_peer_maxttl __read_mostly = 10 * 60 * HZ; /* usual time to live: 10 min
 int inet_peer_gc_mintime __read_mostly = 10 * HZ;
 int inet_peer_gc_maxtime __read_mostly = 120 * HZ;
 
-static struct inet_peer *inet_peer_unused_head;
-static struct inet_peer **inet_peer_unused_tailp = &inet_peer_unused_head;
+static LIST_HEAD(unused_peers);
 static DEFINE_SPINLOCK(inet_peer_unused_lock);
 
 static void peer_check_expire(unsigned long dummy);
@@ -138,15 +137,7 @@ void __init inet_initpeers(void)
 static void unlink_from_unused(struct inet_peer *p)
 {
 	spin_lock_bh(&inet_peer_unused_lock);
-	if (p->unused_prevp != NULL) {
-		/* On unused list. */
-		*p->unused_prevp = p->unused_next;
-		if (p->unused_next != NULL)
-			p->unused_next->unused_prevp = p->unused_prevp;
-		else
-			inet_peer_unused_tailp = p->unused_prevp;
-		p->unused_prevp = NULL;	/* mark it as removed */
-	}
+	list_del_init(&p->unused);
 	spin_unlock_bh(&inet_peer_unused_lock);
 }
 
@@ -337,24 +328,24 @@ static void unlink_from_pool(struct inet_peer *p)
 /* May be called with local BH enabled. */
 static int cleanup_once(unsigned long ttl)
 {
-	struct inet_peer *p;
+	struct inet_peer *p = NULL;
 
 	/* Remove the first entry from the list of unused nodes. */
 	spin_lock_bh(&inet_peer_unused_lock);
-	p = inet_peer_unused_head;
-	if (p != NULL) {
-		__u32 delta = (__u32)jiffies - p->dtime;
+	if (!list_empty(&unused_peers)) {
+		__u32 delta;
+
+		p = list_first_entry(&unused_peers, struct inet_peer, unused);
+		delta = (__u32)jiffies - p->dtime;
+
 		if (delta < ttl) {
 			/* Do not prune fresh entries. */
 			spin_unlock_bh(&inet_peer_unused_lock);
 			return -1;
 		}
-		inet_peer_unused_head = p->unused_next;
-		if (p->unused_next != NULL)
-			p->unused_next->unused_prevp = p->unused_prevp;
-		else
-			inet_peer_unused_tailp = p->unused_prevp;
-		p->unused_prevp = NULL;	/* mark as not on the list */
+
+		list_del_init(&p->unused);
+
 		/* Grab an extra reference to prevent node disappearing
 		 * before unlink_from_pool() call. */
 		atomic_inc(&p->refcnt);
@@ -412,7 +403,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
 
 	/* Link the node. */
 	link_to_pool(n);
-	n->unused_prevp = NULL;	/* not on the list */
+	INIT_LIST_HEAD(&n->unused);
 	peer_total++;
 	write_unlock_bh(&peer_pool_lock);
 
@@ -467,10 +458,7 @@ void inet_putpeer(struct inet_peer *p)
 {
 	spin_lock_bh(&inet_peer_unused_lock);
 	if (atomic_dec_and_test(&p->refcnt)) {
-		p->unused_prevp = inet_peer_unused_tailp;
-		p->unused_next = NULL;
-		*inet_peer_unused_tailp = p;
-		inet_peer_unused_tailp = &p->unused_next;
+		list_add_tail(&p->unused, &unused_peers);
 		p->dtime = (__u32)jiffies;
 	}
 	spin_unlock_bh(&inet_peer_unused_lock);
diff --git a/net/ipv4/ipvs/ip_vs_conn.c b/net/ipv4/ipvs/ip_vs_conn.c
index b7eeae6..0a9f3c3 100644
--- a/net/ipv4/ipvs/ip_vs_conn.c
+++ b/net/ipv4/ipvs/ip_vs_conn.c
@@ -441,7 +441,6 @@ struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp)
 	} else
 		return NULL;
 }
-EXPORT_SYMBOL(ip_vs_try_bind_dest);
 
 
 /*
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c
index 3c4d22a..b64cf45 100644
--- a/net/ipv4/ipvs/ip_vs_ctl.c
+++ b/net/ipv4/ipvs/ip_vs_ctl.c
@@ -604,7 +604,6 @@ struct ip_vs_dest *ip_vs_find_dest(__be32 daddr, __be16 dport,
 	ip_vs_service_put(svc);
 	return dest;
 }
-EXPORT_SYMBOL(ip_vs_find_dest);
 
 /*
  * Lookup dest by {svc,addr,port} in the destination trash.
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 56e93f6..70e7997 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -681,7 +681,7 @@ static int clean_nat(struct nf_conn *i, void *data)
 
 	if (!nat)
 		return 0;
-	memset(nat, 0, sizeof(nat));
+	memset(nat, 0, sizeof(*nat));
 	i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK | IPS_SEQ_ADJUST);
 	return 0;
 }
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 4565183..1bff9ed 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -578,6 +578,9 @@ static void rt_check_expire(struct work_struct *work)
 		i = (i + 1) & rt_hash_mask;
 		rthp = &rt_hash_table[i].chain;
 
+		if (need_resched())
+			cond_resched();
+
 		if (*rthp == NULL)
 			continue;
 		spin_lock_bh(rt_hash_lock_addr(i));
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 20c9440..0f0c1c9 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1269,6 +1269,9 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 	if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window))
 		return 0;
 
+	if (!tp->packets_out)
+		goto out;
+
 	/* SACK fastpath:
 	 * if the only SACK change is the increase of the end_seq of
 	 * the first block then only apply that SACK block
@@ -1515,6 +1518,8 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 	    (!tp->frto_highmark || after(tp->snd_una, tp->frto_highmark)))
 		tcp_update_reordering(sk, tp->fackets_out - reord, 0);
 
+out:
+
 #if FASTRETRANS_DEBUG > 0
 	BUG_TRAP((int)tp->sacked_out >= 0);
 	BUG_TRAP((int)tp->lost_out >= 0);
@@ -1669,6 +1674,9 @@ void tcp_enter_frto(struct sock *sk)
 	}
 	tcp_verify_left_out(tp);
 
+	/* Too bad if TCP was application limited */
+	tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp) + 1);
+
 	/* Earlier loss recovery underway (see RFC4138; Appendix B).
 	 * The last condition is necessary at least in tp->frto_counter case.
 	 */
@@ -1701,6 +1709,8 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
 	tcp_for_write_queue(skb, sk) {
 		if (skb == tcp_send_head(sk))
 			break;
+
+		TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
 		/*
 		 * Count the retransmission made on RTO correctly (only when
 		 * waiting for the first ACK and did not get it)...
@@ -1714,7 +1724,7 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
 		} else {
 			if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS)
 				tp->undo_marker = 0;
-			TCP_SKB_CB(skb)->sacked &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS);
+			TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
 		}
 
 		/* Don't lost mark skbs that were fwd transmitted after RTO */
@@ -3103,11 +3113,11 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 	/* See if we can take anything off of the retransmit queue. */
 	flag |= tcp_clean_rtx_queue(sk, &seq_rtt, prior_fackets);
 
+	if (tp->frto_counter)
+		frto_cwnd = tcp_process_frto(sk, flag);
 	/* Guarantee sacktag reordering detection against wrap-arounds */
 	if (before(tp->frto_highmark, tp->snd_una))
 		tp->frto_highmark = 0;
-	if (tp->frto_counter)
-		frto_cwnd = tcp_process_frto(sk, flag);
 
 	if (tcp_ack_is_dubious(sk, flag)) {
 		/* Advance CWND, if state allows this. */
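
Note (illustration only, not part of the patch): the inetpeer.c hunks replace a hand-rolled singly-linked "unused" list (unused_next/unused_prevp plus a tail pointer) with the kernel's generic struct list_head. The sketch below is a minimal userspace approximation of the list_head operations the new code relies on (INIT_LIST_HEAD, list_add_tail, list_empty, list_first_entry, list_del_init); struct peer and main() are invented for the example and the helpers are simplified stand-ins for <linux/list.h>, not the kernel implementation.

#include <stdio.h>
#include <stddef.h>

/* Circular doubly-linked list node; a list is empty when it points at itself. */
struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void INIT_LIST_HEAD(struct list_head *h)
{
	h->next = h;
	h->prev = h;
}

static int list_empty(const struct list_head *h)
{
	return h->next == h;
}

/* Insert 'entry' between 'prev' and 'next'. */
static void __list_add(struct list_head *entry, struct list_head *prev,
		       struct list_head *next)
{
	next->prev = entry;
	entry->next = next;
	entry->prev = prev;
	prev->next = entry;
}

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
	__list_add(entry, head->prev, head);
}

/* Unlink and reinitialise, so deleting an already-unlinked node is a no-op. */
static void list_del_init(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	INIT_LIST_HEAD(entry);
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define list_first_entry(head, type, member) \
	container_of((head)->next, type, member)

/* Toy stand-in for struct inet_peer: only the fields this sketch needs. */
struct peer {
	unsigned int dtime;
	struct list_head unused;
};

int main(void)
{
	struct list_head unused_peers = LIST_HEAD_INIT(unused_peers);
	struct peer a = { .dtime = 1 }, b = { .dtime = 2 };

	INIT_LIST_HEAD(&a.unused);		/* like inet_getpeer() */
	INIT_LIST_HEAD(&b.unused);

	list_add_tail(&a.unused, &unused_peers);/* like inet_putpeer() */
	list_add_tail(&b.unused, &unused_peers);

	if (!list_empty(&unused_peers)) {	/* like cleanup_once() */
		struct peer *p = list_first_entry(&unused_peers,
						  struct peer, unused);
		printf("oldest unused peer: dtime=%u\n", p->dtime);
		list_del_init(&p->unused);
	}

	/* Safe even though 'a' is already off the list; this is why the
	 * patch can drop the old "p->unused_prevp != NULL" check. */
	list_del_init(&a.unused);
	return 0;
}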