author    Neil Horman <nhorman@tuxdriver.com>    2009-07-30 18:52:15 -0700
committer David S. Miller <davem@davemloft.net>  2009-07-30 18:52:15 -0700
commit    a33bc5c15154c835aae26f16e6a3a7d9ad4acb45 (patch)
tree      cf7683b1b2d2fd170bfc6650ea84a4b9a81eebbf /net
parent    9aada7ac047f789ffb27540cc1695989897b2dfe (diff)
xfrm: select sane defaults for xfrm[4|6] gc_thresh
Choose saner defaults for xfrm[4|6] gc_thresh values on init.

Currently, the xfrm[4|6] code has hard-coded initial gc_thresh values (set to 1024). Given that the ipv4 and ipv6 routing caches are sized dynamically at boot time, the static selections can be nonsensical. This patch dynamically selects an appropriate gc threshold based on the corresponding main routing table size, using the assumption that we should in the worst case be able to handle as many connections as the routing table can.

For ipv4, the maximum route cache size is 16 * the number of hash buckets in the route cache. Given that xfrm4 starts garbage collection at the gc_thresh and prevents new allocations at 2 * gc_thresh, we set gc_thresh to half the maximum route cache size.

For ipv6, it's a bit trickier: there is no maximum route cache size, but the ipv6 dst_ops gc_thresh is statically set to 1024. It seems sane to select a similar gc_thresh for the xfrm6 code that is half the number of hash buckets in the v6 route cache times 16 (like the v4 code does).

Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
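To make the sizing concrete, here is a minimal userspace sketch of the arithmetic the message describes. The bucket count is a made-up example; the real value is computed at boot from available memory, and none of these names are the kernel's own:

    #include <stdio.h>

    int main(void)
    {
            unsigned int buckets     = 65536;            /* hypothetical v4 route hash size */
            unsigned int rt_max_size = 16 * buckets;     /* v4 route cache maximum */
            unsigned int gc_thresh   = rt_max_size / 2;  /* the new xfrm4 default */

            /* gc begins at gc_thresh; allocations fail at 2 * gc_thresh,
             * which lands exactly on the route cache maximum. */
            printf("gc starts at %u, hard limit at %u (rt_max_size = %u)\n",
                   gc_thresh, 2 * gc_thresh, rt_max_size);
            return 0;
    }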
Diffstat (limited to 'net')
-rw-r--r--  net/ipv4/route.c         |  2 +-
-rw-r--r--  net/ipv4/xfrm4_policy.c  | 13 ++++++++++++-
-rw-r--r--  net/ipv6/ip6_fib.c       | 16 +++++-----------
-rw-r--r--  net/ipv6/xfrm6_policy.c  | 15 +++++++++++++++
4 files changed, 33 insertions(+), 13 deletions(-)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 278f46f..fafbe16 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -3442,7 +3442,7 @@ int __init ip_rt_init(void)
printk(KERN_ERR "Unable to create route proc files\n");
#ifdef CONFIG_XFRM
xfrm_init();
- xfrm4_init();
+ xfrm4_init(ip_rt_max_size);
#endif
rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL);
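Passing ip_rt_max_size here implies a matching prototype change for xfrm4_init() in a header outside this net/-limited diff (presumably include/net/xfrm.h; the header and parameter name are assumptions, not shown above). The declaration would read roughly:

    /* presumed declaration update, not part of the net/ diffstat */
    void __init xfrm4_init(int rt_max_size);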
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 26496ba..1ba4474 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -290,10 +290,21 @@ static void __exit xfrm4_policy_fini(void)
xfrm_policy_unregister_afinfo(&xfrm4_policy_afinfo);
}
-void __init xfrm4_init(void)
+void __init xfrm4_init(int rt_max_size)
{
xfrm4_state_init();
xfrm4_policy_init();
+ /*
+ * Select a default value for the gc_thresh based on the main route
+ * table hash size. It seems to me the worst case scenario is when
+ * we have ipsec operating in transport mode, in which case we create
+ * a dst_entry per socket. The xfrm gc algorithm starts trying to
+ * remove entries at gc_thresh, and prevents new allocations at
+ * 2*gc_thresh, so let's set an initial xfrm gc_thresh value at
+ * rt_max_size/2. That will let us store an ipsec connection per
+ * route table entry, and start cleaning when we're 1/2 full.
+ */
+ xfrm4_dst_ops.gc_thresh = rt_max_size/2;
sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv4_ctl_path,
xfrm4_policy_table);
}
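The comment above describes a two-level scheme: reclaim begins at gc_thresh and allocation is refused at twice that. A self-contained sketch of that policy follows; the names (gc_decision, GC_RUN, and so on) are illustrative, not the kernel's:

    #include <stdio.h>

    enum gc_action { GC_NONE, GC_RUN, GC_REFUSE_ALLOC };

    /* Decide what to do for a given entry count, per the 1x/2x
     * thresholds the patch comment describes. */
    enum gc_action gc_decision(unsigned int entries, unsigned int gc_thresh)
    {
            if (entries >= 2 * gc_thresh)
                    return GC_REFUSE_ALLOC;   /* new dst allocations fail */
            if (entries >= gc_thresh)
                    return GC_RUN;            /* reclaim starts */
            return GC_NONE;                   /* plenty of headroom */
    }

    int main(void)
    {
            unsigned int thresh = 1024;       /* illustrative threshold */

            printf("%d %d %d\n", gc_decision(512, thresh),
                   gc_decision(1500, thresh), gc_decision(2048, thresh));
            return 0;
    }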
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 52ee1dc..0e93ca5 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -164,12 +164,6 @@ static __inline__ void rt6_release(struct rt6_info *rt)
dst_free(&rt->u.dst);
}
-#ifdef CONFIG_IPV6_MULTIPLE_TABLES
-#define FIB_TABLE_HASHSZ 256
-#else
-#define FIB_TABLE_HASHSZ 1
-#endif
-
static void fib6_link_table(struct net *net, struct fib6_table *tb)
{
unsigned int h;
@@ -180,7 +174,7 @@ static void fib6_link_table(struct net *net, struct fib6_table *tb)
*/
rwlock_init(&tb->tb6_lock);
- h = tb->tb6_id & (FIB_TABLE_HASHSZ - 1);
+ h = tb->tb6_id & (FIB6_TABLE_HASHSZ - 1);
/*
* No protection necessary, this is the only list mutation
@@ -231,7 +225,7 @@ struct fib6_table *fib6_get_table(struct net *net, u32 id)
if (id == 0)
id = RT6_TABLE_MAIN;
- h = id & (FIB_TABLE_HASHSZ - 1);
+ h = id & (FIB6_TABLE_HASHSZ - 1);
rcu_read_lock();
head = &net->ipv6.fib_table_hash[h];
hlist_for_each_entry_rcu(tb, node, head, tb6_hlist) {
@@ -382,7 +376,7 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
arg.net = net;
w->args = &arg;
- for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) {
+ for (h = s_h; h < FIB6_TABLE_HASHSZ; h++, s_e = 0) {
e = 0;
head = &net->ipv6.fib_table_hash[h];
hlist_for_each_entry(tb, node, head, tb6_hlist) {
@@ -1368,7 +1362,7 @@ void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg),
unsigned int h;
rcu_read_lock();
- for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
+ for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
head = &net->ipv6.fib_table_hash[h];
hlist_for_each_entry_rcu(table, node, head, tb6_hlist) {
write_lock_bh(&table->tb6_lock);
@@ -1483,7 +1477,7 @@ static int fib6_net_init(struct net *net)
if (!net->ipv6.rt6_stats)
goto out_timer;
- net->ipv6.fib_table_hash = kcalloc(FIB_TABLE_HASHSZ,
+ net->ipv6.fib_table_hash = kcalloc(FIB6_TABLE_HASHSZ,
sizeof(*net->ipv6.fib_table_hash),
GFP_KERNEL);
if (!net->ipv6.fib_table_hash)
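The #ifdef block removed above does not simply vanish: the FIB6_TABLE_HASHSZ name used throughout this file must now be defined in a shared header so that xfrm6_policy.c can see it too (presumably include/net/ip6_fib.h, which falls outside the net/ scope of this diffstat). Mirroring the removed values, that definition would read:

    #ifdef CONFIG_IPV6_MULTIPLE_TABLES
    #define FIB6_TABLE_HASHSZ 256
    #else
    #define FIB6_TABLE_HASHSZ 1
    #endif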
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 4acc308..611cffc 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -323,6 +323,7 @@ static struct ctl_table_header *sysctl_hdr;
int __init xfrm6_init(void)
{
int ret;
+ unsigned int gc_thresh;
ret = xfrm6_policy_init();
if (ret)
@@ -331,6 +332,20 @@ int __init xfrm6_init(void)
ret = xfrm6_state_init();
if (ret)
goto out_policy;
+ /*
+ * We need a good default value for the xfrm6 gc threshold.
+ * In ipv4 we set it to the route hash table size * 8, which
+ * is half the size of the maximum route cache for ipv4. It
+ * would be good to do the same thing for v6, except the table is
+ * constructed differently here. Here each table for a net namespace
+ * can have FIB6_TABLE_HASHSZ entries, so let's go with the same
+ * computation that we used for ipv4 here. Also, let's keep the
+ * initial gc_thresh to a minimum of 1024, since the ipv6 route
+ * cache defaults to that as a minimum as well.
+ */
+ gc_thresh = FIB6_TABLE_HASHSZ * 8;
+ xfrm6_dst_ops.gc_thresh = (gc_thresh < 1024) ? 1024 : gc_thresh;
+
sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv6_ctl_path,
xfrm6_policy_table);
out:
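Working the v6 default through both configurations, using the FIB6_TABLE_HASHSZ values shown in the ip6_fib.c hunk above; a minimal userspace sketch, with a hypothetical helper name:

    #include <stdio.h>

    /* Same clamp as the xfrm6_init() hunk: hash size * 8, floor of 1024. */
    unsigned int xfrm6_default_gc_thresh(unsigned int hashsz)
    {
            unsigned int gc_thresh = hashsz * 8;

            return (gc_thresh < 1024) ? 1024 : gc_thresh;
    }

    int main(void)
    {
            printf("CONFIG_IPV6_MULTIPLE_TABLES: %u\n",
                   xfrm6_default_gc_thresh(256));  /* 256 * 8 = 2048 */
            printf("single table:                %u\n",
                   xfrm6_default_gc_thresh(1));    /* 8, clamped to 1024 */
            return 0;
    }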