author		David S. Miller <davem@sunset.davemloft.net>	2007-02-20 23:51:47 -0800
committer	David S. Miller <davem@sunset.davemloft.net>	2007-02-26 11:42:48 -0800
commit		2c4f6219aca5939b57596278ea8b014275d4917b (patch)
tree		4635aab17f05da9945e112c61c54e93788417f4e	/net/ipv4/tcp.c
parent		7f62ad5d37f4e43c841e92c6f159c93dcf2d2cdd (diff)
[TCP]: Fix MD5 signature pool locking.
The locking calls assumed that these code paths were only invoked in
software interrupt context, but that isn't true.  Therefore we need to
use spin_{lock,unlock}_bh() throughout.

Signed-off-by: David S. Miller <davem@davemloft.net>
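Why the _bh variants matter here: a plain spin_lock() leaves software interrupts enabled, so if a softirq that also takes tcp_md5sig_pool_lock fires on the same CPU while process context holds the lock, the softirq spins forever and the CPU deadlocks. A minimal illustrative sketch, with a hypothetical lock and handlers that are not part of this patch:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(demo_lock);

	/* Process context (e.g. a setsockopt() handler). */
	static void demo_process_path(void)
	{
		spin_lock(&demo_lock);		/* BUG: softirqs stay enabled */
		/* ... a softirq may fire on this CPU right here ... */
		spin_unlock(&demo_lock);
	}

	/* Softirq context (e.g. TCP receive processing). */
	static void demo_softirq_path(void)
	{
		spin_lock(&demo_lock);		/* spins forever if it interrupted
						 * demo_process_path() on this CPU */
		spin_unlock(&demo_lock);
	}

	/* The fix: process context must hold off bottom halves. */
	static void demo_process_path_fixed(void)
	{
		spin_lock_bh(&demo_lock);	/* softirqs blocked until unlock */
		/* ... critical section ... */
		spin_unlock_bh(&demo_lock);
	}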
Diffstat (limited to 'net/ipv4/tcp.c')
-rw-r--r--	net/ipv4/tcp.c	24
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index ac6516c..74c4d10 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2266,12 +2266,12 @@ void tcp_free_md5sig_pool(void)
 {
 	struct tcp_md5sig_pool **pool = NULL;
 
-	spin_lock(&tcp_md5sig_pool_lock);
+	spin_lock_bh(&tcp_md5sig_pool_lock);
 	if (--tcp_md5sig_users == 0) {
 		pool = tcp_md5sig_pool;
 		tcp_md5sig_pool = NULL;
 	}
-	spin_unlock(&tcp_md5sig_pool_lock);
+	spin_unlock_bh(&tcp_md5sig_pool_lock);
 	if (pool)
 		__tcp_free_md5sig_pool(pool);
 }
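Note the shape of tcp_free_md5sig_pool() above: the pool pointer is detached while the lock is held, but __tcp_free_md5sig_pool() runs only after spin_unlock_bh(), keeping the heavyweight per-CPU teardown out of the critical section. A hedged sketch of that detach-then-free pattern, with hypothetical names:

	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct demo_pool { int dummy; };

	static DEFINE_SPINLOCK(demo_pool_lock);
	static struct demo_pool *demo_pool_ptr;
	static int demo_pool_users;

	static void demo_pool_put(void)
	{
		struct demo_pool *victim = NULL;

		spin_lock_bh(&demo_pool_lock);
		if (--demo_pool_users == 0) {
			victim = demo_pool_ptr;	/* detach under the lock ... */
			demo_pool_ptr = NULL;
		}
		spin_unlock_bh(&demo_pool_lock);

		if (victim)
			kfree(victim);		/* ... free with no lock held */
	}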
@@ -2314,36 +2314,36 @@ struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void)
 	int alloc = 0;
 
 retry:
-	spin_lock(&tcp_md5sig_pool_lock);
+	spin_lock_bh(&tcp_md5sig_pool_lock);
 	pool = tcp_md5sig_pool;
 	if (tcp_md5sig_users++ == 0) {
 		alloc = 1;
-		spin_unlock(&tcp_md5sig_pool_lock);
+		spin_unlock_bh(&tcp_md5sig_pool_lock);
 	} else if (!pool) {
 		tcp_md5sig_users--;
-		spin_unlock(&tcp_md5sig_pool_lock);
+		spin_unlock_bh(&tcp_md5sig_pool_lock);
 		cpu_relax();
 		goto retry;
 	} else
-		spin_unlock(&tcp_md5sig_pool_lock);
+		spin_unlock_bh(&tcp_md5sig_pool_lock);
 
 	if (alloc) {
 		/* we cannot hold spinlock here because this may sleep. */
 		struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool();
-		spin_lock(&tcp_md5sig_pool_lock);
+		spin_lock_bh(&tcp_md5sig_pool_lock);
 		if (!p) {
 			tcp_md5sig_users--;
-			spin_unlock(&tcp_md5sig_pool_lock);
+			spin_unlock_bh(&tcp_md5sig_pool_lock);
 			return NULL;
 		}
 		pool = tcp_md5sig_pool;
 		if (pool) {
 			/* oops, it has already been assigned. */
-			spin_unlock(&tcp_md5sig_pool_lock);
+			spin_unlock_bh(&tcp_md5sig_pool_lock);
 			__tcp_free_md5sig_pool(p);
 		} else {
 			tcp_md5sig_pool = pool = p;
-			spin_unlock(&tcp_md5sig_pool_lock);
+			spin_unlock_bh(&tcp_md5sig_pool_lock);
 		}
 	}
 	return pool;
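The "we cannot hold spinlock here because this may sleep" comment is the heart of this function: the lock is dropped around the sleeping allocation and retaken afterwards, and tcp_md5sig_pool must then be rechecked because another CPU may have installed a pool in the meantime. A minimal sketch of that drop-allocate-recheck pattern, with hypothetical names and kzalloc() standing in for the real per-CPU pool setup:

	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct demo_cache { int dummy; };

	static DEFINE_SPINLOCK(demo_cache_lock);
	static struct demo_cache *demo_cache;

	static struct demo_cache *demo_cache_get(void)
	{
		struct demo_cache *ret;
		/* GFP_KERNEL may sleep: no spinlock may be held here. */
		struct demo_cache *p = kzalloc(sizeof(*p), GFP_KERNEL);

		spin_lock_bh(&demo_cache_lock);
		ret = demo_cache;
		if (ret) {
			/* Lost the race: another CPU installed one first. */
			spin_unlock_bh(&demo_cache_lock);
			kfree(p);
		} else {
			ret = demo_cache = p;
			spin_unlock_bh(&demo_cache_lock);
		}
		return ret;
	}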
@@ -2354,11 +2354,11 @@ EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
 
 struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
 {
 	struct tcp_md5sig_pool **p;
-	spin_lock(&tcp_md5sig_pool_lock);
+	spin_lock_bh(&tcp_md5sig_pool_lock);
 	p = tcp_md5sig_pool;
 	if (p)
 		tcp_md5sig_users++;
-	spin_unlock(&tcp_md5sig_pool_lock);
+	spin_unlock_bh(&tcp_md5sig_pool_lock);
 	return (p ? *per_cpu_ptr(p, cpu) : NULL);
 }
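__tcp_get_md5sig_pool() returns a per-CPU entry and bumps tcp_md5sig_users, so a caller has to stay on one CPU while using the entry and drop the reference afterwards. A hedged sketch of a caller, assuming the usual pairing with local_bh_disable() (the demo_* wrappers are illustrative, not from this file):

	/* Pin to the current CPU by disabling bottom halves, then fetch
	 * this CPU's pool entry; __tcp_get_md5sig_pool() also takes a
	 * reference on tcp_md5sig_users. */
	static struct tcp_md5sig_pool *demo_get_pool(void)
	{
		struct tcp_md5sig_pool *pool;

		local_bh_disable();
		pool = __tcp_get_md5sig_pool(smp_processor_id());
		if (!pool)
			local_bh_enable();	/* nothing to use, re-enable */
		return pool;
	}

	/* Drop the reference taken above, then let softirqs run again. */
	static void demo_put_pool(void)
	{
		tcp_free_md5sig_pool();
		local_bh_enable();
	}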