author	Eric Dumazet <eric.dumazet@gmail.com>	2010-06-09 15:47:41 +0200
committer	Patrick McHardy <kaber@trash.net>	2010-06-09 15:47:41 +0200
commit	5756d346c7cdefcd84a8ac4901167cdfb5447b69 (patch)
tree	4ac2c15adef1bd1e62292815a04ce7c8aefb765a /net
parent	2bf074825403e0e0d623bac7573580773b78abef (diff)
netfilter: ip_queue: rwlock to spinlock conversion
Convert the queue_lock rwlock to a spinlock. The read-locked sections can be replaced by plain reads of integer values, so ipq_enqueue_packet() now needs one atomic lock operation instead of four per call.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: Patrick McHardy <kaber@trash.net>
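The conversion rests on a common pattern: the integer configuration values (copy_mode, copy_range) are still written under the lock, but readers take a single ACCESS_ONCE() snapshot and work on the local copy, so the read-side lock disappears and only writers need the (now cheaper) spinlock. A minimal sketch of that pattern follows; it is illustrative only, and the names cfg_lock, cfg_range, cfg_set_range and cfg_read_range are hypothetical, not part of this patch.

#include <linux/spinlock.h>
#include <linux/compiler.h>	/* ACCESS_ONCE() */

static DEFINE_SPINLOCK(cfg_lock);
static unsigned int cfg_range;		/* hypothetical integer config value */

static void cfg_set_range(unsigned int range)
{
	spin_lock_bh(&cfg_lock);	/* writers still serialize against each other */
	cfg_range = range;
	spin_unlock_bh(&cfg_lock);
}

static unsigned int cfg_read_range(void)
{
	/* One plain load, forced to happen exactly once; callers then use the
	 * returned snapshot consistently instead of re-reading the shared
	 * variable under a read lock. */
	return ACCESS_ONCE(cfg_range);
}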
Diffstat (limited to 'net')
-rw-r--r--	net/ipv4/netfilter/ip_queue.c	57
1 file changed, 25 insertions(+), 32 deletions(-)
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index a4e5fc5..d2c1311 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -42,7 +42,7 @@ typedef int (*ipq_cmpfn)(struct nf_queue_entry *, unsigned long);
static unsigned char copy_mode __read_mostly = IPQ_COPY_NONE;
static unsigned int queue_maxlen __read_mostly = IPQ_QMAX_DEFAULT;
-static DEFINE_RWLOCK(queue_lock);
+static DEFINE_SPINLOCK(queue_lock);
static int peer_pid __read_mostly;
static unsigned int copy_range __read_mostly;
static unsigned int queue_total;
@@ -72,10 +72,10 @@ __ipq_set_mode(unsigned char mode, unsigned int range)
break;
case IPQ_COPY_PACKET:
- copy_mode = mode;
+ if (range > 0xFFFF)
+ range = 0xFFFF;
copy_range = range;
- if (copy_range > 0xFFFF)
- copy_range = 0xFFFF;
+ copy_mode = mode;
break;
default:
@@ -101,7 +101,7 @@ ipq_find_dequeue_entry(unsigned long id)
{
struct nf_queue_entry *entry = NULL, *i;
- write_lock_bh(&queue_lock);
+ spin_lock_bh(&queue_lock);
list_for_each_entry(i, &queue_list, list) {
if ((unsigned long)i == id) {
@@ -115,7 +115,7 @@ ipq_find_dequeue_entry(unsigned long id)
queue_total--;
}
- write_unlock_bh(&queue_lock);
+ spin_unlock_bh(&queue_lock);
return entry;
}
@@ -136,9 +136,9 @@ __ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
static void
ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
{
- write_lock_bh(&queue_lock);
+ spin_lock_bh(&queue_lock);
__ipq_flush(cmpfn, data);
- write_unlock_bh(&queue_lock);
+ spin_unlock_bh(&queue_lock);
}
static struct sk_buff *
@@ -152,9 +152,7 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
struct nlmsghdr *nlh;
struct timeval tv;
- read_lock_bh(&queue_lock);
-
- switch (copy_mode) {
+ switch (ACCESS_ONCE(copy_mode)) {
case IPQ_COPY_META:
case IPQ_COPY_NONE:
size = NLMSG_SPACE(sizeof(*pmsg));
@@ -162,26 +160,21 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
case IPQ_COPY_PACKET:
if (entry->skb->ip_summed == CHECKSUM_PARTIAL &&
- (*errp = skb_checksum_help(entry->skb))) {
- read_unlock_bh(&queue_lock);
+ (*errp = skb_checksum_help(entry->skb)))
return NULL;
- }
- if (copy_range == 0 || copy_range > entry->skb->len)
+
+ data_len = ACCESS_ONCE(copy_range);
+ if (data_len == 0 || data_len > entry->skb->len)
data_len = entry->skb->len;
- else
- data_len = copy_range;
size = NLMSG_SPACE(sizeof(*pmsg) + data_len);
break;
default:
*errp = -EINVAL;
- read_unlock_bh(&queue_lock);
return NULL;
}
- read_unlock_bh(&queue_lock);
-
skb = alloc_skb(size, GFP_ATOMIC);
if (!skb)
goto nlmsg_failure;
@@ -242,7 +235,7 @@ ipq_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
if (nskb == NULL)
return status;
- write_lock_bh(&queue_lock);
+ spin_lock_bh(&queue_lock);
if (!peer_pid)
goto err_out_free_nskb;
@@ -266,14 +259,14 @@ ipq_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
__ipq_enqueue_entry(entry);
- write_unlock_bh(&queue_lock);
+ spin_unlock_bh(&queue_lock);
return status;
err_out_free_nskb:
kfree_skb(nskb);
err_out_unlock:
- write_unlock_bh(&queue_lock);
+ spin_unlock_bh(&queue_lock);
return status;
}
@@ -342,9 +335,9 @@ ipq_set_mode(unsigned char mode, unsigned int range)
{
int status;
- write_lock_bh(&queue_lock);
+ spin_lock_bh(&queue_lock);
status = __ipq_set_mode(mode, range);
- write_unlock_bh(&queue_lock);
+ spin_unlock_bh(&queue_lock);
return status;
}
@@ -440,11 +433,11 @@ __ipq_rcv_skb(struct sk_buff *skb)
if (security_netlink_recv(skb, CAP_NET_ADMIN))
RCV_SKB_FAIL(-EPERM);
- write_lock_bh(&queue_lock);
+ spin_lock_bh(&queue_lock);
if (peer_pid) {
if (peer_pid != pid) {
- write_unlock_bh(&queue_lock);
+ spin_unlock_bh(&queue_lock);
RCV_SKB_FAIL(-EBUSY);
}
} else {
@@ -452,7 +445,7 @@ __ipq_rcv_skb(struct sk_buff *skb)
peer_pid = pid;
}
- write_unlock_bh(&queue_lock);
+ spin_unlock_bh(&queue_lock);
status = ipq_receive_peer(NLMSG_DATA(nlh), type,
nlmsglen - NLMSG_LENGTH(0));
@@ -497,10 +490,10 @@ ipq_rcv_nl_event(struct notifier_block *this,
struct netlink_notify *n = ptr;
if (event == NETLINK_URELEASE && n->protocol == NETLINK_FIREWALL) {
- write_lock_bh(&queue_lock);
+ spin_lock_bh(&queue_lock);
if ((net_eq(n->net, &init_net)) && (n->pid == peer_pid))
__ipq_reset();
- write_unlock_bh(&queue_lock);
+ spin_unlock_bh(&queue_lock);
}
return NOTIFY_DONE;
}
@@ -527,7 +520,7 @@ static ctl_table ipq_table[] = {
#ifdef CONFIG_PROC_FS
static int ip_queue_show(struct seq_file *m, void *v)
{
- read_lock_bh(&queue_lock);
+ spin_lock_bh(&queue_lock);
seq_printf(m,
"Peer PID : %d\n"
@@ -545,7 +538,7 @@ static int ip_queue_show(struct seq_file *m, void *v)
queue_dropped,
queue_user_dropped);
- read_unlock_bh(&queue_lock);
+ spin_unlock_bh(&queue_lock);
return 0;
}