author	Tom Herbert <therbert@google.com>	2010-03-23 13:39:19 +0000
committer	David S. Miller <davem@davemloft.net>	2010-03-23 23:17:18 -0700
commit	e51d739ab79110c43ca03daf3ddb3c52dadd38b7 (patch)
tree	b15bdd3cb58054cf052d821277408086a1cd7d0e /net/core
parent	ec43b1a64a132303a6800c781bc17c683aedc55b (diff)
net: Fix locking in flush_backlog
Need to take spinlocks when dequeuing from input_pkt_queue in
flush_backlog. Also, flush_backlog can now be called directly from
netdev_run_todo.

Signed-off-by: Tom Herbert <therbert@google.com>
Acked-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core')
-rw-r--r--	net/core/dev.c	12
1 files changed, 8 insertions, 4 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index a03aab4..5e3dc28 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2766,17 +2766,19 @@ int netif_receive_skb(struct sk_buff *skb)
 EXPORT_SYMBOL(netif_receive_skb);
 
 /* Network device is going away, flush any packets still pending */
-static void flush_backlog(void *arg)
+static void flush_backlog(struct net_device *dev, int cpu)
 {
-	struct net_device *dev = arg;
-	struct softnet_data *queue = &__get_cpu_var(softnet_data);
+	struct softnet_data *queue = &per_cpu(softnet_data, cpu);
 	struct sk_buff *skb, *tmp;
+	unsigned long flags;
 
+	spin_lock_irqsave(&queue->input_pkt_queue.lock, flags);
 	skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
 		if (skb->dev == dev) {
 			__skb_unlink(skb, &queue->input_pkt_queue);
 			kfree_skb(skb);
 		}
+	spin_unlock_irqrestore(&queue->input_pkt_queue.lock, flags);
 }
 
 static int napi_gro_complete(struct sk_buff *skb)
@@ -5545,6 +5547,7 @@ void netdev_run_todo(void)
 	while (!list_empty(&list)) {
 		struct net_device *dev
 			= list_first_entry(&list, struct net_device, todo_list);
+		int i;
 		list_del(&dev->todo_list);
 
 		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
@@ -5556,7 +5559,8 @@ void netdev_run_todo(void)
 
 		dev->reg_state = NETREG_UNREGISTERED;
 
-		on_each_cpu(flush_backlog, dev, 1);
+		for_each_online_cpu(i)
+			flush_backlog(dev, i);
 
 		netdev_wait_allrefs(dev);
 
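
For readers who want the pattern in isolation, the sketch below mirrors what the patch does: walk a per-CPU backlog queue under that queue's own spinlock, unlink and free any skb belonging to the departing device, and have the caller repeat this for every online CPU. This is an illustrative sketch only, not the committed code; drain_dev_backlog and drain_all_backlogs are hypothetical names, while the kernel APIs used (spin_lock_irqsave, skb_queue_walk_safe, __skb_unlink, kfree_skb, per_cpu, for_each_online_cpu) are the ordinary ones.

/*
 * Illustrative sketch (not the committed code): drain a departing
 * device's packets from one CPU's backlog queue while holding the
 * queue's lock, then repeat for every online CPU.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>

static void drain_dev_backlog(struct sk_buff_head *queue,
			      struct net_device *dev)
{
	struct sk_buff *skb, *tmp;
	unsigned long flags;

	/*
	 * Another CPU may be enqueueing into this queue concurrently,
	 * so the walk and __skb_unlink() must run under queue->lock.
	 */
	spin_lock_irqsave(&queue->lock, flags);
	skb_queue_walk_safe(queue, skb, tmp) {
		if (skb->dev == dev) {
			__skb_unlink(skb, queue);	/* list lock is held */
			kfree_skb(skb);
		}
	}
	spin_unlock_irqrestore(&queue->lock, flags);
}

static void drain_all_backlogs(struct net_device *dev)
{
	int cpu;

	/* One pass per online CPU, as netdev_run_todo() now does. */
	for_each_online_cpu(cpu)
		drain_dev_backlog(&per_cpu(softnet_data, cpu).input_pkt_queue,
				  dev);
}

The explicit locking matters because of the second half of the commit message: the old code ran flush_backlog via on_each_cpu(), with each CPU clearing only its own queue from IPI context, whereas the direct call from netdev_run_todo() walks other CPUs' input_pkt_queue and therefore has to take each queue's lock before unlinking.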