From 1e94d72feab025b8f7c55d07020602f82f3a97dd Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Thu, 18 Mar 2010 17:45:44 -0700 Subject: rps: Fixed build with CONFIG_SMP not enabled. Signed-off-by: Tom Herbert Signed-off-by: David S. Miller --- net/core/dev.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) (limited to 'net/core/dev.c') diff --git a/net/core/dev.c b/net/core/dev.c index 17b1686..1a7e1d1 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2174,6 +2174,7 @@ int weight_p __read_mostly = 64; /* old backlog weight */ DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, }; +#ifdef CONFIG_SMP /* * get_rps_cpu is called from netif_receive_skb and returns the target * CPU from the RPS map of the receiving queue for a given skb. @@ -2293,6 +2294,7 @@ static void trigger_softirq(void *data) __napi_schedule(&queue->backlog); __get_cpu_var(netdev_rx_stat).received_rps++; } +#endif /* CONFIG_SMP */ /* * enqueue_to_backlog is called to queue an skb to a per CPU backlog @@ -2320,6 +2322,7 @@ enqueue: /* Schedule NAPI for backlog device */ if (napi_schedule_prep(&queue->backlog)) { +#ifdef CONFIG_SMP if (cpu != smp_processor_id()) { struct rps_remote_softirq_cpus *rcpus = &__get_cpu_var(rps_remote_softirq_cpus); @@ -2328,6 +2331,9 @@ enqueue: __raise_softirq_irqoff(NET_RX_SOFTIRQ); } else __napi_schedule(&queue->backlog); +#else + __napi_schedule(&queue->backlog); +#endif } goto enqueue; } @@ -2367,9 +2373,13 @@ int netif_rx(struct sk_buff *skb) if (!skb->tstamp.tv64) net_timestamp(skb); +#ifdef CONFIG_SMP cpu = get_rps_cpu(skb->dev, skb); if (cpu < 0) cpu = smp_processor_id(); +#else + cpu = smp_processor_id(); +#endif return enqueue_to_backlog(skb, cpu); } @@ -2735,6 +2745,7 @@ out: */ int netif_receive_skb(struct sk_buff *skb) { +#ifdef CONFIG_SMP int cpu; cpu = get_rps_cpu(skb->dev, skb); @@ -2743,6 +2754,9 @@ int netif_receive_skb(struct sk_buff *skb) return __netif_receive_skb(skb); else return enqueue_to_backlog(skb, cpu); +#else + return __netif_receive_skb(skb); +#endif } EXPORT_SYMBOL(netif_receive_skb); @@ -3168,6 +3182,7 @@ void netif_napi_del(struct napi_struct *napi) } EXPORT_SYMBOL(netif_napi_del); +#ifdef CONFIG_SMP /* * net_rps_action sends any pending IPI's for rps. This is only called from * softirq and interrupts must be enabled. 
@@ -3184,6 +3199,7 @@ static void net_rps_action(cpumask_t *mask) } cpus_clear(*mask); } +#endif static void net_rx_action(struct softirq_action *h) { @@ -3191,8 +3207,10 @@ static void net_rx_action(struct softirq_action *h) unsigned long time_limit = jiffies + 2; int budget = netdev_budget; void *have; +#ifdef CONFIG_SMP int select; struct rps_remote_softirq_cpus *rcpus; +#endif local_irq_disable(); @@ -3255,6 +3273,7 @@ static void net_rx_action(struct softirq_action *h) netpoll_poll_unlock(have); } out: +#ifdef CONFIG_SMP rcpus = &__get_cpu_var(rps_remote_softirq_cpus); select = rcpus->select; rcpus->select ^= 1; @@ -3262,6 +3281,9 @@ out: local_irq_enable(); net_rps_action(&rcpus->mask[select]); +#else + local_irq_enable(); +#endif #ifdef CONFIG_NET_DMA /* @@ -6204,9 +6226,11 @@ static int __init net_dev_init(void) queue->completion_queue = NULL; INIT_LIST_HEAD(&queue->poll_list); +#ifdef CONFIG_SMP queue->csd.func = trigger_softirq; queue->csd.info = queue; queue->csd.flags = 0; +#endif queue->backlog.poll = process_backlog; queue->backlog.weight = weight_p; -- cgit v1.1 From 3ca5b4042ecae5e73c59de62e4ac0db31c10e0f8 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Wed, 10 Mar 2010 10:29:35 +0000 Subject: bonding: check return value of nofitier when changing type This patch adds the possibility to refuse the bonding type change for other subsystems (such as for example bridge, vlan, etc.) Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- net/core/dev.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'net/core/dev.c') diff --git a/net/core/dev.c b/net/core/dev.c index 1a7e1d1..d1f027c 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1084,9 +1084,9 @@ void netdev_state_change(struct net_device *dev) } EXPORT_SYMBOL(netdev_state_change); -void netdev_bonding_change(struct net_device *dev, unsigned long event) +int netdev_bonding_change(struct net_device *dev, unsigned long event) { - call_netdevice_notifiers(event, dev); + return call_netdevice_notifiers(event, dev); } EXPORT_SYMBOL(netdev_bonding_change); -- cgit v1.1 From 0641e4fbf2f824faee00ea74c459a088d94905fd Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 18 Mar 2010 21:16:45 -0700 Subject: net: Potential null skb->dev dereference When doing "ifenslave -d bond0 eth0", there is chance to get NULL dereference in netif_receive_skb(), because dev->master suddenly becomes NULL after we tested it. We should use ACCESS_ONCE() to avoid this (or rcu_dereference()) Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/core/dev.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'net/core/dev.c') diff --git a/net/core/dev.c b/net/core/dev.c index bcc490c..59d4394 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2483,6 +2483,7 @@ int netif_receive_skb(struct sk_buff *skb) { struct packet_type *ptype, *pt_prev; struct net_device *orig_dev; + struct net_device *master; struct net_device *null_or_orig; struct net_device *null_or_bond; int ret = NET_RX_DROP; @@ -2503,11 +2504,12 @@ int netif_receive_skb(struct sk_buff *skb) null_or_orig = NULL; orig_dev = skb->dev; - if (orig_dev->master) { - if (skb_bond_should_drop(skb)) + master = ACCESS_ONCE(orig_dev->master); + if (master) { + if (skb_bond_should_drop(skb, master)) null_or_orig = orig_dev; /* deliver only exact match */ else - skb->dev = orig_dev->master; + skb->dev = master; } __get_cpu_var(netdev_rx_stat).total++; -- cgit v1.1 From 32a806c194ea112cfab00f558482dd97bee5e44e Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 19 Mar 2010 04:00:23 +0000 Subject: bonding: flush unicast and multicast lists when changing type After the type change, addresses in unicast and multicast lists wouldn't make sense, not to mention possible different lenghts. So flush both lists here. Note "dev_addr_discard" will be very soon replaced by "dev_mc_flush" (once mc_list conversion will be done). Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- net/core/dev.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'net/core/dev.c') diff --git a/net/core/dev.c b/net/core/dev.c index c0e2608..fe2a754 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4457,12 +4457,13 @@ void dev_unicast_unsync(struct net_device *to, struct net_device *from) } EXPORT_SYMBOL(dev_unicast_unsync); -static void dev_unicast_flush(struct net_device *dev) +void dev_unicast_flush(struct net_device *dev) { netif_addr_lock_bh(dev); __hw_addr_flush(&dev->uc); netif_addr_unlock_bh(dev); } +EXPORT_SYMBOL(dev_unicast_flush); static void dev_unicast_init(struct net_device *dev) { @@ -4484,7 +4485,7 @@ static void __dev_addr_discard(struct dev_addr_list **list) } } -static void dev_addr_discard(struct net_device *dev) +void dev_addr_discard(struct net_device *dev) { netif_addr_lock_bh(dev); @@ -4493,6 +4494,7 @@ static void dev_addr_discard(struct net_device *dev) netif_addr_unlock_bh(dev); } +EXPORT_SYMBOL(dev_addr_discard); /** * dev_get_flags - get flags reported to userspace -- cgit v1.1 From 283f2fe87e980d8af5ad8aa63751e7e3258ee05a Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 18 Mar 2010 13:37:40 +0000 Subject: net: speedup netdev_set_master() We currently force a synchronize_net() in netdev_set_master() This seems necessary only when a slave had a master and we dismantle it. In the other case ("ifenslave bond0 ethO"), we dont need this long delay. Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/core/dev.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'net/core/dev.c') diff --git a/net/core/dev.c b/net/core/dev.c index fe2a754..2d01f18 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3757,11 +3757,10 @@ int netdev_set_master(struct net_device *slave, struct net_device *master) slave->master = master; - synchronize_net(); - - if (old) + if (old) { + synchronize_net(); dev_put(old); - + } if (master) slave->flags |= IFF_SLAVE; else -- cgit v1.1 From 99fe3c391d50d381687fd84ed0ab22d57079e41f Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 18 Mar 2010 11:27:25 +0000 Subject: net: dev_getfirstbyhwtype() optimization Use RCU to avoid RTNL use in dev_getfirstbyhwtype() Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/core/dev.c | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) (limited to 'net/core/dev.c') diff --git a/net/core/dev.c b/net/core/dev.c index 2d01f18..a03aab4 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -772,14 +772,17 @@ EXPORT_SYMBOL(__dev_getfirstbyhwtype); struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type) { - struct net_device *dev; + struct net_device *dev, *ret = NULL; - rtnl_lock(); - dev = __dev_getfirstbyhwtype(net, type); - if (dev) - dev_hold(dev); - rtnl_unlock(); - return dev; + rcu_read_lock(); + for_each_netdev_rcu(net, dev) + if (dev->type == type) { + dev_hold(dev); + ret = dev; + break; + } + rcu_read_unlock(); + return ret; } EXPORT_SYMBOL(dev_getfirstbyhwtype); -- cgit v1.1 From e51d739ab79110c43ca03daf3ddb3c52dadd38b7 Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Tue, 23 Mar 2010 13:39:19 +0000 Subject: net: Fix locking in flush_backlog Need to take spinlocks when dequeuing from input_pkt_queue in flush_backlog. Also, flush_backlog can now be called directly from netdev_run_todo. Signed-off-by: Tom Herbert Acked-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/core/dev.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) (limited to 'net/core/dev.c') diff --git a/net/core/dev.c b/net/core/dev.c index a03aab4..5e3dc28 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2766,17 +2766,19 @@ int netif_receive_skb(struct sk_buff *skb) EXPORT_SYMBOL(netif_receive_skb); /* Network device is going away, flush any packets still pending */ -static void flush_backlog(void *arg) +static void flush_backlog(struct net_device *dev, int cpu) { - struct net_device *dev = arg; - struct softnet_data *queue = &__get_cpu_var(softnet_data); + struct softnet_data *queue = &per_cpu(softnet_data, cpu); struct sk_buff *skb, *tmp; + unsigned long flags; + spin_lock_irqsave(&queue->input_pkt_queue.lock, flags); skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp) if (skb->dev == dev) { __skb_unlink(skb, &queue->input_pkt_queue); kfree_skb(skb); } + spin_unlock_irqrestore(&queue->input_pkt_queue.lock, flags); } static int napi_gro_complete(struct sk_buff *skb) @@ -5545,6 +5547,7 @@ void netdev_run_todo(void) while (!list_empty(&list)) { struct net_device *dev = list_first_entry(&list, struct net_device, todo_list); + int i; list_del(&dev->todo_list); if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) { @@ -5556,7 +5559,8 @@ void netdev_run_todo(void) dev->reg_state = NETREG_UNREGISTERED; - on_each_cpu(flush_backlog, dev, 1); + for_each_online_cpu(i) + flush_backlog(dev, i); netdev_wait_allrefs(dev); -- cgit v1.1 From df3345457a7a174dfb5872a070af80d456985038 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 24 Mar 2010 19:13:54 +0000 Subject: rps: add CONFIG_RPS RPS currently depends on SMP and SYSFS Adding a CONFIG_RPS makes sense in case this requirement changes in the future. This patch saves about 1500 bytes of kernel text in case SMP is on but SYSFS is off. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/core/dev.c | 29 +++++++++++++++++++---------- 1 file changed, 19 insertions(+), 10 deletions(-) (limited to 'net/core/dev.c') diff --git a/net/core/dev.c b/net/core/dev.c index 5e3dc28..bcb3ed2 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2177,7 +2177,7 @@ int weight_p __read_mostly = 64; /* old backlog weight */ DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, }; -#ifdef CONFIG_SMP +#ifdef CONFIG_RPS /* * get_rps_cpu is called from netif_receive_skb and returns the target * CPU from the RPS map of the receiving queue for a given skb. @@ -2325,7 +2325,7 @@ enqueue: /* Schedule NAPI for backlog device */ if (napi_schedule_prep(&queue->backlog)) { -#ifdef CONFIG_SMP +#ifdef CONFIG_RPS if (cpu != smp_processor_id()) { struct rps_remote_softirq_cpus *rcpus = &__get_cpu_var(rps_remote_softirq_cpus); @@ -2376,7 +2376,7 @@ int netif_rx(struct sk_buff *skb) if (!skb->tstamp.tv64) net_timestamp(skb); -#ifdef CONFIG_SMP +#ifdef CONFIG_RPS cpu = get_rps_cpu(skb->dev, skb); if (cpu < 0) cpu = smp_processor_id(); @@ -2750,7 +2750,7 @@ out: */ int netif_receive_skb(struct sk_buff *skb) { -#ifdef CONFIG_SMP +#ifdef CONFIG_RPS int cpu; cpu = get_rps_cpu(skb->dev, skb); @@ -3189,7 +3189,7 @@ void netif_napi_del(struct napi_struct *napi) } EXPORT_SYMBOL(netif_napi_del); -#ifdef CONFIG_SMP +#ifdef CONFIG_RPS /* * net_rps_action sends any pending IPI's for rps. This is only called from * softirq and interrupts must be enabled. 
@@ -3214,7 +3214,7 @@ static void net_rx_action(struct softirq_action *h) unsigned long time_limit = jiffies + 2; int budget = netdev_budget; void *have; -#ifdef CONFIG_SMP +#ifdef CONFIG_RPS int select; struct rps_remote_softirq_cpus *rcpus; #endif @@ -3280,7 +3280,7 @@ static void net_rx_action(struct softirq_action *h) netpoll_poll_unlock(have); } out: -#ifdef CONFIG_SMP +#ifdef CONFIG_RPS rcpus = &__get_cpu_var(rps_remote_softirq_cpus); select = rcpus->select; rcpus->select ^= 1; @@ -5277,6 +5277,7 @@ int register_netdevice(struct net_device *dev) dev->iflink = -1; +#ifdef CONFIG_RPS if (!dev->num_rx_queues) { /* * Allocate a single RX queue if driver never called @@ -5293,7 +5294,7 @@ int register_netdevice(struct net_device *dev) atomic_set(&dev->_rx->count, 1); dev->num_rx_queues = 1; } - +#endif /* Init, if this function is available */ if (dev->netdev_ops->ndo_init) { ret = dev->netdev_ops->ndo_init(dev); @@ -5653,11 +5654,13 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, void (*setup)(struct net_device *), unsigned int queue_count) { struct netdev_queue *tx; - struct netdev_rx_queue *rx; struct net_device *dev; size_t alloc_size; struct net_device *p; +#ifdef CONFIG_RPS + struct netdev_rx_queue *rx; int i; +#endif BUG_ON(strlen(name) >= sizeof(dev->name)); @@ -5683,6 +5686,7 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, goto free_p; } +#ifdef CONFIG_RPS rx = kcalloc(queue_count, sizeof(struct netdev_rx_queue), GFP_KERNEL); if (!rx) { printk(KERN_ERR "alloc_netdev: Unable to allocate " @@ -5698,6 +5702,7 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, */ for (i = 0; i < queue_count; i++) rx[i].first = rx; +#endif dev = PTR_ALIGN(p, NETDEV_ALIGN); dev->padded = (char *)dev - (char *)p; @@ -5713,8 +5718,10 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, dev->num_tx_queues = queue_count; dev->real_num_tx_queues = queue_count; +#ifdef CONFIG_RPS dev->_rx = rx; dev->num_rx_queues = queue_count; +#endif dev->gso_max_size = GSO_MAX_SIZE; @@ -5731,8 +5738,10 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, return dev; free_rx: +#ifdef CONFIG_RPS kfree(rx); free_tx: +#endif kfree(tx); free_p: kfree(p); @@ -6236,7 +6245,7 @@ static int __init net_dev_init(void) queue->completion_queue = NULL; INIT_LIST_HEAD(&queue->poll_list); -#ifdef CONFIG_SMP +#ifdef CONFIG_RPS queue->csd.func = trigger_softirq; queue->csd.info = queue; queue->csd.flags = 0; -- cgit v1.1 From 10f744d205dde72a0016dbdb11e239da8269958b Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sun, 28 Mar 2010 23:07:20 -0700 Subject: net: __netif_receive_skb should be static Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/core/dev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net/core/dev.c') diff --git a/net/core/dev.c b/net/core/dev.c index bcb3ed2..887aa84 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2621,7 +2621,7 @@ void netif_nit_deliver(struct sk_buff *skb) rcu_read_unlock(); } -int __netif_receive_skb(struct sk_buff *skb) +static int __netif_receive_skb(struct sk_buff *skb) { struct packet_type *ptype, *pt_prev; struct net_device *orig_dev; -- cgit v1.1 From 5a0e3ad6af8660be21ca98a971cd00f331318c05 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 24 Mar 2010 17:04:11 +0900 Subject: include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit slab.h inclusion from percpu.h percpu.h is included by sched.h and module.h and thus ends up being included when building most .c files. percpu.h includes slab.h which in turn includes gfp.h making everything defined by the two files universally available and complicating inclusion dependencies. percpu.h -> slab.h dependency is about to be removed. Prepare for this change by updating users of gfp and slab facilities to include those headers directly instead of assuming availability. As this conversion needs to touch a large number of source files, the following script is used as the basis of conversion. http://userweb.kernel.org/~tj/misc/slabh-sweep.py The script does the following.

* Scan files for gfp and slab usages and update includes such that only the necessary includes are there. i.e. if only gfp is used, gfp.h; if slab is used, slab.h.

* When the script inserts a new include, it looks at the include blocks and tries to put the new include such that its order conforms to its surroundings. It's put in the include block which contains core kernel includes, in the same order that the rest are ordered - alphabetical, Christmas tree, rev-Xmas-tree or at the end if there doesn't seem to be any matching order.

* If the script can't find a place to put a new include (mostly because the file doesn't have a fitting include block), it prints out an error message indicating which .h file needs to be added to the file.

The conversion was done in the following steps.

1. The initial automatic conversion of all .c files updated slightly over 4000 files, deleting around 700 includes and adding ~480 gfp.h and ~3000 slab.h inclusions. The script emitted errors for ~400 files.

2. Each error was manually checked. Some didn't need the inclusion, some needed manual addition while adding it to implementation .h or embedding .c file was more appropriate for others. This step added inclusions to around 150 files.

3. The script was run again and the output was compared to the edits from #2 to make sure no file was left behind.

4. Several build tests were done and a couple of problems were fixed. e.g. lib/decompress_*.c used malloc/free() wrappers around slab APIs requiring slab.h to be added manually.

5. The script was run on all .h files but without automatically editing them as sprinkling gfp.h and slab.h inclusions around .h files could easily lead to inclusion dependency hell. Most gfp.h inclusion directives were ignored as stuff from gfp.h was usually widely available and often used in preprocessor macros. Each slab.h inclusion directive was examined and added manually as necessary.

6. percpu.h was updated not to include slab.h.

7. Build tests were done on the following configurations and failures were fixed.
CONFIG_GCOV_KERNEL was turned off for all tests (as my distributed build env didn't work with gcov compiles) and a few more options had to be turned off depending on archs to make things build (like ipr on powerpc/64 which failed due to missing writeq).

* x86 and x86_64 UP and SMP allmodconfig and a custom test config.
* powerpc and powerpc64 SMP allmodconfig
* sparc and sparc64 SMP allmodconfig
* ia64 SMP allmodconfig
* s390 SMP allmodconfig
* alpha SMP allmodconfig
* um on x86_64 SMP allmodconfig

8. percpu.h modifications were reverted so that it could be applied as a separate patch and serve as a bisection point.

Given the fact that I had only a couple of failures from tests on step 6, I'm fairly confident about the coverage of this conversion patch. If there is a breakage, it's likely to be something in one of the arch headers which should be easily discoverable on most builds of the specific arch. Signed-off-by: Tejun Heo Guess-its-ok-by: Christoph Lameter Cc: Ingo Molnar Cc: Lee Schermerhorn --- net/core/dev.c | 1 + 1 file changed, 1 insertion(+) (limited to 'net/core/dev.c') diff --git a/net/core/dev.c b/net/core/dev.c index 59d4394..1c8a0ce 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -80,6 +80,7 @@ #include #include #include +#include #include #include #include -- cgit v1.1 From 152102c7f2bf191690f1069bae292ea3925adf14 Mon Sep 17 00:00:00 2001 From: Changli Gao Date: Tue, 30 Mar 2010 20:16:22 +0000 Subject: rps: keep the old behavior on SMP without rps keep the old behavior on SMP without rps RPS introduces a lock operation to per cpu variable input_pkt_queue on SMP whether RPS is enabled or not. On SMP without RPS, this lock isn't needed at all. Signed-off-by: Changli Gao ---- net/core/dev.c | 42 ++++++++++++++++++++++++++++-------------- 1 file changed, 28 insertions(+), 14 deletions(-) Signed-off-by: David S.
Miller --- net/core/dev.c | 42 ++++++++++++++++++++++++++++-------------- 1 file changed, 28 insertions(+), 14 deletions(-) (limited to 'net/core/dev.c') diff --git a/net/core/dev.c b/net/core/dev.c index 887aa84..427cd53 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -206,6 +206,20 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex) return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)]; } +static inline void rps_lock(struct softnet_data *queue) +{ +#ifdef CONFIG_RPS + spin_lock(&queue->input_pkt_queue.lock); +#endif +} + +static inline void rps_unlock(struct softnet_data *queue) +{ +#ifdef CONFIG_RPS + spin_unlock(&queue->input_pkt_queue.lock); +#endif +} + /* Device list insertion */ static int list_netdevice(struct net_device *dev) { @@ -2313,13 +2327,13 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu) local_irq_save(flags); __get_cpu_var(netdev_rx_stat).total++; - spin_lock(&queue->input_pkt_queue.lock); + rps_lock(queue); if (queue->input_pkt_queue.qlen <= netdev_max_backlog) { if (queue->input_pkt_queue.qlen) { enqueue: __skb_queue_tail(&queue->input_pkt_queue, skb); - spin_unlock_irqrestore(&queue->input_pkt_queue.lock, - flags); + rps_unlock(queue); + local_irq_restore(flags); return NET_RX_SUCCESS; } @@ -2341,7 +2355,7 @@ enqueue: goto enqueue; } - spin_unlock(&queue->input_pkt_queue.lock); + rps_unlock(queue); __get_cpu_var(netdev_rx_stat).dropped++; local_irq_restore(flags); @@ -2766,19 +2780,19 @@ int netif_receive_skb(struct sk_buff *skb) EXPORT_SYMBOL(netif_receive_skb); /* Network device is going away, flush any packets still pending */ -static void flush_backlog(struct net_device *dev, int cpu) +static void flush_backlog(void *arg) { - struct softnet_data *queue = &per_cpu(softnet_data, cpu); + struct net_device *dev = arg; + struct softnet_data *queue = &__get_cpu_var(softnet_data); struct sk_buff *skb, *tmp; - unsigned long flags; - spin_lock_irqsave(&queue->input_pkt_queue.lock, flags); + rps_lock(queue); skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp) if (skb->dev == dev) { __skb_unlink(skb, &queue->input_pkt_queue); kfree_skb(skb); } - spin_unlock_irqrestore(&queue->input_pkt_queue.lock, flags); + rps_unlock(queue); } static int napi_gro_complete(struct sk_buff *skb) @@ -3091,14 +3105,16 @@ static int process_backlog(struct napi_struct *napi, int quota) do { struct sk_buff *skb; - spin_lock_irq(&queue->input_pkt_queue.lock); + local_irq_disable(); + rps_lock(queue); skb = __skb_dequeue(&queue->input_pkt_queue); if (!skb) { __napi_complete(napi); spin_unlock_irq(&queue->input_pkt_queue.lock); break; } - spin_unlock_irq(&queue->input_pkt_queue.lock); + rps_unlock(queue); + local_irq_enable(); __netif_receive_skb(skb); } while (++work < quota && jiffies == start_time); @@ -5548,7 +5564,6 @@ void netdev_run_todo(void) while (!list_empty(&list)) { struct net_device *dev = list_first_entry(&list, struct net_device, todo_list); - int i; list_del(&dev->todo_list); if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) { @@ -5560,8 +5575,7 @@ void netdev_run_todo(void) dev->reg_state = NETREG_UNREGISTERED; - for_each_online_cpu(i) - flush_backlog(dev, i); + on_each_cpu(flush_backlog, dev, 1); netdev_wait_allrefs(dev); -- cgit v1.1 From 5acbbd428db47b12f137a8a2aa96b3c0a96b744e Mon Sep 17 00:00:00 2001 From: FUJITA Tomonori Date: Tue, 30 Mar 2010 22:35:50 +0000 Subject: net: change illegal_highdma to use dma_mask Robert Hancock pointed out two problems about NETIF_F_HIGHDMA: -Many drivers only set the flag when they 
detect they can use 64-bit DMA, since otherwise they could receive DMA addresses that they can't handle (which on platforms without IOMMU/SWIOTLB support is fatal). This means that if 64-bit support isn't available, even buffers located below 4GB will get copied unnecessarily. -Some drivers set the flag even though they can't actually handle 64-bit DMA, which would mean that on platforms without IOMMU/SWIOTLB they would get a DMA mapping error if the memory they received happened to be located above 4GB. http://lkml.org/lkml/2010/3/3/530 We can use the dma_mask if we need bouncing or not here. Then we can safely fix drivers that misuse NETIF_F_HIGHDMA. Signed-off-by: FUJITA Tomonori Signed-off-by: David S. Miller --- net/core/dev.c | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) (limited to 'net/core/dev.c') diff --git a/net/core/dev.c b/net/core/dev.c index 427cd53..e19cdae 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -129,6 +129,7 @@ #include #include #include +#include #include "net-sysfs.h" @@ -1804,14 +1805,21 @@ static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb) { #ifdef CONFIG_HIGHMEM int i; + if (!(dev->features & NETIF_F_HIGHDMA)) { + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) + if (PageHighMem(skb_shinfo(skb)->frags[i].page)) + return 1; + } - if (dev->features & NETIF_F_HIGHDMA) - return 0; - - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) - if (PageHighMem(skb_shinfo(skb)->frags[i].page)) - return 1; + if (PCI_DMA_BUS_IS_PHYS) { + struct device *pdev = dev->dev.parent; + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + dma_addr_t addr = page_to_phys(skb_shinfo(skb)->frags[i].page); + if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask) + return 1; + } + } #endif return 0; } -- cgit v1.1 From 9092c658bab215b2752fa59d2a36c05b74d1e9e9 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 2 Apr 2010 13:34:49 -0700 Subject: net: illegal_highdma() fix Followup to commit 5acbbd428db47b12f137a8a2aa96b3c0a96b744e (net: change illegal_highdma to use dma_mask) If dev->dev.parent is NULL, we should not try to dereference it. Dont force inline illegal_highdma() as its pretty big now. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/core/dev.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'net/core/dev.c') diff --git a/net/core/dev.c b/net/core/dev.c index e19cdae..c6b5206 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1801,7 +1801,7 @@ EXPORT_SYMBOL(netdev_rx_csum_fault); * 2. No high memory really exists on this machine. */ -static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb) +static int illegal_highdma(struct net_device *dev, struct sk_buff *skb) { #ifdef CONFIG_HIGHMEM int i; @@ -1814,6 +1814,8 @@ static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb) if (PCI_DMA_BUS_IS_PHYS) { struct device *pdev = dev->dev.parent; + if (!pdev) + return 0; for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { dma_addr_t addr = page_to_phys(skb_shinfo(skb)->frags[i].page); if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask) -- cgit v1.1 From a748ee2426817a95b1f03012d8f339c45c722ae1 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 1 Apr 2010 21:22:09 +0000 Subject: net: move address list functions to a separate file +little renaming of unicast functions to be smooth with multicast ones Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- net/core/dev.c | 430 +-------------------------------------------------------- 1 file changed, 3 insertions(+), 427 deletions(-) (limited to 'net/core/dev.c') diff --git a/net/core/dev.c b/net/core/dev.c index c6b5206..949c62d 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3968,314 +3968,6 @@ void dev_set_rx_mode(struct net_device *dev) netif_addr_unlock_bh(dev); } -/* hw addresses list handling functions */ - -static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr, - int addr_len, unsigned char addr_type) -{ - struct netdev_hw_addr *ha; - int alloc_size; - - if (addr_len > MAX_ADDR_LEN) - return -EINVAL; - - list_for_each_entry(ha, &list->list, list) { - if (!memcmp(ha->addr, addr, addr_len) && - ha->type == addr_type) { - ha->refcount++; - return 0; - } - } - - - alloc_size = sizeof(*ha); - if (alloc_size < L1_CACHE_BYTES) - alloc_size = L1_CACHE_BYTES; - ha = kmalloc(alloc_size, GFP_ATOMIC); - if (!ha) - return -ENOMEM; - memcpy(ha->addr, addr, addr_len); - ha->type = addr_type; - ha->refcount = 1; - ha->synced = false; - list_add_tail_rcu(&ha->list, &list->list); - list->count++; - return 0; -} - -static void ha_rcu_free(struct rcu_head *head) -{ - struct netdev_hw_addr *ha; - - ha = container_of(head, struct netdev_hw_addr, rcu_head); - kfree(ha); -} - -static int __hw_addr_del(struct netdev_hw_addr_list *list, unsigned char *addr, - int addr_len, unsigned char addr_type) -{ - struct netdev_hw_addr *ha; - - list_for_each_entry(ha, &list->list, list) { - if (!memcmp(ha->addr, addr, addr_len) && - (ha->type == addr_type || !addr_type)) { - if (--ha->refcount) - return 0; - list_del_rcu(&ha->list); - call_rcu(&ha->rcu_head, ha_rcu_free); - list->count--; - return 0; - } - } - return -ENOENT; -} - -static int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list, - struct netdev_hw_addr_list *from_list, - int addr_len, - unsigned char addr_type) -{ - int err; - struct netdev_hw_addr *ha, *ha2; - unsigned char type; - - list_for_each_entry(ha, &from_list->list, list) { - type = addr_type ? addr_type : ha->type; - err = __hw_addr_add(to_list, ha->addr, addr_len, type); - if (err) - goto unroll; - } - return 0; - -unroll: - list_for_each_entry(ha2, &from_list->list, list) { - if (ha2 == ha) - break; - type = addr_type ? addr_type : ha2->type; - __hw_addr_del(to_list, ha2->addr, addr_len, type); - } - return err; -} - -static void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list, - struct netdev_hw_addr_list *from_list, - int addr_len, - unsigned char addr_type) -{ - struct netdev_hw_addr *ha; - unsigned char type; - - list_for_each_entry(ha, &from_list->list, list) { - type = addr_type ? 
addr_type : ha->type; - __hw_addr_del(to_list, ha->addr, addr_len, addr_type); - } -} - -static int __hw_addr_sync(struct netdev_hw_addr_list *to_list, - struct netdev_hw_addr_list *from_list, - int addr_len) -{ - int err = 0; - struct netdev_hw_addr *ha, *tmp; - - list_for_each_entry_safe(ha, tmp, &from_list->list, list) { - if (!ha->synced) { - err = __hw_addr_add(to_list, ha->addr, - addr_len, ha->type); - if (err) - break; - ha->synced = true; - ha->refcount++; - } else if (ha->refcount == 1) { - __hw_addr_del(to_list, ha->addr, addr_len, ha->type); - __hw_addr_del(from_list, ha->addr, addr_len, ha->type); - } - } - return err; -} - -static void __hw_addr_unsync(struct netdev_hw_addr_list *to_list, - struct netdev_hw_addr_list *from_list, - int addr_len) -{ - struct netdev_hw_addr *ha, *tmp; - - list_for_each_entry_safe(ha, tmp, &from_list->list, list) { - if (ha->synced) { - __hw_addr_del(to_list, ha->addr, - addr_len, ha->type); - ha->synced = false; - __hw_addr_del(from_list, ha->addr, - addr_len, ha->type); - } - } -} - -static void __hw_addr_flush(struct netdev_hw_addr_list *list) -{ - struct netdev_hw_addr *ha, *tmp; - - list_for_each_entry_safe(ha, tmp, &list->list, list) { - list_del_rcu(&ha->list); - call_rcu(&ha->rcu_head, ha_rcu_free); - } - list->count = 0; -} - -static void __hw_addr_init(struct netdev_hw_addr_list *list) -{ - INIT_LIST_HEAD(&list->list); - list->count = 0; -} - -/* Device addresses handling functions */ - -static void dev_addr_flush(struct net_device *dev) -{ - /* rtnl_mutex must be held here */ - - __hw_addr_flush(&dev->dev_addrs); - dev->dev_addr = NULL; -} - -static int dev_addr_init(struct net_device *dev) -{ - unsigned char addr[MAX_ADDR_LEN]; - struct netdev_hw_addr *ha; - int err; - - /* rtnl_mutex must be held here */ - - __hw_addr_init(&dev->dev_addrs); - memset(addr, 0, sizeof(addr)); - err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr), - NETDEV_HW_ADDR_T_LAN); - if (!err) { - /* - * Get the first (previously created) address from the list - * and set dev_addr pointer to this location. - */ - ha = list_first_entry(&dev->dev_addrs.list, - struct netdev_hw_addr, list); - dev->dev_addr = ha->addr; - } - return err; -} - -/** - * dev_addr_add - Add a device address - * @dev: device - * @addr: address to add - * @addr_type: address type - * - * Add a device address to the device or increase the reference count if - * it already exists. - * - * The caller must hold the rtnl_mutex. - */ -int dev_addr_add(struct net_device *dev, unsigned char *addr, - unsigned char addr_type) -{ - int err; - - ASSERT_RTNL(); - - err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type); - if (!err) - call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); - return err; -} -EXPORT_SYMBOL(dev_addr_add); - -/** - * dev_addr_del - Release a device address. - * @dev: device - * @addr: address to delete - * @addr_type: address type - * - * Release reference to a device address and remove it from the device - * if the reference count drops to zero. - * - * The caller must hold the rtnl_mutex. - */ -int dev_addr_del(struct net_device *dev, unsigned char *addr, - unsigned char addr_type) -{ - int err; - struct netdev_hw_addr *ha; - - ASSERT_RTNL(); - - /* - * We can not remove the first address from the list because - * dev->dev_addr points to that. 
- */ - ha = list_first_entry(&dev->dev_addrs.list, - struct netdev_hw_addr, list); - if (ha->addr == dev->dev_addr && ha->refcount == 1) - return -ENOENT; - - err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len, - addr_type); - if (!err) - call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); - return err; -} -EXPORT_SYMBOL(dev_addr_del); - -/** - * dev_addr_add_multiple - Add device addresses from another device - * @to_dev: device to which addresses will be added - * @from_dev: device from which addresses will be added - * @addr_type: address type - 0 means type will be used from from_dev - * - * Add device addresses of the one device to another. - ** - * The caller must hold the rtnl_mutex. - */ -int dev_addr_add_multiple(struct net_device *to_dev, - struct net_device *from_dev, - unsigned char addr_type) -{ - int err; - - ASSERT_RTNL(); - - if (from_dev->addr_len != to_dev->addr_len) - return -EINVAL; - err = __hw_addr_add_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs, - to_dev->addr_len, addr_type); - if (!err) - call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev); - return err; -} -EXPORT_SYMBOL(dev_addr_add_multiple); - -/** - * dev_addr_del_multiple - Delete device addresses by another device - * @to_dev: device where the addresses will be deleted - * @from_dev: device by which addresses the addresses will be deleted - * @addr_type: address type - 0 means type will used from from_dev - * - * Deletes addresses in to device by the list of addresses in from device. - * - * The caller must hold the rtnl_mutex. - */ -int dev_addr_del_multiple(struct net_device *to_dev, - struct net_device *from_dev, - unsigned char addr_type) -{ - ASSERT_RTNL(); - - if (from_dev->addr_len != to_dev->addr_len) - return -EINVAL; - __hw_addr_del_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs, - to_dev->addr_len, addr_type); - call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev); - return 0; -} -EXPORT_SYMBOL(dev_addr_del_multiple); - /* multicast addresses handling functions */ int __dev_addr_delete(struct dev_addr_list **list, int *count, @@ -4336,57 +4028,6 @@ int __dev_addr_add(struct dev_addr_list **list, int *count, return 0; } -/** - * dev_unicast_delete - Release secondary unicast address. - * @dev: device - * @addr: address to delete - * - * Release reference to a secondary unicast address and remove it - * from the device if the reference count drops to zero. - * - * The caller must hold the rtnl_mutex. - */ -int dev_unicast_delete(struct net_device *dev, void *addr) -{ - int err; - - ASSERT_RTNL(); - - netif_addr_lock_bh(dev); - err = __hw_addr_del(&dev->uc, addr, dev->addr_len, - NETDEV_HW_ADDR_T_UNICAST); - if (!err) - __dev_set_rx_mode(dev); - netif_addr_unlock_bh(dev); - return err; -} -EXPORT_SYMBOL(dev_unicast_delete); - -/** - * dev_unicast_add - add a secondary unicast address - * @dev: device - * @addr: address to add - * - * Add a secondary unicast address to the device or increase - * the reference count if it already exists. - * - * The caller must hold the rtnl_mutex. 
- */ -int dev_unicast_add(struct net_device *dev, void *addr) -{ - int err; - - ASSERT_RTNL(); - - netif_addr_lock_bh(dev); - err = __hw_addr_add(&dev->uc, addr, dev->addr_len, - NETDEV_HW_ADDR_T_UNICAST); - if (!err) - __dev_set_rx_mode(dev); - netif_addr_unlock_bh(dev); - return err; -} -EXPORT_SYMBOL(dev_unicast_add); int __dev_addr_sync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count) @@ -4436,71 +4077,6 @@ void __dev_addr_unsync(struct dev_addr_list **to, int *to_count, } EXPORT_SYMBOL_GPL(__dev_addr_unsync); -/** - * dev_unicast_sync - Synchronize device's unicast list to another device - * @to: destination device - * @from: source device - * - * Add newly added addresses to the destination device and release - * addresses that have no users left. The source device must be - * locked by netif_tx_lock_bh. - * - * This function is intended to be called from the dev->set_rx_mode - * function of layered software devices. - */ -int dev_unicast_sync(struct net_device *to, struct net_device *from) -{ - int err = 0; - - if (to->addr_len != from->addr_len) - return -EINVAL; - - netif_addr_lock_bh(to); - err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len); - if (!err) - __dev_set_rx_mode(to); - netif_addr_unlock_bh(to); - return err; -} -EXPORT_SYMBOL(dev_unicast_sync); - -/** - * dev_unicast_unsync - Remove synchronized addresses from the destination device - * @to: destination device - * @from: source device - * - * Remove all addresses that were added to the destination device by - * dev_unicast_sync(). This function is intended to be called from the - * dev->stop function of layered software devices. - */ -void dev_unicast_unsync(struct net_device *to, struct net_device *from) -{ - if (to->addr_len != from->addr_len) - return; - - netif_addr_lock_bh(from); - netif_addr_lock(to); - __hw_addr_unsync(&to->uc, &from->uc, to->addr_len); - __dev_set_rx_mode(to); - netif_addr_unlock(to); - netif_addr_unlock_bh(from); -} -EXPORT_SYMBOL(dev_unicast_unsync); - -void dev_unicast_flush(struct net_device *dev) -{ - netif_addr_lock_bh(dev); - __hw_addr_flush(&dev->uc); - netif_addr_unlock_bh(dev); -} -EXPORT_SYMBOL(dev_unicast_flush); - -static void dev_unicast_init(struct net_device *dev) -{ - __hw_addr_init(&dev->uc); -} - - static void __dev_addr_discard(struct dev_addr_list **list) { struct dev_addr_list *tmp; @@ -5153,7 +4729,7 @@ static void rollback_registered_many(struct list_head *head) /* * Flush the unicast and multicast chains */ - dev_unicast_flush(dev); + dev_uc_flush(dev); dev_addr_discard(dev); if (dev->netdev_ops->ndo_uninit) @@ -5734,7 +5310,7 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, if (dev_addr_init(dev)) goto free_rx; - dev_unicast_init(dev); + dev_uc_init(dev); dev_net_set(dev, &init_net); @@ -5968,7 +5544,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char /* * Flush the unicast and multicast chains */ - dev_unicast_flush(dev); + dev_uc_flush(dev); dev_addr_discard(dev); netdev_unregister_kobject(dev); -- cgit v1.1 From 22bedad3ce112d5ca1eaf043d4990fa2ed698c87 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 1 Apr 2010 21:22:57 +0000 Subject: net: convert multicast list to list_head Converts the list and the core manipulating with it to be the same as uc_list. +uses two functions for adding/removing mc address (normal and "global" variant) instead of a function parameter. +removes dev_mcast.c completely. 
+exposes netdev_hw_addr_list_* macros along with __hw_addr_* functions for manipulation with lists on a sandbox (used in bonding and 80211 drivers) Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- net/core/dev.c | 145 ++------------------------------------------------------- 1 file changed, 5 insertions(+), 140 deletions(-) (limited to 'net/core/dev.c') diff --git a/net/core/dev.c b/net/core/dev.c index 949c62d..2a9b7dd 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3968,140 +3968,6 @@ void dev_set_rx_mode(struct net_device *dev) netif_addr_unlock_bh(dev); } -/* multicast addresses handling functions */ - -int __dev_addr_delete(struct dev_addr_list **list, int *count, - void *addr, int alen, int glbl) -{ - struct dev_addr_list *da; - - for (; (da = *list) != NULL; list = &da->next) { - if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 && - alen == da->da_addrlen) { - if (glbl) { - int old_glbl = da->da_gusers; - da->da_gusers = 0; - if (old_glbl == 0) - break; - } - if (--da->da_users) - return 0; - - *list = da->next; - kfree(da); - (*count)--; - return 0; - } - } - return -ENOENT; -} - -int __dev_addr_add(struct dev_addr_list **list, int *count, - void *addr, int alen, int glbl) -{ - struct dev_addr_list *da; - - for (da = *list; da != NULL; da = da->next) { - if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 && - da->da_addrlen == alen) { - if (glbl) { - int old_glbl = da->da_gusers; - da->da_gusers = 1; - if (old_glbl) - return 0; - } - da->da_users++; - return 0; - } - } - - da = kzalloc(sizeof(*da), GFP_ATOMIC); - if (da == NULL) - return -ENOMEM; - memcpy(da->da_addr, addr, alen); - da->da_addrlen = alen; - da->da_users = 1; - da->da_gusers = glbl ? 1 : 0; - da->next = *list; - *list = da; - (*count)++; - return 0; -} - - -int __dev_addr_sync(struct dev_addr_list **to, int *to_count, - struct dev_addr_list **from, int *from_count) -{ - struct dev_addr_list *da, *next; - int err = 0; - - da = *from; - while (da != NULL) { - next = da->next; - if (!da->da_synced) { - err = __dev_addr_add(to, to_count, - da->da_addr, da->da_addrlen, 0); - if (err < 0) - break; - da->da_synced = 1; - da->da_users++; - } else if (da->da_users == 1) { - __dev_addr_delete(to, to_count, - da->da_addr, da->da_addrlen, 0); - __dev_addr_delete(from, from_count, - da->da_addr, da->da_addrlen, 0); - } - da = next; - } - return err; -} -EXPORT_SYMBOL_GPL(__dev_addr_sync); - -void __dev_addr_unsync(struct dev_addr_list **to, int *to_count, - struct dev_addr_list **from, int *from_count) -{ - struct dev_addr_list *da, *next; - - da = *from; - while (da != NULL) { - next = da->next; - if (da->da_synced) { - __dev_addr_delete(to, to_count, - da->da_addr, da->da_addrlen, 0); - da->da_synced = 0; - __dev_addr_delete(from, from_count, - da->da_addr, da->da_addrlen, 0); - } - da = next; - } -} -EXPORT_SYMBOL_GPL(__dev_addr_unsync); - -static void __dev_addr_discard(struct dev_addr_list **list) -{ - struct dev_addr_list *tmp; - - while (*list != NULL) { - tmp = *list; - *list = tmp->next; - if (tmp->da_users > tmp->da_gusers) - printk("__dev_addr_discard: address leakage! 
" - "da_users=%d\n", tmp->da_users); - kfree(tmp); - } -} - -void dev_addr_discard(struct net_device *dev) -{ - netif_addr_lock_bh(dev); - - __dev_addr_discard(&dev->mc_list); - netdev_mc_count(dev) = 0; - - netif_addr_unlock_bh(dev); -} -EXPORT_SYMBOL(dev_addr_discard); - /** * dev_get_flags - get flags reported to userspace * @dev: device @@ -4412,8 +4278,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd) return -EINVAL; if (!netif_device_present(dev)) return -ENODEV; - return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data, - dev->addr_len, 1); + return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data); case SIOCDELMULTI: if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) || @@ -4421,8 +4286,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd) return -EINVAL; if (!netif_device_present(dev)) return -ENODEV; - return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data, - dev->addr_len, 1); + return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data); case SIOCSIFTXQLEN: if (ifr->ifr_qlen < 0) @@ -4730,7 +4594,7 @@ static void rollback_registered_many(struct list_head *head) * Flush the unicast and multicast chains */ dev_uc_flush(dev); - dev_addr_discard(dev); + dev_mc_flush(dev); if (dev->netdev_ops->ndo_uninit) dev->netdev_ops->ndo_uninit(dev); @@ -5310,6 +5174,7 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name, if (dev_addr_init(dev)) goto free_rx; + dev_mc_init(dev); dev_uc_init(dev); dev_net_set(dev, &init_net); @@ -5545,7 +5410,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char * Flush the unicast and multicast chains */ dev_uc_flush(dev); - dev_addr_discard(dev); + dev_mc_flush(dev); netdev_unregister_kobject(dev); -- cgit v1.1 From 5a6d234e73d7d021c74e1aa349b3b37b81372c66 Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Mon, 5 Apr 2010 14:37:19 -0700 Subject: rps: fixed missed rps_unlock Fix spin_unlock_irq which needs to be rps_unlock. Signed-off-by: Tom Herbert Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- net/core/dev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net/core/dev.c') diff --git a/net/core/dev.c b/net/core/dev.c index 2a9b7dd..74f77ca 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3120,7 +3120,7 @@ static int process_backlog(struct napi_struct *napi, int quota) skb = __skb_dequeue(&queue->input_pkt_queue); if (!skb) { __napi_complete(napi); - spin_unlock_irq(&queue->input_pkt_queue.lock); + rps_unlock(queue); break; } rps_unlock(queue); -- cgit v1.1 From e4008276fddd10445ff06707694a938cb7f35ed4 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 5 Apr 2010 15:42:39 -0700 Subject: net: Add a missing local_irq_enable() As noticed by Changli Gao, we must call local_irq_enable() after rps_unlock() Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/core/dev.c | 1 + 1 file changed, 1 insertion(+) (limited to 'net/core/dev.c') diff --git a/net/core/dev.c b/net/core/dev.c index 74f77ca..b98ddc6 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -3121,6 +3121,7 @@ static int process_backlog(struct napi_struct *napi, int quota) if (!skb) { __napi_complete(napi); rps_unlock(queue); + local_irq_enable(); break; } rps_unlock(queue); -- cgit v1.1 From 7a161ea92471087a1579239d7a58dd06eaa5601c Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 8 Apr 2010 21:26:13 +0000 Subject: net: Dont use netdev_warn() Dont use netdev_warn() in dev_cap_txqueue() and get_rps_cpu() so that we can catch following warnings without crash. bond0.2240 received packet on queue 6, but number of RX queues is 1 bond0.2240 received packet on queue 11, but number of RX queues is 1 Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/core/dev.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'net/core/dev.c') diff --git a/net/core/dev.c b/net/core/dev.c index a10a216..0eb79e3 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1987,9 +1987,9 @@ static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index) { if (unlikely(queue_index >= dev->real_num_tx_queues)) { if (net_ratelimit()) { - netdev_warn(dev, "selects TX queue %d, but " - "real number of TX queues is %d\n", - queue_index, dev->real_num_tx_queues); + pr_warning("%s selects TX queue %d, but " + "real number of TX queues is %d\n", + dev->name, queue_index, dev->real_num_tx_queues); } return 0; } @@ -2223,9 +2223,9 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb) u16 index = skb_get_rx_queue(skb); if (unlikely(index >= dev->num_rx_queues)) { if (net_ratelimit()) { - netdev_warn(dev, "received packet on queue " - "%u, but number of RX queues is %u\n", - index, dev->num_rx_queues); + pr_warning("%s received packet on queue " + "%u, but number of RX queues is %u\n", + dev->name, index, dev->num_rx_queues); } goto done; } -- cgit v1.1 From b6c6712a42ca3f9fa7f4a3d7c40e3a9dd1fd9e03 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 8 Apr 2010 23:03:29 +0000 Subject: net: sk_dst_cache RCUification With latest CONFIG_PROVE_RCU stuff, I felt more comfortable to make this work. sk->sk_dst_cache is currently protected by a rwlock (sk_dst_lock) This rwlock is readlocked for a very small amount of time, and dst entries are already freed after RCU grace period. This calls for RCU again :) This patch converts sk_dst_lock to a spinlock, and use RCU for readers. __sk_dst_get() is supposed to be called with rcu_read_lock() or if socket locked by user, so use appropriate rcu_dereference_check() condition (rcu_read_lock_held() || sock_owned_by_user(sk)) This patch avoids two atomic ops per tx packet on UDP connected sockets, for example, and permits sk_dst_lock to be much less dirtied. Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/core/dev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net/core/dev.c') diff --git a/net/core/dev.c b/net/core/dev.c index 0eb79e3..ca4cdef 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2015,7 +2015,7 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev, if (dev->real_num_tx_queues > 1) queue_index = skb_tx_hash(dev, skb); - if (sk && sk->sk_dst_cache) + if (sk && rcu_dereference_check(sk->sk_dst_cache, 1)) sk_tx_queue_set(sk, queue_index); } } -- cgit v1.1 From acbbc07145b919248c410e1852b953d385be5c97 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sun, 11 Apr 2010 06:56:11 +0000 Subject: net: uninline skb_bond_should_drop() skb_bond_should_drop() is too big to be inlined. This patch reduces kernel text size, and its compilation time as well (shrinking include/linux/netdevice.h) Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/core/dev.c | 49 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) (limited to 'net/core/dev.c') diff --git a/net/core/dev.c b/net/core/dev.c index ca4cdef..876b111 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2646,6 +2646,55 @@ void netif_nit_deliver(struct sk_buff *skb) rcu_read_unlock(); } +static inline void skb_bond_set_mac_by_master(struct sk_buff *skb, + struct net_device *master) +{ + if (skb->pkt_type == PACKET_HOST) { + u16 *dest = (u16 *) eth_hdr(skb)->h_dest; + + memcpy(dest, master->dev_addr, ETH_ALEN); + } +} + +/* On bonding slaves other than the currently active slave, suppress + * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and + * ARP on active-backup slaves with arp_validate enabled. + */ +int __skb_bond_should_drop(struct sk_buff *skb, struct net_device *master) +{ + struct net_device *dev = skb->dev; + + if (master->priv_flags & IFF_MASTER_ARPMON) + dev->last_rx = jiffies; + + if ((master->priv_flags & IFF_MASTER_ALB) && master->br_port) { + /* Do address unmangle. The local destination address + * will be always the one master has. Provides the right + * functionality in a bridge. + */ + skb_bond_set_mac_by_master(skb, master); + } + + if (dev->priv_flags & IFF_SLAVE_INACTIVE) { + if ((dev->priv_flags & IFF_SLAVE_NEEDARP) && + skb->protocol == __cpu_to_be16(ETH_P_ARP)) + return 0; + + if (master->priv_flags & IFF_MASTER_ALB) { + if (skb->pkt_type != PACKET_BROADCAST && + skb->pkt_type != PACKET_MULTICAST) + return 0; + } + if (master->priv_flags & IFF_MASTER_8023AD && + skb->protocol == __cpu_to_be16(ETH_P_SLOW)) + return 0; + + return 1; + } + return 0; +} +EXPORT_SYMBOL(__skb_bond_should_drop); + static int __netif_receive_skb(struct sk_buff *skb) { struct packet_type *ptype, *pt_prev; -- cgit v1.1 From b0e28f1effd1d840b36e961edc1def81e01b1ca1 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 15 Apr 2010 00:14:07 -0700 Subject: net: netif_rx() must disable preemption Eric Paris reported netif_rx() is calling smp_processor_id() from preemptible context, in particular when caller is ip_dev_loopback_xmit(). RPS commit added this smp_processor_id() call, this patch makes sure preemption is disabled. rps_get_cpus() wants rcu_read_lock() anyway, we can dot it a bit earlier. Reported-by: Eric Paris Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/core/dev.c | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) (limited to 'net/core/dev.c') diff --git a/net/core/dev.c b/net/core/dev.c index 876b111..e8041eb 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2206,6 +2206,7 @@ DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, }; /* * get_rps_cpu is called from netif_receive_skb and returns the target * CPU from the RPS map of the receiving queue for a given skb. + * rcu_read_lock must be held on entry. */ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb) { @@ -2217,8 +2218,6 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb) u8 ip_proto; u32 addr1, addr2, ports, ihl; - rcu_read_lock(); - if (skb_rx_queue_recorded(skb)) { u16 index = skb_get_rx_queue(skb); if (unlikely(index >= dev->num_rx_queues)) { @@ -2296,7 +2295,6 @@ got_hash: } done: - rcu_read_unlock(); return cpu; } @@ -2392,7 +2390,7 @@ enqueue: int netif_rx(struct sk_buff *skb) { - int cpu; + int ret; /* if netpoll wants it, pretend we never saw it */ if (netpoll_rx(skb)) @@ -2402,14 +2400,21 @@ int netif_rx(struct sk_buff *skb) net_timestamp(skb); #ifdef CONFIG_RPS - cpu = get_rps_cpu(skb->dev, skb); - if (cpu < 0) - cpu = smp_processor_id(); + { + int cpu; + + rcu_read_lock(); + cpu = get_rps_cpu(skb->dev, skb); + if (cpu < 0) + cpu = smp_processor_id(); + ret = enqueue_to_backlog(skb, cpu); + rcu_read_unlock(); + } #else - cpu = smp_processor_id(); + ret = enqueue_to_backlog(skb, get_cpu()); + put_cpu(); #endif - - return enqueue_to_backlog(skb, cpu); + return ret; } EXPORT_SYMBOL(netif_rx); -- cgit v1.1 From fec5e652e58fa6017b2c9e06466cb2a6538de5b4 Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Fri, 16 Apr 2010 16:01:27 -0700 Subject: rfs: Receive Flow Steering This patch implements receive flow steering (RFS). RFS steers received packets for layer 3 and 4 processing to the CPU where the application for the corresponding flow is running. RFS is an extension of Receive Packet Steering (RPS). The basic idea of RFS is that when an application calls recvmsg (or sendmsg) the application's running CPU is stored in a hash table that is indexed by the connection's rxhash which is stored in the socket structure. The rxhash is passed in skb's received on the connection from netif_receive_skb. For each received packet, the associated rxhash is used to look up the CPU in the hash table, if a valid CPU is set then the packet is steered to that CPU using the RPS mechanisms. The convolution of the simple approach is that it would potentially allow OOO packets. If threads are thrashing around CPUs or multiple threads are trying to read from the same sockets, a quickly changing CPU value in the hash table could cause rampant OOO packets-- we consider this a non-starter. To avoid OOO packets, this solution implements two types of hash tables: rps_sock_flow_table and rps_dev_flow_table. rps_sock_table is a global hash table. Each entry is just a CPU number and it is populated in recvmsg and sendmsg as described above. This table contains the "desired" CPUs for flows. rps_dev_flow_table is specific to each device queue. Each entry contains a CPU and a tail queue counter. The CPU is the "current" CPU for a matching flow. The tail queue counter holds the value of a tail queue counter for the associated CPU's backlog queue at the time of last enqueue for a flow matching the entry. 
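To make the two hash tables concrete, here is a minimal sketch of their layout as described above (an illustration only: the field names match those used in the hunks further down, but the types, sizing and comments here are simplified and are not the kernel's actual definitions):

#include <linux/types.h>

/* Illustrative sketch only, not the real kernel declarations. */

/* Global table, populated from recvmsg()/sendmsg(): the "desired" CPU per flow. */
struct rps_sock_flow_table {
	unsigned int mask;	/* table size - 1; size is a power of two */
	u16 ents[];		/* ents[skb->rxhash & mask] = CPU of last recvmsg/sendmsg */
};

/* One entry of the per-RX-queue table: the "current" CPU for a flow. */
struct rps_dev_flow {
	u16 cpu;			/* current CPU, or RPS_NO_CPU if unset */
	unsigned int last_qtail;	/* backlog tail counter at last enqueue */
};

struct rps_dev_flow_table {
	unsigned int mask;
	struct rps_dev_flow flows[];	/* flows[skb->rxhash & mask] */
};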
Each backlog queue has a queue head counter which is incremented on dequeue, and so a queue tail counter is computed as queue head count + queue length. When a packet is enqueued on a backlog queue, the current value of the queue tail counter is saved in the hash entry of the rps_dev_flow_table. And now the trick: when selecting the CPU for RPS (get_rps_cpu) the rps_sock_flow table and the rps_dev_flow table for the RX queue are consulted. When the desired CPU for the flow (found in the rps_sock_flow table) does not match the current CPU (found in the rps_dev_flow table), the current CPU is changed to the desired CPU if one of the following is true:

- The current CPU is unset (equal to RPS_NO_CPU)
- The current CPU is offline
- The current CPU's queue head counter >= queue tail counter in the rps_dev_flow table. This checks if the queue tail has advanced beyond the last packet that was enqueued using this table entry. This guarantees that all packets queued using this entry have been dequeued, thus preserving in-order delivery.

Making each queue have its own rps_dev_flow table has two advantages: 1) the tail queue counters will be written on each receive, so keeping the table local to the interrupting CPU is good for locality; 2) this allows lockless access to the table -- the CPU number and queue tail counter need to be accessed together under mutual exclusion from netif_receive_skb; we assume that this is only called from device napi_poll, which is non-reentrant.

This patch implements RFS for TCP and connected UDP sockets. It should be usable for other flow-oriented protocols. There are two configuration parameters for RFS. The "rps_flow_entries" kernel init parameter sets the number of entries in the rps_sock_flow_table; the per-rxqueue sysfs entry "rps_flow_cnt" contains the number of entries in the rps_dev_flow table for the rxqueue. Both are rounded to a power of two.

The obvious benefit of RFS (over just RPS) is that it achieves CPU locality between the receive processing for a flow and the application's processing; this can result in increased performance (higher pps, lower latency). The benefits of RFS are dependent on cache hierarchy, application load, and other factors. On simple benchmarks, we don't necessarily see improvement and sometimes see degradation. However, for more complex benchmarks and for applications where cache pressure is much higher this technique seems to perform very well.

Below are some benchmark results which show the potential benefit of this patch. The netperf test has 500 instances of netperf TCP_RR test with 1 byte req. and resp. The RPC test is a request/response test similar in structure to the netperf RR test with 100 threads on each host, but does more work in userspace than netperf.

e1000e on 8 core Intel
  No RFS or RPS             104K tps at 30% CPU
  No RFS (best RPS config): 290K tps at 63% CPU
  RFS                       303K tps at 61% CPU

RPC test        tps     CPU%    50/90/99% usec latency    Latency StdDev
  No RFS/RPS    103K    48%     757/900/3185              4472.35
  RPS only:     174K    73%     415/993/2468              491.66
  RFS           223K    73%     379/651/1382              315.61

Signed-off-by: Tom Herbert Signed-off-by: Eric Dumazet Signed-off-by: David S.
Miller --- net/core/dev.c | 111 ++++++++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 91 insertions(+), 20 deletions(-) (limited to 'net/core/dev.c') diff --git a/net/core/dev.c b/net/core/dev.c index e8041eb..d7107ac 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2203,19 +2203,28 @@ int weight_p __read_mostly = 64; /* old backlog weight */ DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, }; #ifdef CONFIG_RPS + +/* One global table that all flow-based protocols share. */ +struct rps_sock_flow_table *rps_sock_flow_table; +EXPORT_SYMBOL(rps_sock_flow_table); + /* * get_rps_cpu is called from netif_receive_skb and returns the target * CPU from the RPS map of the receiving queue for a given skb. * rcu_read_lock must be held on entry. */ -static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb) +static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, + struct rps_dev_flow **rflowp) { struct ipv6hdr *ip6; struct iphdr *ip; struct netdev_rx_queue *rxqueue; struct rps_map *map; + struct rps_dev_flow_table *flow_table; + struct rps_sock_flow_table *sock_flow_table; int cpu = -1; u8 ip_proto; + u16 tcpu; u32 addr1, addr2, ports, ihl; if (skb_rx_queue_recorded(skb)) { @@ -2232,7 +2241,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb) } else rxqueue = dev->_rx; - if (!rxqueue->rps_map) + if (!rxqueue->rps_map && !rxqueue->rps_flow_table) goto done; if (skb->rxhash) @@ -2284,9 +2293,48 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb) skb->rxhash = 1; got_hash: + flow_table = rcu_dereference(rxqueue->rps_flow_table); + sock_flow_table = rcu_dereference(rps_sock_flow_table); + if (flow_table && sock_flow_table) { + u16 next_cpu; + struct rps_dev_flow *rflow; + + rflow = &flow_table->flows[skb->rxhash & flow_table->mask]; + tcpu = rflow->cpu; + + next_cpu = sock_flow_table->ents[skb->rxhash & + sock_flow_table->mask]; + + /* + * If the desired CPU (where last recvmsg was done) is + * different from current CPU (one in the rx-queue flow + * table entry), switch if one of the following holds: + * - Current CPU is unset (equal to RPS_NO_CPU). + * - Current CPU is offline. + * - The current CPU's queue tail has advanced beyond the + * last packet that was enqueued using this table entry. + * This guarantees that all previous packets for the flow + * have been dequeued, thus preserving in order delivery. + */ + if (unlikely(tcpu != next_cpu) && + (tcpu == RPS_NO_CPU || !cpu_online(tcpu) || + ((int)(per_cpu(softnet_data, tcpu).input_queue_head - + rflow->last_qtail)) >= 0)) { + tcpu = rflow->cpu = next_cpu; + if (tcpu != RPS_NO_CPU) + rflow->last_qtail = per_cpu(softnet_data, + tcpu).input_queue_head; + } + if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) { + *rflowp = rflow; + cpu = tcpu; + goto done; + } + } + map = rcu_dereference(rxqueue->rps_map); if (map) { - u16 tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32]; + tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32]; if (cpu_online(tcpu)) { cpu = tcpu; @@ -2320,13 +2368,14 @@ static void trigger_softirq(void *data) __napi_schedule(&queue->backlog); __get_cpu_var(netdev_rx_stat).received_rps++; } -#endif /* CONFIG_SMP */ +#endif /* CONFIG_RPS */ /* * enqueue_to_backlog is called to queue an skb to a per CPU backlog * queue (may be a remote CPU queue). 
*/ -static int enqueue_to_backlog(struct sk_buff *skb, int cpu) +static int enqueue_to_backlog(struct sk_buff *skb, int cpu, + unsigned int *qtail) { struct softnet_data *queue; unsigned long flags; @@ -2341,6 +2390,10 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu) if (queue->input_pkt_queue.qlen) { enqueue: __skb_queue_tail(&queue->input_pkt_queue, skb); +#ifdef CONFIG_RPS + *qtail = queue->input_queue_head + + queue->input_pkt_queue.qlen; +#endif rps_unlock(queue); local_irq_restore(flags); return NET_RX_SUCCESS; @@ -2355,11 +2408,10 @@ enqueue: cpu_set(cpu, rcpus->mask[rcpus->select]); __raise_softirq_irqoff(NET_RX_SOFTIRQ); - } else - __napi_schedule(&queue->backlog); -#else - __napi_schedule(&queue->backlog); + goto enqueue; + } #endif + __napi_schedule(&queue->backlog); } goto enqueue; } @@ -2401,18 +2453,25 @@ int netif_rx(struct sk_buff *skb) #ifdef CONFIG_RPS { + struct rps_dev_flow voidflow, *rflow = &voidflow; int cpu; rcu_read_lock(); - cpu = get_rps_cpu(skb->dev, skb); + + cpu = get_rps_cpu(skb->dev, skb, &rflow); if (cpu < 0) cpu = smp_processor_id(); - ret = enqueue_to_backlog(skb, cpu); + + ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); + rcu_read_unlock(); } #else - ret = enqueue_to_backlog(skb, get_cpu()); - put_cpu(); + { + unsigned int qtail; + ret = enqueue_to_backlog(skb, get_cpu(), &qtail); + put_cpu(); + } #endif return ret; } @@ -2830,14 +2889,22 @@ out: int netif_receive_skb(struct sk_buff *skb) { #ifdef CONFIG_RPS - int cpu; + struct rps_dev_flow voidflow, *rflow = &voidflow; + int cpu, ret; + + rcu_read_lock(); - cpu = get_rps_cpu(skb->dev, skb); + cpu = get_rps_cpu(skb->dev, skb, &rflow); - if (cpu < 0) - return __netif_receive_skb(skb); - else - return enqueue_to_backlog(skb, cpu); + if (cpu >= 0) { + ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); + rcu_read_unlock(); + } else { + rcu_read_unlock(); + ret = __netif_receive_skb(skb); + } + + return ret; #else return __netif_receive_skb(skb); #endif @@ -2856,6 +2923,7 @@ static void flush_backlog(void *arg) if (skb->dev == dev) { __skb_unlink(skb, &queue->input_pkt_queue); kfree_skb(skb); + incr_input_queue_head(queue); } rps_unlock(queue); } @@ -3179,6 +3247,7 @@ static int process_backlog(struct napi_struct *napi, int quota) local_irq_enable(); break; } + incr_input_queue_head(queue); rps_unlock(queue); local_irq_enable(); @@ -5542,8 +5611,10 @@ static int dev_cpu_callback(struct notifier_block *nfb, local_irq_enable(); /* Process offline CPU's input_pkt_queue */ - while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) + while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) { netif_rx(skb); + incr_input_queue_head(oldsd); + } return NOTIFY_OK; } -- cgit v1.1 From 8770acf0494ae06de6abd34f951a436f8f15d1de Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sat, 17 Apr 2010 00:54:36 -0700 Subject: rps: rps_sock_flow_table is mostly read Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/core/dev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net/core/dev.c') diff --git a/net/core/dev.c b/net/core/dev.c index d7107ac..7abf959 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2205,7 +2205,7 @@ DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, }; #ifdef CONFIG_RPS /* One global table that all flow-based protocols share. 
*/ -struct rps_sock_flow_table *rps_sock_flow_table; +struct rps_sock_flow_table *rps_sock_flow_table __read_mostly; EXPORT_SYMBOL(rps_sock_flow_table); /* -- cgit v1.1 From 9958da0501fced47c1ac5c5a3a7731c87e45472c Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sat, 17 Apr 2010 04:17:02 +0000 Subject: net: remove time limit in process_backlog() - There is no point in enforcing a time limit in process_backlog(), since other napi instances don't follow the same rule. We can exit after only one packet is processed... The normal quota of 64 packets per napi instance should be the norm, and net_rx_action() already has its own time limit. Note: /proc/sys/net/core/dev_weight can be used to tune this default value of 64. - Use DEFINE_PER_CPU_ALIGNED for the softnet_data definition. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/core/dev.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'net/core/dev.c') diff --git a/net/core/dev.c b/net/core/dev.c index 7abf959..8092f01 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -264,7 +264,7 @@ static RAW_NOTIFIER_HEAD(netdev_chain); * queue in the local softnet handler. */ -DEFINE_PER_CPU(struct softnet_data, softnet_data); +DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); EXPORT_PER_CPU_SYMBOL(softnet_data); #ifdef CONFIG_LOCKDEP @@ -3232,7 +3232,6 @@ static int process_backlog(struct napi_struct *napi, int quota) { int work = 0; struct softnet_data *queue = &__get_cpu_var(softnet_data); - unsigned long start_time = jiffies; napi->weight = weight_p; do { @@ -3252,7 +3251,7 @@ static int process_backlog(struct napi_struct *napi, int quota) local_irq_enable(); __netif_receive_skb(skb); - } while (++work < quota && jiffies == start_time); + } while (++work < quota); return work; } -- cgit v1.1 From fc6055a5ba31e2c14e36e8939f9bf2b6d586a7f5 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 16 Apr 2010 12:18:22 +0000 Subject: net: Introduce skb_orphan_try() A transmitted skb might be attached to a socket and a destructor, for memory accounting purposes. Traditionally, this destructor is called at tx completion time, when the skb is freed. When tx completion is performed by a cpu other than the sender, this forces some cache lines to change ownership. XPS was an attempt to give tx completion to the initial cpu. David's idea is to call the destructor right before giving the skb to the device (the call to ndo_start_xmit()). Because device queues are usually small, orphaning the skb before tx completion is not a big deal. Some drivers already do this; we can do it at the upper level. There is one known exception to this early orphaning, namely tx timestamping: it needs to keep a reference to the socket until the device can provide a hardware or software timestamp. This patch adds a skb_orphan_try() helper to centralize all exceptions to early orphaning in one spot, and uses it in dev_hard_start_xmit(). "tbench 16" results on a Nehalem machine (2 X5570 @ 2.93GHz): before: Throughput 4428.9 MB/sec 16 procs after: Throughput 4448.14 MB/sec 16 procs UDP should get even better results, its destructor being more complex, since SOCK_USE_WRITE_QUEUE is not set (four atomic ops instead of one). Signed-off-by: Eric Dumazet Signed-off-by: David S.
Miller --- net/core/dev.c | 27 +++++++++++++-------------- 1 file changed, 13 insertions(+), 14 deletions(-) (limited to 'net/core/dev.c') diff --git a/net/core/dev.c b/net/core/dev.c index 8092f01..8eb50e2 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1880,6 +1880,17 @@ static int dev_gso_segment(struct sk_buff *skb) return 0; } +/* + * Try to orphan skb early, right before transmission by the device. + * We cannot orphan skb if tx timestamp is requested, since + * drivers need to call skb_tstamp_tx() to send the timestamp. + */ +static inline void skb_orphan_try(struct sk_buff *skb) +{ + if (!skb_tx(skb)->flags) + skb_orphan(skb); +} + int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, struct netdev_queue *txq) { @@ -1904,23 +1915,10 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, if (dev->priv_flags & IFF_XMIT_DST_RELEASE) skb_dst_drop(skb); + skb_orphan_try(skb); rc = ops->ndo_start_xmit(skb, dev); if (rc == NETDEV_TX_OK) txq_trans_update(txq); - /* - * TODO: if skb_orphan() was called by - * dev->hard_start_xmit() (for example, the unmodified - * igb driver does that; bnx2 doesn't), then - * skb_tx_software_timestamp() will be unable to send - * back the time stamp. - * - * How can this be prevented? Always create another - * reference to the socket before calling - * dev->hard_start_xmit()? Prevent that skb_orphan() - * does anything in dev->hard_start_xmit() by clearing - * the skb destructor before the call and restoring it - * afterwards, then doing the skb_orphan() ourselves? - */ return rc; } @@ -1938,6 +1936,7 @@ gso: if (dev->priv_flags & IFF_XMIT_DST_RELEASE) skb_dst_drop(nskb); + skb_orphan_try(nskb); rc = ops->ndo_start_xmit(nskb, dev); if (unlikely(rc != NETDEV_TX_OK)) { if (rc & ~NETDEV_TX_MASK) -- cgit v1.1 From 88751275b8e867d756e4f86ae92afe0232de129f Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 19 Apr 2010 05:07:33 +0000 Subject: rps: shortcut net_rps_action() net_rps_action() is a bit expensive on NR_CPUS=64..4096 kernels, even if RPS is not active. Tom Herbert used two bitmasks to hold information needed to send IPI, but a single LIFO list seems more appropriate. Move all RPS logic into net_rps_action() to cleanup net_rx_action() code (remove two ifdefs) Move rps_remote_softirq_cpus into softnet_data to share its first cache line, filling an existing hole. In a future patch, we could call net_rps_action() from process_backlog() to make sure we send IPI before handling this cpu backlog. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/core/dev.c | 79 ++++++++++++++++++++++++---------------------------------- 1 file changed, 32 insertions(+), 47 deletions(-) (limited to 'net/core/dev.c') diff --git a/net/core/dev.c b/net/core/dev.c index 8eb50e2..05a2b29 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2345,21 +2345,6 @@ done: return cpu; } -/* - * This structure holds the per-CPU mask of CPUs for which IPIs are scheduled - * to be sent to kick remote softirq processing. There are two masks since - * the sending of IPIs must be done with interrupts enabled. The select field - * indicates the current mask that enqueue_backlog uses to schedule IPIs. - * select is flipped before net_rps_action is called while still under lock, - * net_rps_action then uses the non-selected mask to send the IPIs and clears - * it without conflicting with enqueue_backlog operation. 
- */ -struct rps_remote_softirq_cpus { - cpumask_t mask[2]; - int select; -}; -static DEFINE_PER_CPU(struct rps_remote_softirq_cpus, rps_remote_softirq_cpus); - /* Called from hardirq (IPI) context */ static void trigger_softirq(void *data) { @@ -2402,10 +2387,12 @@ enqueue: if (napi_schedule_prep(&queue->backlog)) { #ifdef CONFIG_RPS if (cpu != smp_processor_id()) { - struct rps_remote_softirq_cpus *rcpus = - &__get_cpu_var(rps_remote_softirq_cpus); + struct softnet_data *myqueue; + + myqueue = &__get_cpu_var(softnet_data); + queue->rps_ipi_next = myqueue->rps_ipi_list; + myqueue->rps_ipi_list = queue; - cpu_set(cpu, rcpus->mask[rcpus->select]); __raise_softirq_irqoff(NET_RX_SOFTIRQ); goto enqueue; } @@ -2910,7 +2897,9 @@ int netif_receive_skb(struct sk_buff *skb) } EXPORT_SYMBOL(netif_receive_skb); -/* Network device is going away, flush any packets still pending */ +/* Network device is going away, flush any packets still pending + * Called with irqs disabled. + */ static void flush_backlog(void *arg) { struct net_device *dev = arg; @@ -3338,24 +3327,33 @@ void netif_napi_del(struct napi_struct *napi) } EXPORT_SYMBOL(netif_napi_del); -#ifdef CONFIG_RPS /* - * net_rps_action sends any pending IPI's for rps. This is only called from - * softirq and interrupts must be enabled. + * net_rps_action sends any pending IPI's for rps. + * Note: called with local irq disabled, but exits with local irq enabled. */ -static void net_rps_action(cpumask_t *mask) +static void net_rps_action(void) { - int cpu; +#ifdef CONFIG_RPS + struct softnet_data *locqueue = &__get_cpu_var(softnet_data); + struct softnet_data *remqueue = locqueue->rps_ipi_list; - /* Send pending IPI's to kick RPS processing on remote cpus. */ - for_each_cpu_mask_nr(cpu, *mask) { - struct softnet_data *queue = &per_cpu(softnet_data, cpu); - if (cpu_online(cpu)) - __smp_call_function_single(cpu, &queue->csd, 0); - } - cpus_clear(*mask); -} + if (remqueue) { + locqueue->rps_ipi_list = NULL; + + local_irq_enable(); + + /* Send pending IPI's to kick RPS processing on remote cpus. */ + while (remqueue) { + struct softnet_data *next = remqueue->rps_ipi_next; + if (cpu_online(remqueue->cpu)) + __smp_call_function_single(remqueue->cpu, + &remqueue->csd, 0); + remqueue = next; + } + } else #endif + local_irq_enable(); +} static void net_rx_action(struct softirq_action *h) { @@ -3363,10 +3361,6 @@ static void net_rx_action(struct softirq_action *h) unsigned long time_limit = jiffies + 2; int budget = netdev_budget; void *have; -#ifdef CONFIG_RPS - int select; - struct rps_remote_softirq_cpus *rcpus; -#endif local_irq_disable(); @@ -3429,17 +3423,7 @@ static void net_rx_action(struct softirq_action *h) netpoll_poll_unlock(have); } out: -#ifdef CONFIG_RPS - rcpus = &__get_cpu_var(rps_remote_softirq_cpus); - select = rcpus->select; - rcpus->select ^= 1; - - local_irq_enable(); - - net_rps_action(&rcpus->mask[select]); -#else - local_irq_enable(); -#endif + net_rps_action(); #ifdef CONFIG_NET_DMA /* @@ -5839,6 +5823,7 @@ static int __init net_dev_init(void) queue->csd.func = trigger_softirq; queue->csd.info = queue; queue->csd.flags = 0; + queue->cpu = i; #endif queue->backlog.poll = process_backlog; -- cgit v1.1 From e36fa2f7e92f25aab2e3d787dcfe3590817f19d3 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 19 Apr 2010 21:17:14 +0000 Subject: rps: cleanups struct softnet_data holds many queues, so consistent use "sd" name instead of "queue" is better. 
Adds a rps_ipi_queued() helper to cleanup enqueue_to_backlog() Adds a _and_irq_disable suffix to net_rps_action() name, as David suggested. incr_input_queue_head() becomes input_queue_head_incr() Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- net/core/dev.c | 149 +++++++++++++++++++++++++++++++-------------------------- 1 file changed, 80 insertions(+), 69 deletions(-) (limited to 'net/core/dev.c') diff --git a/net/core/dev.c b/net/core/dev.c index 05a2b29..7f5755b 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -208,17 +208,17 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex) return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)]; } -static inline void rps_lock(struct softnet_data *queue) +static inline void rps_lock(struct softnet_data *sd) { #ifdef CONFIG_RPS - spin_lock(&queue->input_pkt_queue.lock); + spin_lock(&sd->input_pkt_queue.lock); #endif } -static inline void rps_unlock(struct softnet_data *queue) +static inline void rps_unlock(struct softnet_data *sd) { #ifdef CONFIG_RPS - spin_unlock(&queue->input_pkt_queue.lock); + spin_unlock(&sd->input_pkt_queue.lock); #endif } @@ -2346,63 +2346,74 @@ done: } /* Called from hardirq (IPI) context */ -static void trigger_softirq(void *data) +static void rps_trigger_softirq(void *data) { - struct softnet_data *queue = data; - __napi_schedule(&queue->backlog); + struct softnet_data *sd = data; + + __napi_schedule(&sd->backlog); __get_cpu_var(netdev_rx_stat).received_rps++; } + #endif /* CONFIG_RPS */ /* + * Check if this softnet_data structure is another cpu one + * If yes, queue it to our IPI list and return 1 + * If no, return 0 + */ +static int rps_ipi_queued(struct softnet_data *sd) +{ +#ifdef CONFIG_RPS + struct softnet_data *mysd = &__get_cpu_var(softnet_data); + + if (sd != mysd) { + sd->rps_ipi_next = mysd->rps_ipi_list; + mysd->rps_ipi_list = sd; + + __raise_softirq_irqoff(NET_RX_SOFTIRQ); + return 1; + } +#endif /* CONFIG_RPS */ + return 0; +} + +/* * enqueue_to_backlog is called to queue an skb to a per CPU backlog * queue (may be a remote CPU queue). 
*/ static int enqueue_to_backlog(struct sk_buff *skb, int cpu, unsigned int *qtail) { - struct softnet_data *queue; + struct softnet_data *sd; unsigned long flags; - queue = &per_cpu(softnet_data, cpu); + sd = &per_cpu(softnet_data, cpu); local_irq_save(flags); __get_cpu_var(netdev_rx_stat).total++; - rps_lock(queue); - if (queue->input_pkt_queue.qlen <= netdev_max_backlog) { - if (queue->input_pkt_queue.qlen) { + rps_lock(sd); + if (sd->input_pkt_queue.qlen <= netdev_max_backlog) { + if (sd->input_pkt_queue.qlen) { enqueue: - __skb_queue_tail(&queue->input_pkt_queue, skb); + __skb_queue_tail(&sd->input_pkt_queue, skb); #ifdef CONFIG_RPS - *qtail = queue->input_queue_head + - queue->input_pkt_queue.qlen; + *qtail = sd->input_queue_head + sd->input_pkt_queue.qlen; #endif - rps_unlock(queue); + rps_unlock(sd); local_irq_restore(flags); return NET_RX_SUCCESS; } /* Schedule NAPI for backlog device */ - if (napi_schedule_prep(&queue->backlog)) { -#ifdef CONFIG_RPS - if (cpu != smp_processor_id()) { - struct softnet_data *myqueue; - - myqueue = &__get_cpu_var(softnet_data); - queue->rps_ipi_next = myqueue->rps_ipi_list; - myqueue->rps_ipi_list = queue; - - __raise_softirq_irqoff(NET_RX_SOFTIRQ); - goto enqueue; - } -#endif - __napi_schedule(&queue->backlog); + if (napi_schedule_prep(&sd->backlog)) { + if (!rps_ipi_queued(sd)) + __napi_schedule(&sd->backlog); } goto enqueue; } - rps_unlock(queue); + rps_unlock(sd); __get_cpu_var(netdev_rx_stat).dropped++; local_irq_restore(flags); @@ -2903,17 +2914,17 @@ EXPORT_SYMBOL(netif_receive_skb); static void flush_backlog(void *arg) { struct net_device *dev = arg; - struct softnet_data *queue = &__get_cpu_var(softnet_data); + struct softnet_data *sd = &__get_cpu_var(softnet_data); struct sk_buff *skb, *tmp; - rps_lock(queue); - skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp) + rps_lock(sd); + skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) if (skb->dev == dev) { - __skb_unlink(skb, &queue->input_pkt_queue); + __skb_unlink(skb, &sd->input_pkt_queue); kfree_skb(skb); - incr_input_queue_head(queue); + input_queue_head_incr(sd); } - rps_unlock(queue); + rps_unlock(sd); } static int napi_gro_complete(struct sk_buff *skb) @@ -3219,23 +3230,23 @@ EXPORT_SYMBOL(napi_gro_frags); static int process_backlog(struct napi_struct *napi, int quota) { int work = 0; - struct softnet_data *queue = &__get_cpu_var(softnet_data); + struct softnet_data *sd = &__get_cpu_var(softnet_data); napi->weight = weight_p; do { struct sk_buff *skb; local_irq_disable(); - rps_lock(queue); - skb = __skb_dequeue(&queue->input_pkt_queue); + rps_lock(sd); + skb = __skb_dequeue(&sd->input_pkt_queue); if (!skb) { __napi_complete(napi); - rps_unlock(queue); + rps_unlock(sd); local_irq_enable(); break; } - incr_input_queue_head(queue); - rps_unlock(queue); + input_queue_head_incr(sd); + rps_unlock(sd); local_irq_enable(); __netif_receive_skb(skb); @@ -3331,24 +3342,25 @@ EXPORT_SYMBOL(netif_napi_del); * net_rps_action sends any pending IPI's for rps. * Note: called with local irq disabled, but exits with local irq enabled. 
*/ -static void net_rps_action(void) +static void net_rps_action_and_irq_disable(void) { #ifdef CONFIG_RPS - struct softnet_data *locqueue = &__get_cpu_var(softnet_data); - struct softnet_data *remqueue = locqueue->rps_ipi_list; + struct softnet_data *sd = &__get_cpu_var(softnet_data); + struct softnet_data *remsd = sd->rps_ipi_list; - if (remqueue) { - locqueue->rps_ipi_list = NULL; + if (remsd) { + sd->rps_ipi_list = NULL; local_irq_enable(); /* Send pending IPI's to kick RPS processing on remote cpus. */ - while (remqueue) { - struct softnet_data *next = remqueue->rps_ipi_next; - if (cpu_online(remqueue->cpu)) - __smp_call_function_single(remqueue->cpu, - &remqueue->csd, 0); - remqueue = next; + while (remsd) { + struct softnet_data *next = remsd->rps_ipi_next; + + if (cpu_online(remsd->cpu)) + __smp_call_function_single(remsd->cpu, + &remsd->csd, 0); + remsd = next; } } else #endif @@ -3423,7 +3435,7 @@ static void net_rx_action(struct softirq_action *h) netpoll_poll_unlock(have); } out: - net_rps_action(); + net_rps_action_and_irq_disable(); #ifdef CONFIG_NET_DMA /* @@ -5595,7 +5607,7 @@ static int dev_cpu_callback(struct notifier_block *nfb, /* Process offline CPU's input_pkt_queue */ while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) { netif_rx(skb); - incr_input_queue_head(oldsd); + input_queue_head_incr(oldsd); } return NOTIFY_OK; @@ -5812,24 +5824,23 @@ static int __init net_dev_init(void) */ for_each_possible_cpu(i) { - struct softnet_data *queue; + struct softnet_data *sd = &per_cpu(softnet_data, i); - queue = &per_cpu(softnet_data, i); - skb_queue_head_init(&queue->input_pkt_queue); - queue->completion_queue = NULL; - INIT_LIST_HEAD(&queue->poll_list); + skb_queue_head_init(&sd->input_pkt_queue); + sd->completion_queue = NULL; + INIT_LIST_HEAD(&sd->poll_list); #ifdef CONFIG_RPS - queue->csd.func = trigger_softirq; - queue->csd.info = queue; - queue->csd.flags = 0; - queue->cpu = i; + sd->csd.func = rps_trigger_softirq; + sd->csd.info = sd; + sd->csd.flags = 0; + sd->cpu = i; #endif - queue->backlog.poll = process_backlog; - queue->backlog.weight = weight_p; - queue->backlog.gro_list = NULL; - queue->backlog.gro_count = 0; + sd->backlog.poll = process_backlog; + sd->backlog.weight = weight_p; + sd->backlog.gro_list = NULL; + sd->backlog.gro_count = 0; } dev_boot_phase = 0; -- cgit v1.1 From b249dcb82d327e419d3cb45773b146ebb5faf419 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 19 Apr 2010 21:56:38 +0000 Subject: rps: consistent rxhash In case we compute a software skb->rxhash, we can generate a consistent hash : Its value will be the same in both flow directions. This helps some workloads, like conntracking, since the same state needs to be accessed in both directions. tbench + RFS + this patch gives better results than tbench with default kernel configuration (no RPS, no RFS) Also fixed some sparse warnings. Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- net/core/dev.c | 25 ++++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) (limited to 'net/core/dev.c') diff --git a/net/core/dev.c b/net/core/dev.c index 7f5755b..0d78e04 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1974,7 +1974,7 @@ u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb) if (skb->sk && skb->sk->sk_hash) hash = skb->sk->sk_hash; else - hash = skb->protocol; + hash = (__force u16) skb->protocol; hash = jhash_1word(hash, hashrnd); @@ -2253,8 +2253,8 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, ip = (struct iphdr *) skb->data; ip_proto = ip->protocol; - addr1 = ip->saddr; - addr2 = ip->daddr; + addr1 = (__force u32) ip->saddr; + addr2 = (__force u32) ip->daddr; ihl = ip->ihl; break; case __constant_htons(ETH_P_IPV6): @@ -2263,8 +2263,8 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, ip6 = (struct ipv6hdr *) skb->data; ip_proto = ip6->nexthdr; - addr1 = ip6->saddr.s6_addr32[3]; - addr2 = ip6->daddr.s6_addr32[3]; + addr1 = (__force u32) ip6->saddr.s6_addr32[3]; + addr2 = (__force u32) ip6->daddr.s6_addr32[3]; ihl = (40 >> 2); break; default: @@ -2279,14 +2279,25 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, case IPPROTO_AH: case IPPROTO_SCTP: case IPPROTO_UDPLITE: - if (pskb_may_pull(skb, (ihl * 4) + 4)) - ports = *((u32 *) (skb->data + (ihl * 4))); + if (pskb_may_pull(skb, (ihl * 4) + 4)) { + __be16 *hports = (__be16 *) (skb->data + (ihl * 4)); + u32 sport, dport; + + sport = (__force u16) hports[0]; + dport = (__force u16) hports[1]; + if (dport < sport) + swap(sport, dport); + ports = (sport << 16) + dport; + } break; default: break; } + /* get a consistent hash (same value on both flow directions) */ + if (addr2 < addr1) + swap(addr1, addr2); skb->rxhash = jhash_3words(addr1, addr2, ports, hashrnd); if (!skb->rxhash) skb->rxhash = 1; -- cgit v1.1 From ab9304717f7624c41927f442e6b6d418b2d8b3e4 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Tue, 20 Apr 2010 01:45:37 -0700 Subject: net: emphasize rtnl lock required in call_netdevice_notifiers Since netdev_chain is guarded by rtnl_lock, ASSERT_RTNL should be present here to make sure that all callers of call_netdevice_notifiers do the locking properly. Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- net/core/dev.c | 1 + 1 file changed, 1 insertion(+) (limited to 'net/core/dev.c') diff --git a/net/core/dev.c b/net/core/dev.c index 0d78e04..b31d5d6 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1435,6 +1435,7 @@ EXPORT_SYMBOL(unregister_netdevice_notifier); int call_netdevice_notifiers(unsigned long val, struct net_device *dev) { + ASSERT_RTNL(); return raw_notifier_call_chain(&netdev_chain, val, dev); } -- cgit v1.1
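To see the consistent-rxhash property from the patch above in isolation, here is a small standalone sketch: it orders the address pair and the port pair before hashing, so both directions of a flow produce the same value. hash_3words() is an arbitrary stand-in mixer, not the kernel's jhash_3words(), and the addresses and ports in main() are made-up test values.

#include <stdint.h>
#include <stdio.h>

/* toy mixer standing in for jhash_3words(); constants are arbitrary */
static uint32_t hash_3words(uint32_t a, uint32_t b, uint32_t c)
{
        uint32_t h = a * 0x9e3779b1u ^ b * 0x85ebca77u ^ c * 0xc2b2ae3du;

        h ^= h >> 16;
        return h ? h : 1;       /* mirror the kernel's "never return 0" rule */
}

static uint32_t flow_hash(uint32_t saddr, uint32_t daddr,
                          uint16_t sport, uint16_t dport)
{
        uint32_t addr1 = saddr, addr2 = daddr;
        uint32_t p1 = sport, p2 = dport;
        uint32_t tmp;

        /* canonical order: smaller address first, smaller port first */
        if (addr2 < addr1) {
                tmp = addr1; addr1 = addr2; addr2 = tmp;
        }
        if (p2 < p1) {
                tmp = p1; p1 = p2; p2 = tmp;
        }
        return hash_3words(addr1, addr2, (p1 << 16) + p2);
}

int main(void)
{
        /* the same flow, seen from each end, hashes identically */
        uint32_t a = flow_hash(0x0a000001, 0x0a000002, 12345, 80);
        uint32_t b = flow_hash(0x0a000002, 0x0a000001, 80, 12345);

        printf("%08x %08x -> %s\n", (unsigned) a, (unsigned) b,
               a == b ? "consistent" : "mismatch");
        return 0;
}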