diff options
author | Chris Leech <christopher.leech@intel.com> | 2008-01-31 16:53:23 -0800 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2008-01-31 19:28:24 -0800 |
commit | e83a2ea850bf0c0c81c675444080970fc07798c6 (patch) | |
tree | ebdf251be6fa2f9b2b482cd0e6393fdbfc8278a0 /net/core/dev_mcast.c | |
parent | 16ca3f913001efdb6171a2781ef41c77474e3895 (diff) | |
download | kernel_samsung_crespo-e83a2ea850bf0c0c81c675444080970fc07798c6.zip kernel_samsung_crespo-e83a2ea850bf0c0c81c675444080970fc07798c6.tar.gz kernel_samsung_crespo-e83a2ea850bf0c0c81c675444080970fc07798c6.tar.bz2 |
[VLAN]: set_rx_mode support for unicast address list
Reuse the existing logic for multicast list synchronization for the
unicast address list. The core of dev_mc_sync/unsync are split out as
__dev_addr_sync/unsync and moved from dev_mcast.c to dev.c. These are
then used to implement dev_unicast_sync/unsync as well.
I'm working on cleaning up Intel's FCoE stack, which generates new MAC
addresses from the fibre channel device id assigned by the fabric as
per the current draft specification in T11. When using such a
protocol in a VLAN environment it would be nice to not always be
forced into promiscuous mode, assuming the underlying Ethernet driver
supports multiple unicast addresses as well.
Signed-off-by: Chris Leech <christopher.leech@intel.com>
Signed-off-by: Patrick McHardy <kaber@trash.net>
Diffstat (limited to 'net/core/dev_mcast.c')
-rw-r--r-- | net/core/dev_mcast.c | 39 |
1 file changed, 5 insertions(+), 34 deletions(-)
diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c
index cadbfbf..cec5825 100644
--- a/net/core/dev_mcast.c
+++ b/net/core/dev_mcast.c
@@ -113,32 +113,15 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
  *	locked by netif_tx_lock_bh.
  *
  *	This function is intended to be called from the dev->set_multicast_list
- *	function of layered software devices.
+ *	or dev->set_rx_mode function of layered software devices.
  */
 int dev_mc_sync(struct net_device *to, struct net_device *from)
 {
-	struct dev_addr_list *da, *next;
 	int err = 0;
 
 	netif_tx_lock_bh(to);
-	da = from->mc_list;
-	while (da != NULL) {
-		next = da->next;
-		if (!da->da_synced) {
-			err = __dev_addr_add(&to->mc_list, &to->mc_count,
-					     da->da_addr, da->da_addrlen, 0);
-			if (err < 0)
-				break;
-			da->da_synced = 1;
-			da->da_users++;
-		} else if (da->da_users == 1) {
-			__dev_addr_delete(&to->mc_list, &to->mc_count,
-					  da->da_addr, da->da_addrlen, 0);
-			__dev_addr_delete(&from->mc_list, &from->mc_count,
-					  da->da_addr, da->da_addrlen, 0);
-		}
-		da = next;
-	}
+	err = __dev_addr_sync(&to->mc_list, &to->mc_count,
+			      &from->mc_list, &from->mc_count);
 	if (!err)
 		__dev_set_rx_mode(to);
 	netif_tx_unlock_bh(to);
@@ -160,23 +143,11 @@ EXPORT_SYMBOL(dev_mc_sync);
  */
 void dev_mc_unsync(struct net_device *to, struct net_device *from)
 {
-	struct dev_addr_list *da, *next;
-
 	netif_tx_lock_bh(from);
 	netif_tx_lock_bh(to);
-	da = from->mc_list;
-	while (da != NULL) {
-		next = da->next;
-		if (da->da_synced) {
-			__dev_addr_delete(&to->mc_list, &to->mc_count,
-					  da->da_addr, da->da_addrlen, 0);
-			da->da_synced = 0;
-			__dev_addr_delete(&from->mc_list, &from->mc_count,
-					  da->da_addr, da->da_addrlen, 0);
-		}
-		da = next;
-	}
+	__dev_addr_unsync(&to->mc_list, &to->mc_count,
+			  &from->mc_list, &from->mc_count);
 	__dev_set_rx_mode(to);
 	netif_tx_unlock_bh(to);